diff --git "a/pipeline.py" "b/pipeline.py"
--- "a/pipeline.py"
+++ "b/pipeline.py"
@@ -1,52 +1,53 @@
-# source https://github.com/huggingface/diffusers/blob/main/examples/community/lpw_stable_diffusion_xl.py
-## ----------------------------------------------------------
-# A SDXL pipeline can take unlimited weighted prompt
-#
-# Author: Andrew Zhu
-# Github: https://github.com/xhinker
-# Medium: https://medium.com/@xhinker
-## -----------------------------------------------------------
-
+# source https://github.com/huggingface/diffusers/blob/main/examples/community/lpw_stable_diffusion.py
import inspect
-import os
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+import re
+from typing import Any, Callable, Dict, List, Optional, Union
+import numpy as np
+import PIL.Image
import torch
-from PIL import Image
-from transformers import (
- CLIPImageProcessor,
- CLIPTextModel,
- CLIPTextModelWithProjection,
- CLIPTokenizer,
- CLIPVisionModelWithProjection,
-)
-
-from diffusers import DiffusionPipeline, StableDiffusionXLPipeline
-from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
-from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
-from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from diffusers.models.attention_processor import (
- AttnProcessor2_0,
- FusedAttnProcessor2_0,
- LoRAAttnProcessor2_0,
- LoRAXFormersAttnProcessor,
- XFormersAttnProcessor,
-)
-from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
+from packaging import version
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+
+from diffusers import DiffusionPipeline
+from diffusers.configuration_utils import FrozenDict
+from diffusers.image_processor import VaeImageProcessor
+from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from diffusers.models import AutoencoderKL, UNet2DConditionModel
+from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
+ PIL_INTERPOLATION,
deprecate,
is_accelerate_available,
is_accelerate_version,
- is_invisible_watermark_available,
logging,
- replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor
-if is_invisible_watermark_available():
- from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
+# ------------------------------------------------------------------------------
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+re_attention = re.compile(
+ r"""
+\\\(|
+\\\)|
+\\\[|
+\\]|
+\\\\|
+\\|
+\(|
+\[|
+:([+-]?[.\d]+)\)|
+\)|
+]|
+[^\\()\[\]:]+|
+:
+""",
+ re.X,
+)
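+
+# As an illustration, re_attention splits "a (red:1.5) cat" into the pieces
+# "a ", "(", "red", ":1.5)" (capturing "1.5" as the weight) and " cat".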
def parse_prompt_attention(text):
@@ -62,7 +63,6 @@ def parse_prompt_attention(text):
\\] - literal character ']'
\\ - literal character '\'
anything else - just text
-
>>> parse_prompt_attention('normal text')
[['normal text', 1.0]]
>>> parse_prompt_attention('an (important) word')
@@ -84,17 +84,6 @@ def parse_prompt_attention(text):
['sky', 1.4641000000000006],
['.', 1.1]]
"""
- import re
-
- re_attention = re.compile(
- r"""
- \\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)|
- \)|]|[^\\()\[\]:]+|:
- """,
- re.X,
- )
-
- re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
res = []
round_brackets = []
@@ -124,11 +113,7 @@ def parse_prompt_attention(text):
elif text == "]" and len(square_brackets) > 0:
multiply_range(square_brackets.pop(), square_bracket_multiplier)
else:
- parts = re.split(re_break, text)
- for i, part in enumerate(parts):
- if i > 0:
- res.append(["BREAK", -1])
- res.append([part, 1.0])
+ res.append([text, 1.0])
for pos in round_brackets:
multiply_range(pos, round_bracket_multiplier)
@@ -151,538 +136,464 @@ def parse_prompt_attention(text):
return res
-def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str):
- """
- Get prompt token ids and weights, this function works for both prompt and negative prompt
-
- Args:
- pipe (CLIPTokenizer)
- A CLIPTokenizer
- prompt (str)
- A prompt string with weights
-
- Returns:
- text_tokens (list)
- A list contains token ids
- text_weight (list)
- A list contains the correspodent weight of token ids
-
- Example:
- import torch
- from transformers import CLIPTokenizer
-
- clip_tokenizer = CLIPTokenizer.from_pretrained(
- "stablediffusionapi/deliberate-v2"
- , subfolder = "tokenizer"
- , dtype = torch.float16
- )
+def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
+ r"""
+ Tokenize a list of prompts and return their token ids together with the weight of each token.
- token_id_list, token_weight_list = get_prompts_tokens_with_weights(
- clip_tokenizer = clip_tokenizer
- ,prompt = "a (red:1.5) cat"*70
- )
+ No padding, starting or ending token is included.
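+
+ Example (illustrative sketch; assumes `pipe` is a loaded pipeline with a CLIP tokenizer):
+
+     tokens, weights = get_prompts_with_weights(pipe, ["a (red:1.5) cat"], max_length=75)
+     # tokens[0]  -> token ids of "a red cat" without bos/eos
+     # weights[0] -> the weight of each token id, e.g. [1.0, 1.5, 1.0]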
"""
- texts_and_weights = parse_prompt_attention(prompt)
- text_tokens, text_weights = [], []
- for word, weight in texts_and_weights:
- # tokenize and discard the starting and the ending token
- token = clip_tokenizer(word, truncation=False).input_ids[1:-1] # so that tokenize whatever length prompt
- # the returned token is a 1d list: [320, 1125, 539, 320]
-
- # merge the new tokens to the all tokens holder: text_tokens
- text_tokens = [*text_tokens, *token]
-
- # each token chunk will come with one weight, like ['red cat', 2.0]
- # need to expand weight for each token.
- chunk_weights = [weight] * len(token)
-
- # append the weight back to the weight holder: text_weights
- text_weights = [*text_weights, *chunk_weights]
- return text_tokens, text_weights
+ tokens = []
+ weights = []
+ truncated = False
+ for text in prompt:
+ texts_and_weights = parse_prompt_attention(text)
+ text_token = []
+ text_weight = []
+ for word, weight in texts_and_weights:
+ # tokenize and discard the starting and the ending token
+ token = pipe.tokenizer(word).input_ids[1:-1]
+ text_token += token
+ # copy the weight by length of token
+ text_weight += [weight] * len(token)
+ # stop if the text is too long (longer than truncation limit)
+ if len(text_token) > max_length:
+ truncated = True
+ break
+ # truncate
+ if len(text_token) > max_length:
+ truncated = True
+ text_token = text_token[:max_length]
+ text_weight = text_weight[:max_length]
+ tokens.append(text_token)
+ weights.append(text_weight)
+ if truncated:
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
+ return tokens, weights
+
+
+def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
+ r"""
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
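+
+ Example (illustrative; the token ids are assumptions, bos/eos/pad are the CLIP values):
+
+     tokens, weights = pad_tokens_and_weights(
+         [[320, 736]], [[1.0, 1.5]], max_length=7, bos=49406, eos=49407, pad=49407
+     )
+     # tokens[0]  -> [49406, 320, 736, 49407, 49407, 49407, 49407]
+     # weights[0] -> [1.0, 1.0, 1.5, 1.0, 1.0, 1.0, 1.0]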
+ """
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
+ for i in range(len(tokens)):
+ tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
+ if no_boseos_middle:
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
+ else:
+ w = []
+ if len(weights[i]) == 0:
+ w = [1.0] * weights_length
+ else:
+ for j in range(max_embeddings_multiples):
+ w.append(1.0) # weight for starting token in this chunk
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
+ w.append(1.0) # weight for ending token in this chunk
+ w += [1.0] * (weights_length - len(w))
+ weights[i] = w[:]
+ return tokens, weights
-def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False):
- """
- Produce tokens and weights in groups and pad the missing tokens
- Args:
- token_ids (list)
- The token ids from tokenizer
- weights (list)
- The weights list from function get_prompts_tokens_with_weights
- pad_last_block (bool)
- Control if fill the last token list to 75 tokens with eos
- Returns:
- new_token_ids (2d list)
- new_weights (2d list)
-
- Example:
- token_groups,weight_groups = group_tokens_and_weights(
- token_ids = token_id_list
- , weights = token_weight_list
- )
- """
- bos, eos = 49406, 49407
-
- # this will be a 2d list
- new_token_ids = []
- new_weights = []
- while len(token_ids) >= 75:
- # get the first 75 tokens
- head_75_tokens = [token_ids.pop(0) for _ in range(75)]
- head_75_weights = [weights.pop(0) for _ in range(75)]
-
- # extract token ids and weights
- temp_77_token_ids = [bos] + head_75_tokens + [eos]
- temp_77_weights = [1.0] + head_75_weights + [1.0]
-
- # add 77 token and weights chunk to the holder list
- new_token_ids.append(temp_77_token_ids)
- new_weights.append(temp_77_weights)
-
- # padding the left
- if len(token_ids) > 0:
- padding_len = 75 - len(token_ids) if pad_last_block else 0
-
- temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos]
- new_token_ids.append(temp_77_token_ids)
-
- temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0]
- new_weights.append(temp_77_weights)
-
- return new_token_ids, new_weights
-
-
-def get_weighted_text_embeddings_sdxl(
- pipe: StableDiffusionXLPipeline,
- prompt: str = "",
- prompt_2: str = None,
- neg_prompt: str = "",
- neg_prompt_2: str = None,
- num_images_per_prompt: int = 1,
- device: Optional[torch.device] = None,
- clip_skip: Optional[int] = None,
+def get_unweighted_text_embeddings(
+ pipe: DiffusionPipeline,
+ text_input: torch.Tensor,
+ chunk_length: int,
+ no_boseos_middle: Optional[bool] = True,
):
"""
- This function can process long prompt with weights, no length limitation
- for Stable Diffusion XL
-
- Args:
- pipe (StableDiffusionPipeline)
- prompt (str)
- prompt_2 (str)
- neg_prompt (str)
- neg_prompt_2 (str)
- num_images_per_prompt (int)
- device (torch.device)
- clip_skip (int)
- Returns:
- prompt_embeds (torch.Tensor)
- neg_prompt_embeds (torch.Tensor)
+ When the tokenized input is longer than the capacity of the text encoder, it
+ is split into chunks that are sent to the text encoder individually.
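+
+ Example (illustrative; assumes `pipe.text_encoder` is a CLIP text encoder with a 77-token window):
+
+     # assuming `text_input` is a LongTensor of shape (batch, 152): two 75-token chunks
+     # plus bos/eos, the encoder is called once per chunk and the results are concatenated
+     embeddings = get_unweighted_text_embeddings(pipe, text_input, chunk_length=77)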
"""
- device = device or pipe._execution_device
-
- if prompt_2:
- prompt = f"{prompt} {prompt_2}"
-
- if neg_prompt_2:
- neg_prompt = f"{neg_prompt} {neg_prompt_2}"
-
- prompt_t1 = prompt_t2 = prompt
- neg_prompt_t1 = neg_prompt_t2 = neg_prompt
-
- if isinstance(pipe, TextualInversionLoaderMixin):
- prompt_t1 = pipe.maybe_convert_prompt(prompt_t1, pipe.tokenizer)
- neg_prompt_t1 = pipe.maybe_convert_prompt(neg_prompt_t1, pipe.tokenizer)
- prompt_t2 = pipe.maybe_convert_prompt(prompt_t2, pipe.tokenizer_2)
- neg_prompt_t2 = pipe.maybe_convert_prompt(neg_prompt_t2, pipe.tokenizer_2)
-
- eos = pipe.tokenizer.eos_token_id
-
- # tokenizer 1
- prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, prompt_t1)
- neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, neg_prompt_t1)
-
- # tokenizer 2
- prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, prompt_t2)
- neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, neg_prompt_t2)
-
- # padding the shorter one for prompt set 1
- prompt_token_len = len(prompt_tokens)
- neg_prompt_token_len = len(neg_prompt_tokens)
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
+ if max_embeddings_multiples > 1:
+ text_embeddings = []
+ for i in range(max_embeddings_multiples):
+ # extract the i-th chunk
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
+
+ # cover the head and the tail by the starting and the ending tokens
+ text_input_chunk[:, 0] = text_input[0, 0]
+ text_input_chunk[:, -1] = text_input[0, -1]
+ text_embedding = pipe.text_encoder(text_input_chunk)[0]
+
+ if no_boseos_middle:
+ if i == 0:
+ # discard the ending token
+ text_embedding = text_embedding[:, :-1]
+ elif i == max_embeddings_multiples - 1:
+ # discard the starting token
+ text_embedding = text_embedding[:, 1:]
+ else:
+ # discard both starting and ending tokens
+ text_embedding = text_embedding[:, 1:-1]
- if prompt_token_len > neg_prompt_token_len:
- # padding the neg_prompt with eos token
- neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
- neg_prompt_weights = neg_prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
- else:
- # padding the prompt
- prompt_tokens = prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
- prompt_weights = prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
-
- # padding the shorter one for token set 2
- prompt_token_len_2 = len(prompt_tokens_2)
- neg_prompt_token_len_2 = len(neg_prompt_tokens_2)
-
- if prompt_token_len_2 > neg_prompt_token_len_2:
- # padding the neg_prompt with eos token
- neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
- neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
+ text_embeddings.append(text_embedding)
+ text_embeddings = torch.concat(text_embeddings, axis=1)
else:
- # padding the prompt
- prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
- prompt_weights_2 = prompt_weights + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
-
- embeds = []
- neg_embeds = []
+ text_embeddings = pipe.text_encoder(text_input)[0]
+ return text_embeddings
+
+
+def get_weighted_text_embeddings(
+ pipe: DiffusionPipeline,
+ prompt: Union[str, List[str]],
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
+ max_embeddings_multiples: Optional[int] = 3,
+ no_boseos_middle: Optional[bool] = False,
+ skip_parsing: Optional[bool] = False,
+ skip_weighting: Optional[bool] = False,
+):
+ r"""
+ Prompts can be assigned local weights using brackets. For example, the prompt
+ 'A (very beautiful) masterpiece' highlights the words 'very beautiful', and the
+ embedding tokens corresponding to those words are multiplied by a constant, 1.1.
- prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(prompt_tokens.copy(), prompt_weights.copy())
+ Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
- neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights(
- neg_prompt_tokens.copy(), neg_prompt_weights.copy()
- )
+ Args:
+ pipe (`DiffusionPipeline`):
+ Pipe to provide access to the tokenizer and the text encoder.
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ uncond_prompt (`str` or `List[str]`):
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
+ Whether to discard the starting and ending token embeddings of the intermediate chunks
+ when the tokenized text spans multiple chunks of the text encoder's capacity.
+ skip_parsing (`bool`, *optional*, defaults to `False`):
+ Skip the parsing of brackets.
+ skip_weighting (`bool`, *optional*, defaults to `False`):
+ Skip the weighting. When parsing is skipped, this is forced to True.
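+
+ Example (illustrative; assumes `pipe` is a loaded Stable Diffusion pipeline):
+
+     text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
+         pipe, prompt="a (very beautiful:1.2) masterpiece", uncond_prompt="lowres, blurry"
+     )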
+ """
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
+ if isinstance(prompt, str):
+ prompt = [prompt]
+
+ if not skip_parsing:
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
+ if uncond_prompt is not None:
+ if isinstance(uncond_prompt, str):
+ uncond_prompt = [uncond_prompt]
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
+ else:
+ prompt_tokens = [
+ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
+ ]
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
+ if uncond_prompt is not None:
+ if isinstance(uncond_prompt, str):
+ uncond_prompt = [uncond_prompt]
+ uncond_tokens = [
+ token[1:-1]
+ for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
+ ]
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
- prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights(
- prompt_tokens_2.copy(), prompt_weights_2.copy()
- )
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
+ max_length = max([len(token) for token in prompt_tokens])
+ if uncond_prompt is not None:
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
- neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights(
- neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy()
+ max_embeddings_multiples = min(
+ max_embeddings_multiples,
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
)
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
- # get prompt embeddings one by one is not working.
- for i in range(len(prompt_token_groups)):
- # get positive prompt embeddings with weights
- token_tensor = torch.tensor([prompt_token_groups[i]], dtype=torch.long, device=device)
- weight_tensor = torch.tensor(prompt_weight_groups[i], dtype=torch.float16, device=device)
-
- token_tensor_2 = torch.tensor([prompt_token_groups_2[i]], dtype=torch.long, device=device)
-
- # use first text encoder
- prompt_embeds_1 = pipe.text_encoder(token_tensor.to(device), output_hidden_states=True)
-
- # use second text encoder
- prompt_embeds_2 = pipe.text_encoder_2(token_tensor_2.to(device), output_hidden_states=True)
- pooled_prompt_embeds = prompt_embeds_2[0]
-
- if clip_skip is None:
- prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2]
- prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2]
- else:
- # "2" because SDXL always indexes from the penultimate layer.
- prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-(clip_skip + 2)]
- prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-(clip_skip + 2)]
-
- prompt_embeds_list = [prompt_embeds_1_hidden_states, prompt_embeds_2_hidden_states]
- token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0)
-
- for j in range(len(weight_tensor)):
- if weight_tensor[j] != 1.0:
- token_embedding[j] = (
- token_embedding[-1] + (token_embedding[j] - token_embedding[-1]) * weight_tensor[j]
- )
-
- token_embedding = token_embedding.unsqueeze(0)
- embeds.append(token_embedding)
-
- # get negative prompt embeddings with weights
- neg_token_tensor = torch.tensor([neg_prompt_token_groups[i]], dtype=torch.long, device=device)
- neg_token_tensor_2 = torch.tensor([neg_prompt_token_groups_2[i]], dtype=torch.long, device=device)
- neg_weight_tensor = torch.tensor(neg_prompt_weight_groups[i], dtype=torch.float16, device=device)
-
- # use first text encoder
- neg_prompt_embeds_1 = pipe.text_encoder(neg_token_tensor.to(device), output_hidden_states=True)
- neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2]
-
- # use second text encoder
- neg_prompt_embeds_2 = pipe.text_encoder_2(neg_token_tensor_2.to(device), output_hidden_states=True)
- neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2]
- negative_pooled_prompt_embeds = neg_prompt_embeds_2[0]
-
- neg_prompt_embeds_list = [neg_prompt_embeds_1_hidden_states, neg_prompt_embeds_2_hidden_states]
- neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0)
-
- for z in range(len(neg_weight_tensor)):
- if neg_weight_tensor[z] != 1.0:
- neg_token_embedding[z] = (
- neg_token_embedding[-1] + (neg_token_embedding[z] - neg_token_embedding[-1]) * neg_weight_tensor[z]
- )
-
- neg_token_embedding = neg_token_embedding.unsqueeze(0)
- neg_embeds.append(neg_token_embedding)
-
- prompt_embeds = torch.cat(embeds, dim=1)
- negative_prompt_embeds = torch.cat(neg_embeds, dim=1)
-
- bs_embed, seq_len, _ = prompt_embeds.shape
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- seq_len = negative_prompt_embeds.shape[1]
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
- negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view(
- bs_embed * num_images_per_prompt, -1
+ # pad the length of tokens and weights
+ bos = pipe.tokenizer.bos_token_id
+ eos = pipe.tokenizer.eos_token_id
+ pad = getattr(pipe.tokenizer, "pad_token_id", eos)
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
+ prompt_tokens,
+ prompt_weights,
+ max_length,
+ bos,
+ eos,
+ pad,
+ no_boseos_middle=no_boseos_middle,
+ chunk_length=pipe.tokenizer.model_max_length,
)
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view(
- bs_embed * num_images_per_prompt, -1
+ prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
+ if uncond_prompt is not None:
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
+ uncond_tokens,
+ uncond_weights,
+ max_length,
+ bos,
+ eos,
+ pad,
+ no_boseos_middle=no_boseos_middle,
+ chunk_length=pipe.tokenizer.model_max_length,
+ )
+ uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
+
+ # get the embeddings
+ text_embeddings = get_unweighted_text_embeddings(
+ pipe,
+ prompt_tokens,
+ pipe.tokenizer.model_max_length,
+ no_boseos_middle=no_boseos_middle,
)
-
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
-
-
-# -------------------------------------------------------------------------------------------------------------------------------
-# reuse the backbone code from StableDiffusionXLPipeline
-# -------------------------------------------------------------------------------------------------------------------------------
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- from diffusers import DiffusionPipeline
- import torch
-
- pipe = DiffusionPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-base-1.0"
- , torch_dtype = torch.float16
- , use_safetensors = True
- , variant = "fp16"
- , custom_pipeline = "lpw_stable_diffusion_xl",
+ prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
+ if uncond_prompt is not None:
+ uncond_embeddings = get_unweighted_text_embeddings(
+ pipe,
+ uncond_tokens,
+ pipe.tokenizer.model_max_length,
+ no_boseos_middle=no_boseos_middle,
)
+ uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
+
+ # assign weights to the prompts and renormalize so that the original mean is preserved
+ # TODO: should we normalize per chunk or as a whole (current implementation)?
+ if (not skip_parsing) and (not skip_weighting):
+ previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
+ text_embeddings *= prompt_weights.unsqueeze(-1)
+ current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
+ text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
+ if uncond_prompt is not None:
+ previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
+ uncond_embeddings *= uncond_weights.unsqueeze(-1)
+ current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
+ uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
+
+ if uncond_prompt is not None:
+ return text_embeddings, uncond_embeddings
+ return text_embeddings, None
+
+
+def preprocess_image(image, batch_size):
+ w, h = image.size
+ w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
+ image = np.array(image).astype(np.float32) / 255.0
+ image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
+ image = torch.from_numpy(image)
+ return 2.0 * image - 1.0
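+
+# Usage sketch (illustrative; the file path is an assumption):
+#   init_image = PIL.Image.open("input.png").convert("RGB")
+#   image = preprocess_image(init_image, batch_size=1)  # (1, 3, H, W) tensor scaled to [-1, 1]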
+
+
+def preprocess_mask(mask, batch_size, scale_factor=8):
+ if not isinstance(mask, torch.FloatTensor):
+ mask = mask.convert("L")
+ w, h = mask.size
+ w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
+ mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
+ mask = np.array(mask).astype(np.float32) / 255.0
+ mask = np.tile(mask, (4, 1, 1))
+ mask = np.vstack([mask[None]] * batch_size)
+ mask = 1 - mask # repaint white, keep black
+ mask = torch.from_numpy(mask)
+ return mask
- prompt = "a white cat running on the grass"*20
- prompt2 = "play a football"*20
- prompt = f"{prompt},{prompt2}"
- neg_prompt = "blur, low quality"
-
- pipe.to("cuda")
- images = pipe(
- prompt = prompt
- , negative_prompt = neg_prompt
- ).images[0]
-
- pipe.to("cpu")
- torch.cuda.empty_cache()
- images
- ```
-"""
-
-
-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
-def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
- """
- Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
- Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
- """
- std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
- std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
- # rescale the results from guidance (fixes overexposure)
- noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
- # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
- noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
- return noise_cfg
-
-
-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
-def retrieve_latents(
- encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
-):
- if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
- return encoder_output.latent_dist.sample(generator)
- elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
- return encoder_output.latent_dist.mode()
- elif hasattr(encoder_output, "latents"):
- return encoder_output.latents
else:
- raise AttributeError("Could not access latents of provided encoder_output")
-
-
-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
-def retrieve_timesteps(
- scheduler,
- num_inference_steps: Optional[int] = None,
- device: Optional[Union[str, torch.device]] = None,
- timesteps: Optional[List[int]] = None,
- **kwargs,
-):
- """
- Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
- custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
-
- Args:
- scheduler (`SchedulerMixin`):
- The scheduler to get timesteps from.
- num_inference_steps (`int`):
- The number of diffusion steps used when generating samples with a pre-trained model. If used,
- `timesteps` must be `None`.
- device (`str` or `torch.device`, *optional*):
- The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
- timesteps (`List[int]`, *optional*):
- Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
- timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
- must be `None`.
-
- Returns:
- `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
- second element is the number of inference steps.
- """
- if timesteps is not None:
- accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
- if not accepts_timesteps:
+ valid_mask_channel_sizes = [1, 3]
+ # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
+ if mask.shape[3] in valid_mask_channel_sizes:
+ mask = mask.permute(0, 3, 1, 2)
+ elif mask.shape[1] not in valid_mask_channel_sizes:
raise ValueError(
- f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
- f" timestep schedules. Please check whether you are using the correct scheduler."
+ f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
+ f" but received mask of shape {tuple(mask.shape)}"
)
- scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
- timesteps = scheduler.timesteps
- num_inference_steps = len(timesteps)
- else:
- scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
- timesteps = scheduler.timesteps
- return timesteps, num_inference_steps
+ # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
+ mask = mask.mean(dim=1, keepdim=True)
+ h, w = mask.shape[-2:]
+ h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
+ mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
+ return mask
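+
+# Usage sketch (illustrative; the file path is an assumption):
+#   mask_image = PIL.Image.open("mask.png")  # white pixels are repainted, black pixels kept
+#   mask = preprocess_mask(mask_image, batch_size=1)  # (1, 4, H // 8, W // 8) latent-space mask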
-class SDXLLongPromptWeightingPipeline(
- DiffusionPipeline, FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+class StableDiffusionLongPromptWeightingPipeline(
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
):
r"""
- Pipeline for text-to-image generation using Stable Diffusion XL.
+ Pipeline for text-to-image generation using Stable Diffusion, without a token length limit and with
+ support for parsing weights in the prompt.
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
- The pipeline also inherits the following loading methods:
- - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
- - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion XL uses the text portion of
+ Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- text_encoder_2 ([` CLIPTextModelWithProjection`]):
- Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
- specifically the
- [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
- variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- tokenizer_2 (`CLIPTokenizer`):
- Second Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]):
- Conditional U-Net architecture to denoise the encoded image latents.
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
+ safety_checker ([`StableDiffusionSafetyChecker`]):
+ Classification module that estimates whether generated images could be considered offensive or harmful.
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
+ feature_extractor ([`CLIPImageProcessor`]):
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
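+
+ Example (illustrative; the model id is an assumption and the pipeline is loaded as a community pipeline):
+
+     pipe = DiffusionPipeline.from_pretrained(
+         "runwayml/stable-diffusion-v1-5",
+         custom_pipeline="lpw_stable_diffusion",
+         torch_dtype=torch.float16,
+     ).to("cuda")
+     image = pipe(prompt="a (red:1.5) cat " * 30, negative_prompt="lowres, blurry").images[0]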
"""
- model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
- _optional_components = [
- "tokenizer",
- # "tokenizer_2",
- "text_encoder",
- # "text_encoder_2",
- "image_encoder",
- "feature_extractor",
- ]
- _callback_tensor_inputs = [
- "latents",
- "prompt_embeds",
- "negative_prompt_embeds",
- "add_text_embeds",
- "add_time_ids",
- "negative_pooled_prompt_embeds",
- "negative_add_time_ids",
- ]
+ _optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
- # text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
- # tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
- feature_extractor: Optional[CLIPImageProcessor] = None,
- image_encoder: Optional[CLIPVisionModelWithProjection] = None,
- force_zeros_for_empty_prompt: bool = True,
- add_watermarker: Optional[bool] = None,
+ safety_checker: StableDiffusionSafetyChecker,
+ feature_extractor: CLIPImageProcessor,
+ requires_safety_checker: bool = True,
):
super().__init__()
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
+ deprecation_message = (
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
+ " file"
+ )
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
+ new_config = dict(scheduler.config)
+ new_config["steps_offset"] = 1
+ scheduler._internal_dict = FrozenDict(new_config)
+
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
+ deprecation_message = (
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
+ )
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
+ new_config = dict(scheduler.config)
+ new_config["clip_sample"] = False
+ scheduler._internal_dict = FrozenDict(new_config)
+
+ if safety_checker is None and requires_safety_checker:
+ logger.warning(
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+ )
+
+ if safety_checker is not None and feature_extractor is None:
+ raise ValueError(
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+ )
+
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
+ version.parse(unet.config._diffusers_version).base_version
+ ) < version.parse("0.9.0.dev0")
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
+ deprecation_message = (
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
+ " the `unet/config.json` file"
+ )
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
+ new_config = dict(unet.config)
+ new_config["sample_size"] = 64
+ unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
- text_encoder_2=text_encoder,
tokenizer=tokenizer,
- tokenizer_2=tokenizer,
unet=unet,
scheduler=scheduler,
+ safety_checker=safety_checker,
feature_extractor=feature_extractor,
- image_encoder=image_encoder,
)
- self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
- self.mask_processor = VaeImageProcessor(
- vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
+ self.register_to_config(
+ requires_safety_checker=requires_safety_checker,
)
- self.default_sample_size = self.unet.config.sample_size
-
- add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
- if add_watermarker:
- self.watermark = StableDiffusionXLWatermarker()
- else:
- self.watermark = None
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+ Enable sliced VAE decoding.
+
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
+ steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
- processing larger images.
+ Enable tiled VAE decoding.
+
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
+ several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
"""
self.vae.enable_tiling()
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
+ def enable_sequential_cpu_offload(self, gpu_id=0):
+ r"""
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
+ `torch.device('meta')` and loaded onto the GPU only when their specific submodule has its `forward` method called.
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
+ `enable_model_cpu_offload`, but performance is lower.
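+
+ Example (illustrative):
+
+     pipe.enable_sequential_cpu_offload(gpu_id=0)  # submodules load onto cuda:0 only during their forward pass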
+ """
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
+ from accelerate import cpu_offload
+ else:
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
+
+ device = torch.device(f"cuda:{gpu_id}")
+
+ if self.device.type != "cpu":
+ self.to("cpu", silence_dtype_warnings=True)
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
+
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
+ cpu_offload(cpu_offloaded_model, device)
+
+ if self.safety_checker is not None:
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
@@ -701,80 +612,64 @@ class SDXLLongPromptWeightingPipeline(
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
- model_sequence = (
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
- )
- model_sequence.extend([self.unet, self.vae])
-
hook = None
- for cpu_offloaded_model in model_sequence:
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
+ if self.safety_checker is not None:
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
+
# We'll offload the last model manually.
self.final_offload_hook = hook
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
+ @property
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
+ def _execution_device(self):
+ r"""
+ Returns the device on which the pipeline's models will be executed. After calling
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
+ hooks.
+ """
+ if not hasattr(self.unet, "_hf_hook"):
+ return self.device
+ for module in self.unet.modules():
+ if (
+ hasattr(module, "_hf_hook")
+ and hasattr(module._hf_hook, "execution_device")
+ and module._hf_hook.execution_device is not None
+ ):
+ return torch.device(module._hf_hook.execution_device)
+ return self.device
+
def encode_prompt(
self,
- prompt: str,
- prompt_2: Optional[str] = None,
- device: Optional[torch.device] = None,
- num_images_per_prompt: int = 1,
- do_classifier_free_guidance: bool = True,
- negative_prompt: Optional[str] = None,
- negative_prompt_2: Optional[str] = None,
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt=None,
+ max_embeddings_multiples=10,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
- lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
- prompt (`str` or `List[str]`, *optional*):
+ prompt (`str` or `List[str]`):
prompt to be encoded
- prompt_2 (`str` or `List[str]`, *optional*):
- The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
- used in both text-encoders
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
- less than `1`).
- negative_prompt_2 (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
- input argument.
- lora_scale (`float`, *optional*):
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+ negative_prompt (`str` or `List[str]`):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ max_embeddings_multiples (`int`, *optional*, defaults to `10`):
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
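+
+ Example (illustrative; assumes an initialized pipeline instance `pipe` on CUDA):
+
+     embeds = pipe.encode_prompt(
+         "a (sunlit:1.2) forest",
+         device=torch.device("cuda"),
+         num_images_per_prompt=1,
+         do_classifier_free_guidance=True,
+         negative_prompt="blurry",
+     )
+     # with classifier-free guidance, `embeds` stacks [negative, positive] along the batch dim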
"""
- device = device or self._execution_device
-
- # set lora scale so that monkey patched LoRA
- # function of text encoder can correctly access it
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
- self._lora_scale = lora_scale
-
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
@@ -782,187 +677,57 @@ class SDXLLongPromptWeightingPipeline(
else:
batch_size = prompt_embeds.shape[0]
- # Define tokenizers and text encoders
- tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
- text_encoders = (
- [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
- )
-
- if prompt_embeds is None:
- prompt_2 = prompt_2 or prompt
- # textual inversion: procecss multi-vector tokens if necessary
- prompt_embeds_list = []
- prompts = [prompt, prompt_2]
- for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
- if isinstance(self, TextualInversionLoaderMixin):
- prompt = self.maybe_convert_prompt(prompt, tokenizer)
-
- text_inputs = tokenizer(
- prompt,
- padding="max_length",
- max_length=tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- text_input_ids = text_inputs.input_ids
- untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- prompt_embeds = text_encoder(
- text_input_ids.to(device),
- output_hidden_states=True,
- )
-
- # We are only ALWAYS interested in the pooled output of the final text encoder
- pooled_prompt_embeds = prompt_embeds[0]
- prompt_embeds = prompt_embeds.hidden_states[-2]
-
- prompt_embeds_list.append(prompt_embeds)
-
- prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
-
- # get unconditional embeddings for classifier free guidance
- zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
- if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
- negative_prompt_embeds = torch.zeros_like(prompt_embeds)
- negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
- elif do_classifier_free_guidance and negative_prompt_embeds is None:
- negative_prompt = negative_prompt or ""
- negative_prompt_2 = negative_prompt_2 or negative_prompt
-
- uncond_tokens: List[str]
- if prompt is not None and type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
+ if negative_prompt_embeds is None:
+ if negative_prompt is None:
+ negative_prompt = [""] * batch_size
elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt, negative_prompt_2]
- elif batch_size != len(negative_prompt):
+ negative_prompt = [negative_prompt] * batch_size
+ if batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
- else:
- uncond_tokens = [negative_prompt, negative_prompt_2]
-
- negative_prompt_embeds_list = []
- for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
- if isinstance(self, TextualInversionLoaderMixin):
- negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
-
- max_length = prompt_embeds.shape[1]
- uncond_input = tokenizer(
- negative_prompt,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- negative_prompt_embeds = text_encoder(
- uncond_input.input_ids.to(device),
- output_hidden_states=True,
- )
- # We are only ALWAYS interested in the pooled output of the final text encoder
- negative_pooled_prompt_embeds = negative_prompt_embeds[0]
- negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
-
- negative_prompt_embeds_list.append(negative_prompt_embeds)
-
- negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
+ if prompt_embeds is None or negative_prompt_embeds is None:
+ if isinstance(self, TextualInversionLoaderMixin):
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
+
+ prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
+ pipe=self,
+ prompt=prompt,
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
+ max_embeddings_multiples=max_embeddings_multiples,
+ )
+ if prompt_embeds is None:
+ prompt_embeds = prompt_embeds1
+ if negative_prompt_embeds is None:
+ negative_prompt_embeds = negative_prompt_embeds1
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_classifier_free_guidance:
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = negative_prompt_embeds.shape[1]
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
+ bs_embed, seq_len, _ = negative_prompt_embeds.shape
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
- bs_embed * num_images_per_prompt, -1
- )
- if do_classifier_free_guidance:
- negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
- bs_embed * num_images_per_prompt, -1
- )
-
- return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
- def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
- dtype = next(self.image_encoder.parameters()).dtype
-
- if not isinstance(image, torch.Tensor):
- image = self.feature_extractor(image, return_tensors="pt").pixel_values
-
- image = image.to(device=device, dtype=dtype)
- if output_hidden_states:
- image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
- image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
- uncond_image_enc_hidden_states = self.image_encoder(
- torch.zeros_like(image), output_hidden_states=True
- ).hidden_states[-2]
- uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
- num_images_per_prompt, dim=0
- )
- return image_enc_hidden_states, uncond_image_enc_hidden_states
- else:
- image_embeds = self.image_encoder(image).image_embeds
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
- uncond_image_embeds = torch.zeros_like(image_embeds)
-
- return image_embeds, uncond_image_embeds
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
+ negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
+ return prompt_embeds
def check_inputs(
self,
prompt,
- prompt_2,
height,
width,
strength,
callback_steps,
negative_prompt=None,
- negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
- pooled_prompt_embeds=None,
- negative_pooled_prompt_embeds=None,
- callback_on_step_end_tensor_inputs=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
@@ -970,48 +735,31 @@ class SDXLLongPromptWeightingPipeline(
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
- if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
+ if callback_steps is None or not isinstance(callback_steps, int) or callback_steps <= 0:
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
- if callback_on_step_end_tensor_inputs is not None and not all(
- k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
- ):
- raise ValueError(
- f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
- )
-
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
- elif prompt_2 is not None and prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
- " only forward one of the two."
- )
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
- elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
- raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
- elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
- )
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
@@ -1021,163 +769,69 @@ class SDXLLongPromptWeightingPipeline(
f" {negative_prompt_embeds.shape}."
)
- if prompt_embeds is not None and pooled_prompt_embeds is None:
- raise ValueError(
- "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
- )
-
- if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
- raise ValueError(
- "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
- )
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
- def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
- r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
-
- The suffixes after the scaling factors represent the stages where they are being applied.
-
- Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
- that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
-
- Args:
- s1 (`float`):
- Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
- mitigate "oversmoothing effect" in the enhanced denoising process.
- s2 (`float`):
- Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
- mitigate "oversmoothing effect" in the enhanced denoising process.
- b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
- b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
- """
- if not hasattr(self, "unet"):
- raise ValueError("The pipeline must have `unet` for using FreeU.")
- self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
- def disable_freeu(self):
- """Disables the FreeU mechanism if enabled."""
- self.unet.disable_freeu()
-
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
- def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
- """
- Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
- key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
-
-
-
- This API is 🧪 experimental.
-
-
-
- Args:
- unet (`bool`, defaults to `True`): To apply fusion on the UNet.
- vae (`bool`, defaults to `True`): To apply fusion on the VAE.
- """
- self.fusing_unet = False
- self.fusing_vae = False
-
- if unet:
- self.fusing_unet = True
- self.unet.fuse_qkv_projections()
- self.unet.set_attn_processor(FusedAttnProcessor2_0())
-
- if vae:
- if not isinstance(self.vae, AutoencoderKL):
- raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
-
- self.fusing_vae = True
- self.vae.fuse_qkv_projections()
- self.vae.set_attn_processor(FusedAttnProcessor2_0())
-
- # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
- def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
- """Disable QKV projection fusion if enabled.
-
-
-
- This API is 🧪 experimental.
-
-
-
- Args:
- unet (`bool`, defaults to `True`): To apply fusion on the UNet.
- vae (`bool`, defaults to `True`): To apply fusion on the VAE.
-
- """
- if unet:
- if not self.fusing_unet:
- logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
- else:
- self.unet.unfuse_qkv_projections()
- self.fusing_unet = False
-
- if vae:
- if not self.fusing_vae:
- logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
- else:
- self.vae.unfuse_qkv_projections()
- self.fusing_vae = False
-
- def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
- # get the original timestep using init_timestep
- if denoising_start is None:
+ def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
+ if is_text2img:
+ return self.scheduler.timesteps.to(device), num_inference_steps
+ else:
+ # get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+
t_start = max(num_inference_steps - init_timestep, 0)
- else:
- t_start = 0
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+ return timesteps, num_inference_steps - t_start
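+ # Worked example (assumes a first-order scheduler, i.e. scheduler.order == 1):
+ # with num_inference_steps=50 and strength=0.8, init_timestep = min(40, 50) = 40
+ # and t_start = 10, so img2img runs only the last 40 of the 50 scheduler timesteps
+ # and returns (timesteps[10:], 40). strength=1.0 keeps all 50 steps, which is why
+ # it effectively ignores the input image.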
- # Strength is irrelevant if we directly request a timestep to start at;
- # that is, strength is determined by the denoising_start instead.
- if denoising_start is not None:
- discrete_timestep_cutoff = int(
- round(
- self.scheduler.config.num_train_timesteps
- - (denoising_start * self.scheduler.config.num_train_timesteps)
- )
+ def run_safety_checker(self, image, device, dtype):
+ if self.safety_checker is not None:
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
+ image, has_nsfw_concept = self.safety_checker(
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
+ else:
+ has_nsfw_concept = None
+ return image, has_nsfw_concept
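+ # Note: when enabled, the safety checker returns the images (flagged ones are
+ # typically blacked out) together with a per-image boolean list; when the checker
+ # is disabled (None), has_nsfw_concept is None and callers must handle both cases.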
+
+ def decode_latents(self, latents):
+ latents = 1 / self.vae.config.scaling_factor * latents
+ image = self.vae.decode(latents).sample
+ image = (image / 2 + 0.5).clamp(0, 1)
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+ return image
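+ # Rough numbers (SD v1 VAE with scaling_factor ~= 0.18215, assumed for
+ # illustration): a (B, 4, 64, 64) latent is rescaled by 1/0.18215, decoded to a
+ # (B, 3, 512, 512) image in [-1, 1], mapped to [0, 1] via image / 2 + 0.5, and
+ # returned as a float32 NHWC numpy array of shape (B, 512, 512, 3).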
- num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
- if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
- # if the scheduler is a 2nd order scheduler we might have to do +1
- # because `num_inference_steps` might be even given that every timestep
- # (except the highest one) is duplicated. If `num_inference_steps` is even it would
- # mean that we cut the timesteps in the middle of the denoising step
- # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
- # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
- num_inference_steps = num_inference_steps + 1
+ def prepare_extra_step_kwargs(self, generator, eta):
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+ # and should be between [0, 1]
- # because t_n+1 >= t_n, we slice the timesteps starting from the end
- timesteps = timesteps[-num_inference_steps:]
- return timesteps, num_inference_steps
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ extra_step_kwargs = {}
+ if accepts_eta:
+ extra_step_kwargs["eta"] = eta
- return timesteps, num_inference_steps - t_start
+ # check if the scheduler accepts generator
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ if accepts_generator:
+ extra_step_kwargs["generator"] = generator
+ return extra_step_kwargs
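+ # Resulting kwargs by scheduler (a sketch, not exhaustive): DDIMScheduler.step()
+ # accepts both arguments, giving {"eta": eta, "generator": generator}; a scheduler
+ # such as PNDMScheduler accepts neither, giving {} so both values are silently
+ # dropped.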
def prepare_latents(
self,
image,
- mask,
- width,
- height,
- num_channels_latents,
timestep,
- batch_size,
num_images_per_prompt,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
dtype,
device,
- generator=None,
- add_noise=True,
+ generator,
latents=None,
- is_strength_max=True,
- return_noise=False,
- return_image_latents=False,
):
- batch_size *= num_images_per_prompt
-
if image is None:
+ batch_size = batch_size * num_images_per_prompt
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
@@ -1192,397 +846,91 @@ class SDXLLongPromptWeightingPipeline(
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
- return latents
-
- elif mask is None:
- if not isinstance(image, (torch.Tensor, Image.Image, list)):
- raise ValueError(
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
- )
-
- # Offload text encoder if `enable_model_cpu_offload` was enabled
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.text_encoder_2.to("cpu")
- torch.cuda.empty_cache()
-
- image = image.to(device=device, dtype=dtype)
-
- if image.shape[1] == 4:
- init_latents = image
-
- else:
- # make sure the VAE is in float32 mode, as it overflows in float16
- if self.vae.config.force_upcast:
- image = image.float()
- self.vae.to(dtype=torch.float32)
-
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- elif isinstance(generator, list):
- init_latents = [
- retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
- for i in range(batch_size)
- ]
- init_latents = torch.cat(init_latents, dim=0)
- else:
- init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
-
- if self.vae.config.force_upcast:
- self.vae.to(dtype)
-
- init_latents = init_latents.to(dtype)
- init_latents = self.vae.config.scaling_factor * init_latents
-
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
- # expand init_latents for batch_size
- additional_image_per_prompt = batch_size // init_latents.shape[0]
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
- raise ValueError(
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
- )
- else:
- init_latents = torch.cat([init_latents], dim=0)
-
- if add_noise:
- shape = init_latents.shape
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- # get latents
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
-
- latents = init_latents
- return latents
-
+ return latents, None, None
else:
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if (image is None or timestep is None) and not is_strength_max:
- raise ValueError(
- "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
- "However, either the image or the noise timestep has not been provided."
- )
-
- if image.shape[1] == 4:
- image_latents = image.to(device=device, dtype=dtype)
- image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
- elif return_image_latents or (latents is None and not is_strength_max):
- image = image.to(device=device, dtype=dtype)
- image_latents = self._encode_vae_image(image=image, generator=generator)
- image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
-
- if latents is None and add_noise:
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- # if strength is 1. then initialise the latents to noise, else initial to image + noise
- latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
- # if pure noise then scale the initial latents by the Scheduler's init sigma
- latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
- elif add_noise:
- noise = latents.to(device)
- latents = noise * self.scheduler.init_noise_sigma
- else:
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- latents = image_latents.to(device)
-
- outputs = (latents,)
-
- if return_noise:
- outputs += (noise,)
-
- if return_image_latents:
- outputs += (image_latents,)
-
- return outputs
-
- def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
- dtype = image.dtype
- if self.vae.config.force_upcast:
- image = image.float()
- self.vae.to(dtype=torch.float32)
-
- if isinstance(generator, list):
- image_latents = [
- retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
- for i in range(image.shape[0])
- ]
- image_latents = torch.cat(image_latents, dim=0)
- else:
- image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
-
- if self.vae.config.force_upcast:
- self.vae.to(dtype)
-
- image_latents = image_latents.to(dtype)
- image_latents = self.vae.config.scaling_factor * image_latents
-
- return image_latents
-
- def prepare_mask_latents(
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
- ):
- # resize the mask to latents shape as we concatenate the mask to the latents
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
- # and half precision
- mask = torch.nn.functional.interpolate(
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
- )
- mask = mask.to(device=device, dtype=dtype)
-
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
- if mask.shape[0] < batch_size:
- if not batch_size % mask.shape[0] == 0:
- raise ValueError(
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
- " of masks that you pass is divisible by the total requested batch size."
- )
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
-
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
-
- if masked_image is not None and masked_image.shape[1] == 4:
- masked_image_latents = masked_image
- else:
- masked_image_latents = None
-
- if masked_image is not None:
- if masked_image_latents is None:
- masked_image = masked_image.to(device=device, dtype=dtype)
- masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
-
- if masked_image_latents.shape[0] < batch_size:
- if not batch_size % masked_image_latents.shape[0] == 0:
- raise ValueError(
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
- " Make sure the number of images that you pass is divisible by the total requested batch size."
- )
- masked_image_latents = masked_image_latents.repeat(
- batch_size // masked_image_latents.shape[0], 1, 1, 1
- )
-
- masked_image_latents = (
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
- )
-
- # aligning device to prevent device errors when concating it with the latent model input
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
-
- return mask, masked_image_latents
-
- def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
- add_time_ids = list(original_size + crops_coords_top_left + target_size)
-
- passed_add_embed_dim = (
- self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
- )
- expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
-
- if expected_add_embed_dim != passed_add_embed_dim:
- raise ValueError(
- f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
- )
-
- add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
- return add_time_ids
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
- def upcast_vae(self):
- dtype = self.vae.dtype
- self.vae.to(dtype=torch.float32)
- use_torch_2_0_or_xformers = isinstance(
- self.vae.decoder.mid_block.attentions[0].processor,
- (
- AttnProcessor2_0,
- XFormersAttnProcessor,
- LoRAXFormersAttnProcessor,
- LoRAAttnProcessor2_0,
- ),
- )
- # if xformers or torch_2_0 is used attention block does not need
- # to be in float32 which can save lots of memory
- if use_torch_2_0_or_xformers:
- self.vae.post_quant_conv.to(dtype)
- self.vae.decoder.conv_in.to(dtype)
- self.vae.decoder.mid_block.to(dtype)
-
- # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
- def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
- """
- See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
-
- Args:
- timesteps (`torch.Tensor`):
- generate embedding vectors at these timesteps
- embedding_dim (`int`, *optional*, defaults to 512):
- dimension of the embeddings to generate
- dtype:
- data type of the generated embeddings
-
- Returns:
- `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
- """
- assert len(w.shape) == 1
- w = w * 1000.0
-
- half_dim = embedding_dim // 2
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
- emb = w.to(dtype)[:, None] * emb[None, :]
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
- if embedding_dim % 2 == 1: # zero pad
- emb = torch.nn.functional.pad(emb, (0, 1))
- assert emb.shape == (w.shape[0], embedding_dim)
- return emb
-
- @property
- def guidance_scale(self):
- return self._guidance_scale
-
- @property
- def guidance_rescale(self):
- return self._guidance_rescale
-
- @property
- def clip_skip(self):
- return self._clip_skip
-
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- @property
- def do_classifier_free_guidance(self):
- return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
-
- @property
- def cross_attention_kwargs(self):
- return self._cross_attention_kwargs
+ image = image.to(device=self.device, dtype=dtype)
+ init_latent_dist = self.vae.encode(image).latent_dist
+ init_latents = init_latent_dist.sample(generator=generator)
+ init_latents = self.vae.config.scaling_factor * init_latents
- @property
- def denoising_end(self):
- return self._denoising_end
+ # Expand init_latents for num_images_per_prompt (the image was already tiled to batch_size during preprocessing)
+ init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
+ init_latents_orig = init_latents
- @property
- def denoising_start(self):
- return self._denoising_start
-
- @property
- def num_timesteps(self):
- return self._num_timesteps
+ # add noise to latents using the timesteps
+ noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
+ latents = init_latents
+ return latents, init_latents_orig, noise
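+ # Illustration (SD v1 geometry, assumed): a preprocessed 512x512 image encodes to
+ # (B, 4, 64, 64) latents. init_latents_orig keeps the clean copy for inpainting,
+ # while `latents` is that copy noised to `timestep` (e.g. the 40-steps-remaining
+ # point when strength=0.8), so denoising can resume from there.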
@torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
- prompt: str = None,
- prompt_2: Optional[str] = None,
- image: Optional[PipelineImageInput] = None,
- mask_image: Optional[PipelineImageInput] = None,
- masked_image_latents: Optional[torch.FloatTensor] = None,
- height: Optional[int] = None,
- width: Optional[int] = None,
- strength: float = 0.8,
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
+ height: int = 512,
+ width: int = 512,
num_inference_steps: int = 50,
- timesteps: List[int] = None,
- denoising_start: Optional[float] = None,
- denoising_end: Optional[float] = None,
- guidance_scale: float = 5.0,
- negative_prompt: Optional[str] = None,
- negative_prompt_2: Optional[str] = None,
+ guidance_scale: float = 7.5,
+ strength: float = 0.8,
num_images_per_prompt: Optional[int] = 1,
+ add_predicted_noise: Optional[bool] = False,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
- ip_adapter_image: Optional[PipelineImageInput] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_embeddings_multiples: Optional[int] = 3,
output_type: Optional[str] = "pil",
return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
+ callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- guidance_rescale: float = 0.0,
- original_size: Optional[Tuple[int, int]] = None,
- crops_coords_top_left: Tuple[int, int] = (0, 0),
- target_size: Optional[Tuple[int, int]] = None,
- clip_skip: Optional[int] = None,
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
- **kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
- prompt (`str`):
- The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds`.
- instead.
- prompt_2 (`str`):
- The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
- used in both text-encoders
- image (`PipelineImageInput`, *optional*):
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
`Image`, or tensor representing an image batch, that will be used as the starting point for the
process.
- mask_image (`PipelineImageInput`, *optional*):
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+ height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
+ width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
- strength (`float`, *optional*, defaults to 0.8):
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
- noise will be maximum and the denoising process will run for the full number of iterations specified in
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
- timesteps (`List[int]`, *optional*):
- Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
- in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
- passed will be used. Must be in descending order.
- denoising_start (`float`, *optional*):
- When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
- bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
- it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
- strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
- is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image
- Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality).
- denoising_end (`float`, *optional*):
- When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
- completed before it is intentionally prematurely terminated. As a result, the returned sample will
- still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
- denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
- final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
- forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image
- Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality).
- guidance_scale (`float`, *optional*, defaults to 5.0):
+ guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
usually at the expense of lower image quality.
- negative_prompt (`str`):
- The prompt not to guide the image generation. If not defined, one has to pass
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
- less than `1`).
- negative_prompt_2 (`str`):
- The prompt not to guide the image generation to be sent to `tokenizer_2` and
- `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
+ strength (`float`, *optional*, defaults to 0.8):
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
+ add_predicted_noise (`bool`, *optional*, defaults to `False`):
+ Use predicted noise instead of random noise when constructing noisy versions of the original image in
+ the reverse diffusion process.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
@@ -1593,8 +941,6 @@ class SDXLLongPromptWeightingPipeline(
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
- ip_adapter_image: (`PipelineImageInput`, *optional*):
- Optional image input to work with IP Adapters.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
@@ -1602,110 +948,45 @@ class SDXLLongPromptWeightingPipeline(
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
- pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
- If not provided, pooled text embeddings will be generated from `prompt` input argument.
- negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
- input argument.
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
- of a plain tuple.
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ is_cancelled_callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. If the function returns
+ `True`, the inference will be cancelled.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- guidance_rescale (`float`, *optional*, defaults to 0.0):
- Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
- Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
- [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
- Guidance rescale factor should fix overexposure when using zero terminal SNR.
- original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
- If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
- `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
- explained in section 2.2 of
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
- crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
- `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
- `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
- `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
- [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
- target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
- For most cases, `target_size` should be set to the desired height and width of the generated image. If
- not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
- section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
- clip_skip (`int`, *optional*):
- Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
- the output of the pre-final layer will be used for computing the prompt embeddings.
- callback_on_step_end (`Callable`, *optional*):
- A function that calls at the end of each denoising steps during the inference. The function is called
- with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
- callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
- `callback_on_step_end_tensor_inputs`.
- callback_on_step_end_tensor_inputs (`List`, *optional*):
- The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
- will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
- `._callback_tensor_inputs` attribute of your pipeine class.
-
- Examples:
Returns:
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
- [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
- `tuple`. When returning a tuple, the first element is a list with the generated images.
+ `None` if cancelled by `is_cancelled_callback`,
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
+ (nsfw) content, according to the `safety_checker`.
"""
-
- callback = kwargs.pop("callback", None)
- callback_steps = kwargs.pop("callback_steps", None)
-
- if callback is not None:
- deprecate(
- "callback",
- "1.0.0",
- "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
- )
- if callback_steps is not None:
- deprecate(
- "callback_steps",
- "1.0.0",
- "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
- )
-
# 0. Default height and width to unet
- height = height or self.default_sample_size * self.vae_scale_factor
- width = width or self.default_sample_size * self.vae_scale_factor
-
- original_size = original_size or (height, width)
- target_size = target_size or (height, width)
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
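+ # e.g. for SD v1 checkpoints: unet.config.sample_size (64) * vae_scale_factor (8) = 512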
# 1. Check inputs. Raise error if not correct
self.check_inputs(
- prompt,
- prompt_2,
- height,
- width,
- strength,
- callback_steps,
- negative_prompt,
- negative_prompt_2,
- prompt_embeds,
- negative_prompt_embeds,
- pooled_prompt_embeds,
- negative_pooled_prompt_embeds,
- callback_on_step_end_tensor_inputs,
+ prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
)
- self._guidance_scale = guidance_scale
- self._guidance_rescale = guidance_rescale
- self._clip_skip = clip_skip
- self._cross_attention_kwargs = cross_attention_kwargs
- self._denoising_end = denoising_end
- self._denoising_start = denoising_start
-
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
@@ -1715,606 +996,477 @@ class SDXLLongPromptWeightingPipeline(
batch_size = prompt_embeds.shape[0]
device = self._execution_device
-
- if ip_adapter_image is not None:
- output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
- image_embeds, negative_image_embeds = self.encode_image(
- ip_adapter_image, device, num_images_per_prompt, output_hidden_state
- )
- if self.do_classifier_free_guidance:
- image_embeds = torch.cat([negative_image_embeds, image_embeds])
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
- (self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None)
-
- negative_prompt = negative_prompt if negative_prompt is not None else ""
-
- (
- prompt_embeds,
- negative_prompt_embeds,
- pooled_prompt_embeds,
- negative_pooled_prompt_embeds,
- ) = get_weighted_text_embeddings_sdxl(
- pipe=self,
- prompt=prompt,
- neg_prompt=negative_prompt,
- num_images_per_prompt=num_images_per_prompt,
- clip_skip=clip_skip,
+ prompt_embeds = self.encode_prompt(
+ prompt,
+ device,
+ num_images_per_prompt,
+ do_classifier_free_guidance,
+ negative_prompt,
+ max_embeddings_multiples,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
)
dtype = prompt_embeds.dtype
- if isinstance(image, Image.Image):
- image = self.image_processor.preprocess(image, height=height, width=width)
+ # 4. Preprocess image and mask
+ if isinstance(image, PIL.Image.Image):
+ image = preprocess_image(image, batch_size)
if image is not None:
image = image.to(device=self.device, dtype=dtype)
-
- if isinstance(mask_image, Image.Image):
- mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
- else:
- mask = mask_image
+ if isinstance(mask_image, PIL.Image.Image):
+ mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
if mask_image is not None:
- mask = mask.to(device=self.device, dtype=dtype)
-
- if masked_image_latents is not None:
- masked_image = masked_image_latents
- elif image.shape[1] == 4:
- # if image is in latent space, we can't mask it
- masked_image = None
- else:
- masked_image = image * (mask < 0.5)
+ mask = mask_image.to(device=self.device, dtype=dtype)
+ mask = torch.cat([mask] * num_images_per_prompt)
else:
mask = None
- # 4. Prepare timesteps
- def denoising_value_valid(dnv):
- return isinstance(self.denoising_end, float) and 0 < dnv < 1
-
- timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
- if image is not None:
- timesteps, num_inference_steps = self.get_timesteps(
- num_inference_steps,
- strength,
- device,
- denoising_start=self.denoising_start if denoising_value_valid else None,
- )
-
- # check that number of inference steps is not < 1 - as this doesn't make sense
- if num_inference_steps < 1:
- raise ValueError(
- f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
- f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
- )
-
+ # 5. Set timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
- is_strength_max = strength == 1.0
- add_noise = True if self.denoising_start is None else False
-
- # 5. Prepare latent variables
- num_channels_latents = self.vae.config.latent_channels
- num_channels_unet = self.unet.config.in_channels
- return_image_latents = num_channels_unet == 4
- latents = self.prepare_latents(
- image=image,
- mask=mask,
- width=width,
- height=height,
- num_channels_latents=num_channels_unet,
- timestep=latent_timestep,
- batch_size=batch_size,
- num_images_per_prompt=num_images_per_prompt,
- dtype=prompt_embeds.dtype,
- device=device,
- generator=generator,
- add_noise=add_noise,
- latents=latents,
- is_strength_max=is_strength_max,
- return_noise=True,
- return_image_latents=return_image_latents,
+ # 6. Prepare latent variables
+ latents, init_latents_orig, noise = self.prepare_latents(
+ image,
+ latent_timestep,
+ num_images_per_prompt,
+ batch_size,
+ self.unet.config.in_channels,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents,
)
- if mask is not None:
- if return_image_latents:
- latents, noise, image_latents = latents
- else:
- latents, noise = latents
-
- # 5.1 Prepare mask latent variables
- if mask is not None:
- mask, masked_image_latents = self.prepare_mask_latents(
- mask=mask,
- masked_image=masked_image,
- batch_size=batch_size * num_images_per_prompt,
- height=height,
- width=width,
- dtype=prompt_embeds.dtype,
- device=device,
- generator=generator,
- do_classifier_free_guidance=self.do_classifier_free_guidance,
- )
-
- # Check that sizes of mask, masked image and latents match
- if num_channels_unet == 9:
- # default case for runwayml/stable-diffusion-inpainting
- num_channels_mask = mask.shape[1]
- num_channels_masked_image = masked_image_latents.shape[1]
- if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet:
- raise ValueError(
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
- " `pipeline.unet` or your `mask_image` or `image` input."
- )
- elif num_channels_unet != 4:
- raise ValueError(
- f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
- )
-
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
- # 6.1 Add image embeds for IP-Adapter
- added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else {}
-
- height, width = latents.shape[-2:]
- height = height * self.vae_scale_factor
- width = width * self.vae_scale_factor
-
- original_size = original_size or (height, width)
- target_size = target_size or (height, width)
-
- # 7. Prepare added time ids & embeddings
- add_text_embeds = pooled_prompt_embeds
- add_time_ids = self._get_add_time_ids(
- original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
- )
-
- if self.do_classifier_free_guidance:
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
- add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
- add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
-
- prompt_embeds = prompt_embeds.to(device)
- add_text_embeds = add_text_embeds.to(device)
- add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
-
- num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
-
- # 7.1 Apply denoising_end
- if (
- self.denoising_end is not None
- and self.denoising_start is not None
- and denoising_value_valid(self.denoising_end)
- and denoising_value_valid(self.denoising_start)
- and self.denoising_start >= self.denoising_end
- ):
- raise ValueError(
- f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
- + f" {self.denoising_end} when using type float."
- )
- elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
- discrete_timestep_cutoff = int(
- round(
- self.scheduler.config.num_train_timesteps
- - (self.denoising_end * self.scheduler.config.num_train_timesteps)
- )
- )
- num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
- timesteps = timesteps[:num_inference_steps]
-
- # 8. Optionally get Guidance Scale Embedding
- timestep_cond = None
- if self.unet.config.time_cond_proj_dim is not None:
- guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
- timestep_cond = self.get_guidance_scale_embedding(
- guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
- ).to(device=device, dtype=latents.dtype)
-
- self._num_timesteps = len(timesteps)
-
- # 9. Denoising loop
+ # 8. Denoising loop
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
-
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
- if mask is not None and num_channels_unet == 9:
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
-
# predict the noise residual
- added_cond_kwargs.update({"text_embeds": add_text_embeds, "time_ids": add_time_ids})
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
- timestep_cond=timestep_cond,
- cross_attention_kwargs=self.cross_attention_kwargs,
- added_cond_kwargs=added_cond_kwargs,
- return_dict=False,
- )[0]
+ cross_attention_kwargs=cross_attention_kwargs,
+ ).sample
# perform guidance
- if self.do_classifier_free_guidance:
+ if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- if self.do_classifier_free_guidance and guidance_rescale > 0.0:
- # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
- noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
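+ # e.g. with guidance_scale=7.5: noise_pred = uncond + 7.5 * (text - uncond),
+ # extrapolating past the unconditional prediction along the text direction.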
# compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
-
- if mask is not None and num_channels_unet == 4:
- init_latents_proper = image_latents
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
- if self.do_classifier_free_guidance:
- init_mask, _ = mask.chunk(2)
- else:
- init_mask = mask
-
- if i < len(timesteps) - 1:
- noise_timestep = timesteps[i + 1]
+ if mask is not None:
+ # masking
+ if add_predicted_noise:
init_latents_proper = self.scheduler.add_noise(
- init_latents_proper, noise, torch.tensor([noise_timestep])
+ init_latents_orig, noise_pred_uncond, torch.tensor([t])
)
-
- latents = (1 - init_mask) * init_latents_proper + init_mask * latents
-
- if callback_on_step_end is not None:
- callback_kwargs = {}
- for k in callback_on_step_end_tensor_inputs:
- callback_kwargs[k] = locals()[k]
- callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
-
- latents = callback_outputs.pop("latents", latents)
- prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
- negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
- add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
- negative_pooled_prompt_embeds = callback_outputs.pop(
- "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
- )
- add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
+ else:
+ init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
+ latents = (init_latents_proper * mask) + (latents * (1 - mask))
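+ # preprocess_mask inverts the mask (white -> 0, black -> 1), so mask==1 pins a
+ # latent to the re-noised original (preserved region) while mask==0 keeps the
+ # freshly denoised latent (repainted region), matching the docstring above.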
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- step_idx = i // getattr(self.scheduler, "order", 1)
- callback(step_idx, t, latents)
-
- if not output_type == "latent":
- # make sure the VAE is in float32 mode, as it overflows in float16
- needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
-
- if needs_upcasting:
- self.upcast_vae()
- latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+ if i % callback_steps == 0:
+ if callback is not None:
+ step_idx = i // getattr(self.scheduler, "order", 1)
+ callback(step_idx, t, latents)
+ if is_cancelled_callback is not None and is_cancelled_callback():
+ return None
+
+ if output_type == "latent":
+ image = latents
+ has_nsfw_concept = None
+ elif output_type == "pil":
+ # 9. Post-processing
+ image = self.decode_latents(latents)
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
+ # 10. Run safety checker
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
- # cast back to fp16 if needed
- if needs_upcasting:
- self.vae.to(dtype=torch.float16)
+ # 11. Convert to PIL
+ image = self.numpy_to_pil(image)
else:
- image = latents
- return StableDiffusionXLPipelineOutput(images=image)
-
- # apply watermark if available
- if self.watermark is not None:
- image = self.watermark.apply_watermark(image)
+ # 9. Post-processing
+ image = self.decode_latents(latents)
- image = self.image_processor.postprocess(image, output_type=output_type)
+ # 10. Run safety checker
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
- return (image,)
+ return image, has_nsfw_concept
- return StableDiffusionXLPipelineOutput(images=image)
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
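+ # Usage sketch (model id and weighting syntax are illustrative, following the
+ # community pipeline docs, and not defined in this file):
+ # >>> import torch
+ # >>> from diffusers import DiffusionPipeline
+ # >>> pipe = DiffusionPipeline.from_pretrained(
+ # ... "runwayml/stable-diffusion-v1-5",
+ # ... custom_pipeline="lpw_stable_diffusion",
+ # ... torch_dtype=torch.float16,
+ # ... ).to("cuda")
+ # >>> image = pipe(
+ # ... prompt="a (photorealistic:1.3) portrait of a ((smiling)) astronaut",
+ # ... negative_prompt="lowres, (bad anatomy:1.2)",
+ # ... num_inference_steps=50,
+ # ... ).images[0]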
def text2img(
self,
- prompt: str = None,
- prompt_2: Optional[str] = None,
- height: Optional[int] = None,
- width: Optional[int] = None,
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ height: int = 512,
+ width: int = 512,
num_inference_steps: int = 50,
- timesteps: List[int] = None,
- denoising_start: Optional[float] = None,
- denoising_end: Optional[float] = None,
- guidance_scale: float = 5.0,
- negative_prompt: Optional[str] = None,
- negative_prompt_2: Optional[str] = None,
+ guidance_scale: float = 7.5,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
- ip_adapter_image: Optional[PipelineImageInput] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_embeddings_multiples: Optional[int] = 3,
output_type: Optional[str] = "pil",
return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
+ callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- guidance_rescale: float = 0.0,
- original_size: Optional[Tuple[int, int]] = None,
- crops_coords_top_left: Tuple[int, int] = (0, 0),
- target_size: Optional[Tuple[int, int]] = None,
- clip_skip: Optional[int] = None,
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
- **kwargs,
):
r"""
- Function invoked when calling pipeline for text-to-image.
+ Function for text-to-image generation.
+ Args:
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ height (`int`, *optional*, defaults to 512):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to 512):
+ The width in pixels of the generated image.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+ [`schedulers.DDIMScheduler`], will be ignored for others.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
+ The maximum length of the prompt embeddings, expressed as a multiple of the text encoder's
+ maximum output length.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ is_cancelled_callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. If the function returns
+ `True`, the inference will be cancelled.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- Refer to the documentation of the `__call__` method for parameter descriptions.
+ Returns:
+ `None` if cancelled by `is_cancelled_callback`; otherwise,
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, or a `tuple`.
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
+ (nsfw) content, according to the `safety_checker`.
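+ Example:
+ A minimal usage sketch. Loading this file as the "lpw_stable_diffusion" community pipeline is the
+ intended route; the checkpoint id and the weighted prompt below are illustrative assumptions.
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline
+
+ # custom_pipeline="lpw_stable_diffusion" resolves to this file on the Hub.
+ pipe = DiffusionPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5",  # assumed SD 1.x base checkpoint
+     custom_pipeline="lpw_stable_diffusion",
+     torch_dtype=torch.float16,
+ ).to("cuda")
+
+ # Parenthesized weights are parsed by parse_prompt_attention; prompts longer than the
+ # encoder's 77-token window are chunked, up to max_embeddings_multiples chunks.
+ image = pipe.text2img(
+     "(masterpiece:1.2), best quality, a red apple on a wooden table",
+     negative_prompt="lowres, bad anatomy",
+     height=512,
+     width=512,
+     num_inference_steps=50,
+     guidance_scale=7.5,
+     max_embeddings_multiples=3,
+ ).images[0]
+ ```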
"""
return self.__call__(
prompt=prompt,
- prompt_2=prompt_2,
+ negative_prompt=negative_prompt,
height=height,
width=width,
num_inference_steps=num_inference_steps,
- timesteps=timesteps,
- denoising_start=denoising_start,
- denoising_end=denoising_end,
guidance_scale=guidance_scale,
- negative_prompt=negative_prompt,
- negative_prompt_2=negative_prompt_2,
num_images_per_prompt=num_images_per_prompt,
eta=eta,
generator=generator,
latents=latents,
- ip_adapter_image=ip_adapter_image,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
- pooled_prompt_embeds=pooled_prompt_embeds,
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ max_embeddings_multiples=max_embeddings_multiples,
output_type=output_type,
return_dict=return_dict,
+ callback=callback,
+ is_cancelled_callback=is_cancelled_callback,
+ callback_steps=callback_steps,
cross_attention_kwargs=cross_attention_kwargs,
- guidance_rescale=guidance_rescale,
- original_size=original_size,
- crops_coords_top_left=crops_coords_top_left,
- target_size=target_size,
- clip_skip=clip_skip,
- callback_on_step_end=callback_on_step_end,
- callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
- **kwargs,
)
def img2img(
self,
- prompt: str = None,
- prompt_2: Optional[str] = None,
- image: Optional[PipelineImageInput] = None,
- height: Optional[int] = None,
- width: Optional[int] = None,
+ image: Union[torch.FloatTensor, PIL.Image.Image],
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
strength: float = 0.8,
- num_inference_steps: int = 50,
- timesteps: List[int] = None,
- denoising_start: Optional[float] = None,
- denoising_end: Optional[float] = None,
- guidance_scale: float = 5.0,
- negative_prompt: Optional[str] = None,
- negative_prompt_2: Optional[str] = None,
+ num_inference_steps: Optional[int] = 50,
+ guidance_scale: Optional[float] = 7.5,
num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
+ eta: Optional[float] = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- ip_adapter_image: Optional[PipelineImageInput] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_embeddings_multiples: Optional[int] = 3,
output_type: Optional[str] = "pil",
return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
+ callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- guidance_rescale: float = 0.0,
- original_size: Optional[Tuple[int, int]] = None,
- crops_coords_top_left: Tuple[int, int] = (0, 0),
- target_size: Optional[Tuple[int, int]] = None,
- clip_skip: Optional[int] = None,
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
- **kwargs,
):
r"""
- Function invoked when calling pipeline for image-to-image.
+ Function for image-to-image generation.
+ Args:
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
+ process.
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ strength (`float`, *optional*, defaults to 0.8):
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference. This parameter will be modulated by `strength`.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2 of the [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+ text `prompt`, usually at the expense of lower image quality.
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies
+ to [`schedulers.DDIMScheduler`]; it is ignored for other schedulers.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
+ The maximum length of the prompt embeddings, expressed as a multiple of the text encoder's
+ maximum output length.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ is_cancelled_callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. If the function returns
+ `True`, the inference will be cancelled.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- Refer to the documentation of the `__call__` method for parameter descriptions.
+ Returns:
+ `None` if cancelled by `is_cancelled_callback`; otherwise,
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, or a `tuple`.
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
+ (nsfw) content, according to the `safety_checker`.
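+ Example:
+ A minimal usage sketch (the checkpoint id and image URL are illustrative assumptions):
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline
+ from diffusers.utils import load_image
+
+ pipe = DiffusionPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5",  # assumed base checkpoint
+     custom_pipeline="lpw_stable_diffusion",
+     torch_dtype=torch.float16,
+ ).to("cuda")
+
+ init_image = load_image("https://example.com/sketch.png").resize((512, 512))  # placeholder URL
+ # With strength=0.6, roughly int(50 * 0.6) = 30 of the 50 scheduled steps actually run.
+ image = pipe.img2img(
+     image=init_image,
+     prompt="a (watercolor:1.3) landscape, soft morning light",
+     negative_prompt="photo, lowres",
+     strength=0.6,
+     num_inference_steps=50,
+ ).images[0]
+ ```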
"""
return self.__call__(
prompt=prompt,
- prompt_2=prompt_2,
+ negative_prompt=negative_prompt,
image=image,
- height=height,
- width=width,
- strength=strength,
num_inference_steps=num_inference_steps,
- timesteps=timesteps,
- denoising_start=denoising_start,
- denoising_end=denoising_end,
guidance_scale=guidance_scale,
- negative_prompt=negative_prompt,
- negative_prompt_2=negative_prompt_2,
+ strength=strength,
num_images_per_prompt=num_images_per_prompt,
eta=eta,
generator=generator,
- latents=latents,
- ip_adapter_image=ip_adapter_image,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
- pooled_prompt_embeds=pooled_prompt_embeds,
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ max_embeddings_multiples=max_embeddings_multiples,
output_type=output_type,
return_dict=return_dict,
+ callback=callback,
+ is_cancelled_callback=is_cancelled_callback,
+ callback_steps=callback_steps,
cross_attention_kwargs=cross_attention_kwargs,
- guidance_rescale=guidance_rescale,
- original_size=original_size,
- crops_coords_top_left=crops_coords_top_left,
- target_size=target_size,
- clip_skip=clip_skip,
- callback_on_step_end=callback_on_step_end,
- callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
- **kwargs,
)
def inpaint(
self,
- prompt: str = None,
- prompt_2: Optional[str] = None,
- image: Optional[PipelineImageInput] = None,
- mask_image: Optional[PipelineImageInput] = None,
- masked_image_latents: Optional[torch.FloatTensor] = None,
- height: Optional[int] = None,
- width: Optional[int] = None,
+ image: Union[torch.FloatTensor, PIL.Image.Image],
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
strength: float = 0.8,
- num_inference_steps: int = 50,
- timesteps: List[int] = None,
- denoising_start: Optional[float] = None,
- denoising_end: Optional[float] = None,
- guidance_scale: float = 5.0,
- negative_prompt: Optional[str] = None,
- negative_prompt_2: Optional[str] = None,
+ num_inference_steps: Optional[int] = 50,
+ guidance_scale: Optional[float] = 7.5,
num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
+ add_predicted_noise: Optional[bool] = False,
+ eta: Optional[float] = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- ip_adapter_image: Optional[PipelineImageInput] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_embeddings_multiples: Optional[int] = 3,
output_type: Optional[str] = "pil",
return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
+ callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- guidance_rescale: float = 0.0,
- original_size: Optional[Tuple[int, int]] = None,
- crops_coords_top_left: Tuple[int, int] = (0, 0),
- target_size: Optional[Tuple[int, int]] = None,
- clip_skip: Optional[int] = None,
- callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
- callback_on_step_end_tensor_inputs: List[str] = ["latents"],
- **kwargs,
):
r"""
- Function invoked when calling pipeline for inpainting.
+ Function for inpainting.
+ Args:
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
+ process. This is the image whose masked region will be inpainted.
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
+ prompt (`str` or `List[str]`):
+ The prompt or prompts to guide the image generation.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+ if `guidance_scale` is less than `1`).
+ strength (`float`, *optional*, defaults to 0.8):
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
+ guidance_scale (`float`, *optional*, defaults to 7.5):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2 of the [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+ text `prompt`, usually at the expense of lower image quality.
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ add_predicted_noise (`bool`, *optional*, defaults to `False`):
+ Use predicted noise instead of random noise when constructing noisy versions of the original image
+ in the reverse diffusion process.
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies
+ to [`schedulers.DDIMScheduler`]; it is ignored for other schedulers.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
+ The maximum length of the prompt embeddings, expressed as a multiple of the text encoder's
+ maximum output length.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+ plain tuple.
+ callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. The function will be
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+ is_cancelled_callback (`Callable`, *optional*):
+ A function that will be called every `callback_steps` steps during inference. If the function returns
+ `True`, the inference will be cancelled.
+ callback_steps (`int`, *optional*, defaults to 1):
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
+ called at every step.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
- Refer to the documentation of the `__call__` method for parameter descriptions.
+ Returns:
+ `None` if cancelled by `is_cancelled_callback`; otherwise,
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, or a `tuple`.
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
+ (nsfw) content, according to the `safety_checker`.
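+ Example:
+ A minimal usage sketch of this legacy-style inpainting (checkpoint id and image URLs are
+ illustrative assumptions; the mask follows the white-repaint / black-keep convention above):
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline
+ from diffusers.utils import load_image
+
+ pipe = DiffusionPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5",  # assumed 4-channel (non-inpainting) checkpoint
+     custom_pipeline="lpw_stable_diffusion",
+     torch_dtype=torch.float16,
+ ).to("cuda")
+
+ init_image = load_image("https://example.com/bench.png").resize((512, 512))  # placeholder URL
+ mask_image = load_image("https://example.com/mask.png").resize((512, 512))   # placeholder URL
+ image = pipe.inpaint(
+     image=init_image,
+     mask_image=mask_image,
+     prompt="a (golden:1.2) retriever sitting on the bench",
+     strength=0.75,
+     num_inference_steps=50,
+ ).images[0]
+ ```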
"""
return self.__call__(
prompt=prompt,
- prompt_2=prompt_2,
+ negative_prompt=negative_prompt,
image=image,
mask_image=mask_image,
- masked_image_latents=masked_image_latents,
- height=height,
- width=width,
- strength=strength,
num_inference_steps=num_inference_steps,
- timesteps=timesteps,
- denoising_start=denoising_start,
- denoising_end=denoising_end,
guidance_scale=guidance_scale,
- negative_prompt=negative_prompt,
- negative_prompt_2=negative_prompt_2,
+ strength=strength,
num_images_per_prompt=num_images_per_prompt,
+ add_predicted_noise=add_predicted_noise,
eta=eta,
generator=generator,
- latents=latents,
- ip_adapter_image=ip_adapter_image,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
- pooled_prompt_embeds=pooled_prompt_embeds,
- negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ max_embeddings_multiples=max_embeddings_multiples,
output_type=output_type,
return_dict=return_dict,
+ callback=callback,
+ is_cancelled_callback=is_cancelled_callback,
+ callback_steps=callback_steps,
cross_attention_kwargs=cross_attention_kwargs,
- guidance_rescale=guidance_rescale,
- original_size=original_size,
- crops_coords_top_left=crops_coords_top_left,
- target_size=target_size,
- clip_skip=clip_skip,
- callback_on_step_end=callback_on_step_end,
- callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
- **kwargs,
- )
-
- # Overrride to properly handle the loading and unloading of the additional text encoder.
- def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
- # We could have accessed the unet config from `lora_state_dict()` too. We pass
- # it here explicitly to be able to tell that it's coming from an SDXL
- # pipeline.
- state_dict, network_alphas = self.lora_state_dict(
- pretrained_model_name_or_path_or_dict,
- unet_config=self.unet.config,
- **kwargs,
- )
- self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
-
- text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
- if len(text_encoder_state_dict) > 0:
- self.load_lora_into_text_encoder(
- text_encoder_state_dict,
- network_alphas=network_alphas,
- text_encoder=self.text_encoder,
- prefix="text_encoder",
- lora_scale=self.lora_scale,
- )
-
- text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
- if len(text_encoder_2_state_dict) > 0:
- self.load_lora_into_text_encoder(
- text_encoder_2_state_dict,
- network_alphas=network_alphas,
- text_encoder=self.text_encoder_2,
- prefix="text_encoder_2",
- lora_scale=self.lora_scale,
- )
-
- @classmethod
- def save_lora_weights(
- self,
- save_directory: Union[str, os.PathLike],
- unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
- text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
- text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
- is_main_process: bool = True,
- weight_name: str = None,
- save_function: Callable = None,
- safe_serialization: bool = False,
- ):
- state_dict = {}
-
- def pack_weights(layers, prefix):
- layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
- layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
- return layers_state_dict
-
- state_dict.update(pack_weights(unet_lora_layers, "unet"))
-
- if text_encoder_lora_layers and text_encoder_2_lora_layers:
- state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
- state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
-
- self.write_lora_layers(
- state_dict=state_dict,
- save_directory=save_directory,
- is_main_process=is_main_process,
- weight_name=weight_name,
- save_function=save_function,
- safe_serialization=safe_serialization,
- )
-
- def _remove_text_encoder_monkey_patch(self):
- self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
- self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
\ No newline at end of file
+ )
\ No newline at end of file