jzli committed · verified
Commit f0b403d · Parent(s): 2073afd

Upload pipeline.py

Files changed (1): pipeline.py (new file, +1713 lines)
import inspect
import re
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import PIL
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
import random
import sys
from tqdm.auto import tqdm

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    PIL_INTERPOLATION,
    deprecate,
    is_accelerate_available,
    is_accelerate_version,
    logging,
)
from diffusers.utils.torch_utils import randn_tensor

# ------------------------------------------------------------------------------

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

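# Usage sketch (assumption: this file is loaded as a Hub custom pipeline; the base checkpoint
# below is illustrative, not prescribed by this file):
#
#     from diffusers import DiffusionPipeline
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", custom_pipeline="<repo-id-containing-this-pipeline.py>"
#     ).to("cuda")
#     image = pipe(prompt="a (red:1.2) ball on the beach").images[0]
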
re_attention = re.compile(
    r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
    re.X,
)


def parse_prompt_attention(text):
    r"""
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res


def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return its tokens with weights of each token.

    No padding, starting or ending token is included.
    """
    tokens = []
    weights = []
    truncated = False
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
            # tokenize and discard the starting and the ending token
            token = pipe.tokenizer(word).input_ids[1:-1]
            text_token += token
            # copy the weight by length of token
            text_weight += [weight] * len(token)
            # stop if the text is too long (longer than truncation limit)
            if len(text_token) > max_length:
                truncated = True
                break
        # truncate
        if len(text_token) > max_length:
            truncated = True
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]
        tokens.append(text_token)
        weights.append(text_weight)
    if truncated:
        logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
    return tokens, weights


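# Example (sketch; token ids are illustrative, actual ids depend on the CLIP vocabulary):
#   get_prompts_with_weights(pipe, ["a (red:1.2) ball"], max_length=75)
#   -> tokens:  [[320, 736, 1928]]   # ids for "a", "red", "ball"
#      weights: [[1.0, 1.2, 1.0]]    # one weight per token, copied from its source word
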
def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range(max_embeddings_multiples):
                    w.append(1.0)  # weight for starting token in this chunk
                    w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
                    w.append(1.0)  # weight for ending token in this chunk
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights


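# Layout sketch for one prompt with max_length=8 and four content tokens t1..t4:
#   tokens[i]  -> [bos, t1, t2, t3, t4, pad, pad, eos]
#   weights[i] -> [1.0, w1, w2, w3, w4, 1.0, 1.0, 1.0]   (with no_boseos_middle=True)
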
def get_unweighted_text_embeddings(
    pipe: DiffusionPipeline,
    text_input: torch.Tensor,
    chunk_length: int,
    no_boseos_middle: Optional[bool] = True,
):
    """
    When the token sequence is longer than the capacity of the text encoder, it is split into
    chunks that are sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
            # extract the i-th chunk
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()

            # cover the head and the tail by the starting and the ending tokens
            text_input_chunk[:, 0] = text_input[0, 0]
            text_input_chunk[:, -1] = text_input[0, -1]
            text_embedding = pipe.text_encoder(text_input_chunk)[0]

            if no_boseos_middle:
                if i == 0:
                    # discard the ending token
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
                    # discard the starting token
                    text_embedding = text_embedding[:, 1:]
                else:
                    # discard both starting and ending tokens
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = torch.concat(text_embeddings, axis=1)
    else:
        text_embeddings = pipe.text_encoder(text_input)[0]
    return text_embeddings


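# Chunking arithmetic (sketch), with CLIP's chunk_length=77 and a padded input of 227 tokens:
#   (227 - 2) // (77 - 2) = 3 chunks, each re-wrapped with bos/eos to length 77 before encoding.
#   With no_boseos_middle=True the chunks keep 76 + 75 + 76 positions after trimming, so the
#   concatenated embedding is again 227 positions long.
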
def get_weighted_text_embeddings(
    pipe: DiffusionPipeline,
    prompt: Union[str, List[str]],
    uncond_prompt: Optional[Union[str, List[str]]] = None,
    max_embeddings_multiples: Optional[int] = 3,
    no_boseos_middle: Optional[bool] = False,
    skip_parsing: Optional[bool] = False,
    skip_weighting: Optional[bool] = False,
):
    r"""
    Prompts can be assigned with local weights using brackets. For example,
    prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.

    Also, to regularize the embedding, the weighted embedding is rescaled to preserve the original mean.

    Args:
        pipe (`DiffusionPipeline`):
            Pipe to provide access to the tokenizer and the text encoder.
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        uncond_prompt (`str` or `List[str]`):
            The unconditional prompt or prompts to guide the image generation. If the unconditional prompt
            is provided, the embeddings of prompt and uncond_prompt are concatenated.
        max_embeddings_multiples (`int`, *optional*, defaults to `3`):
            The max multiple length of prompt embeddings compared to the max output length of text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            If the token length is a multiple of the capacity of the text encoder, whether to reserve the
            starting and ending tokens in each of the chunks in the middle.
        skip_parsing (`bool`, *optional*, defaults to `False`):
            Skip the parsing of brackets.
        skip_weighting (`bool`, *optional*, defaults to `False`):
            Skip the weighting. When the parsing is skipped, it is forced True.
    """
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    if not skip_parsing:
        prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
    else:
        prompt_tokens = [
            token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
        ]
        prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens = [
                token[1:-1]
                for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
            ]
            uncond_weights = [[1.0] * len(token) for token in uncond_tokens]

    # round up the longest length of tokens to a multiple of (model_max_length - 2)
    max_length = max([len(token) for token in prompt_tokens])
    if uncond_prompt is not None:
        max_length = max(max_length, max([len(token) for token in uncond_tokens]))

    max_embeddings_multiples = min(
        max_embeddings_multiples,
        (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2

    # pad the length of tokens and weights
    bos = pipe.tokenizer.bos_token_id
    eos = pipe.tokenizer.eos_token_id
    pad = getattr(pipe.tokenizer, "pad_token_id", eos)
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        pad,
        no_boseos_middle=no_boseos_middle,
        chunk_length=pipe.tokenizer.model_max_length,
    )
    prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
    if uncond_prompt is not None:
        uncond_tokens, uncond_weights = pad_tokens_and_weights(
            uncond_tokens,
            uncond_weights,
            max_length,
            bos,
            eos,
            pad,
            no_boseos_middle=no_boseos_middle,
            chunk_length=pipe.tokenizer.model_max_length,
        )
        uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)

    # get the embeddings
    text_embeddings = get_unweighted_text_embeddings(
        pipe,
        prompt_tokens,
        pipe.tokenizer.model_max_length,
        no_boseos_middle=no_boseos_middle,
    )
    prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
    if uncond_prompt is not None:
        uncond_embeddings = get_unweighted_text_embeddings(
            pipe,
            uncond_tokens,
            pipe.tokenizer.model_max_length,
            no_boseos_middle=no_boseos_middle,
        )
        uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)

    # assign weights to the prompts and normalize in the sense of mean
    # TODO: should we normalize by chunk or in a whole (current implementation)?
    if (not skip_parsing) and (not skip_weighting):
        previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
        text_embeddings *= prompt_weights.unsqueeze(-1)
        current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
        text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
        if uncond_prompt is not None:
            previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
            uncond_embeddings *= uncond_weights.unsqueeze(-1)
            current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
            uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)

    if uncond_prompt is not None:
        return text_embeddings, uncond_embeddings
    return text_embeddings, None


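# Usage sketch (assumes `pipe` exposes a CLIP tokenizer and text encoder):
#   embeds, uncond = get_weighted_text_embeddings(
#       pipe, prompt="a (red:1.2) ball", uncond_prompt="", max_embeddings_multiples=3
#   )
#   # embeds: (1, N, hidden_size) with N = 75 * k + 2; uncond has the same shape.
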
def preprocess_image(image, batch_size):
    w, h = image.size
    w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


def preprocess_mask(mask, batch_size, scale_factor=8):
    if not isinstance(mask, torch.FloatTensor):
        mask = mask.convert("L")
        w, h = mask.size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
        mask = np.array(mask).astype(np.float32) / 255.0
        mask = np.tile(mask, (4, 1, 1))
        mask = np.vstack([mask[None]] * batch_size)
        mask = 1 - mask  # repaint white, keep black
        mask = torch.from_numpy(mask)
        return mask
    else:
        valid_mask_channel_sizes = [1, 3]
        # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
        if mask.shape[3] in valid_mask_channel_sizes:
            mask = mask.permute(0, 3, 1, 2)
        elif mask.shape[1] not in valid_mask_channel_sizes:
            raise ValueError(
                f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
                f" but received mask of shape {tuple(mask.shape)}"
            )
        # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
        mask = mask.mean(dim=1, keepdim=True)
        h, w = mask.shape[-2:]
        h, w = (x - x % 8 for x in (h, w))  # resize to integer multiple of 8
        mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
        return mask


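# Mask convention sketch: white (1.0) pixels are inverted to 0 above, so the denoising loop's
# `init_latents_proper * mask + latents * (1 - mask)` keeps black regions of the original image
# and repaints white regions. The PIL branch returns shape (batch_size, 4, H//8, W//8), matching
# the 4-channel SD latent space; the tensor branch returns a single channel that broadcasts.
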
class StableDiffusionLongPromptWeightingPipeline(
    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
    parsing weights in the prompt.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly, as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)

        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(
            requires_safety_checker=requires_safety_checker,
        )

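    # Scale-factor arithmetic sketch (assumes the standard SD v1 VAE config): block_out_channels
    # has 4 entries, so vae_scale_factor = 2 ** 3 = 8, i.e. a 512x512 image maps to a 64x64 latent.
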
    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding.

        When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
        steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding.

        When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
        several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
        """
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
        text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
        Note that offloading happens on a submodule basis. Memory savings are higher than with
        `enable_model_cpu_offload`, but performance is lower.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
            from accelerate import cpu_offload
        else:
            raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
            cpu_offload(cpu_offloaded_model, device)

        if self.safety_checker is not None:
            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        max_embeddings_multiples=3,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if negative_prompt_embeds is None:
            if negative_prompt is None:
                negative_prompt = [""] * batch_size
            elif isinstance(negative_prompt, str):
                negative_prompt = [negative_prompt] * batch_size
            if batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
        if prompt_embeds is None or negative_prompt_embeds is None:
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
                if do_classifier_free_guidance and negative_prompt_embeds is None:
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)

            prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
                pipe=self,
                prompt=prompt,
                uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
                max_embeddings_multiples=max_embeddings_multiples,
            )
            if prompt_embeds is None:
                prompt_embeds = prompt_embeds1
            if negative_prompt_embeds is None:
                negative_prompt_embeds = negative_prompt_embeds1

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            bs_embed, seq_len, _ = negative_prompt_embeds.shape
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

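    # CFG layout sketch: when classifier-free guidance is active, the returned tensor stacks the
    # negative embeddings first, i.e. cat([negative, positive]), so noise_pred.chunk(2) in the
    # denoising loop yields (uncond, text) in that order.
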
    def check_inputs(
        self,
        prompt,
        height,
        width,
        strength,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
        if is_text2img:
            return self.scheduler.timesteps.to(device), num_inference_steps
        else:
            # get the original timestep using init_timestep
            init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

            t_start = max(num_inference_steps - init_timestep, 0)
            timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

            return timesteps, num_inference_steps - t_start

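    # Worked example (sketch): with num_inference_steps=50 and strength=0.8 in img2img mode,
    # init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 10, so the loop runs only the
    # last 40 scheduler timesteps; strength=1.0 starts from pure noise and runs all 50 steps.
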
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

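    # Scaling sketch (assumes an SD v1-style VAE, where config.scaling_factor = 0.18215): latents
    # are divided by the scaling factor before decoding, and the decoder output in [-1, 1] is
    # remapped to [0, 1] before conversion to numpy.
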
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def prepare_latents(
        self,
        image,
        timestep,
        num_images_per_prompt,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        if image is None:
            batch_size = batch_size * num_images_per_prompt
            shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            if latents is None:
                latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            else:
                latents = latents.to(device)

            # scale the initial noise by the standard deviation required by the scheduler
            latents = latents * self.scheduler.init_noise_sigma
            return latents, None, None
        else:
            image = image.to(device=self.device, dtype=dtype)
            init_latent_dist = self.vae.encode(image).latent_dist
            init_latents = init_latent_dist.sample(generator=generator)
            init_latents = self.vae.config.scaling_factor * init_latents

            # Expand init_latents for batch_size and num_images_per_prompt
            init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
            init_latents_orig = init_latents

            # add noise to latents using the timesteps
            noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
            init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
            latents = init_latents
            return latents, init_latents_orig, noise

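    # Return-contract sketch: the text2img branch returns (noise_latents, None, None); the
    # img2img/inpaint branch returns (noised_image_latents, init_latents_orig, noise), where the
    # last two are reused in the denoising loop to re-noise the preserved (unmasked) region.
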
    def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
        r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.

        The suffixes after the scaling factors represent the stages where they are being applied.

        Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
        that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.

        Args:
            s1 (`float`):
                Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            s2 (`float`):
                Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
            b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
        """
        if not hasattr(self, "unet"):
            raise ValueError("The pipeline must have `unet` for using FreeU.")
        self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)

    def disable_freeu(self):
        """Disables the FreeU mechanism if enabled."""
        self.unet.disable_freeu()

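    # Usage sketch (values commonly cited for SD v1.x pipelines; treat them as a starting point,
    # not a guarantee): pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)
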
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        strength: float = 0.8,
        num_images_per_prompt: Optional[int] = 1,
        add_predicted_noise: Optional[bool] = False,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
                contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
                noise will be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            add_predicted_noise (`bool`, *optional*, defaults to `False`):
                Use predicted noise instead of random noise when constructing noisy versions of the original image in
                the reverse diffusion process.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function returns
                `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).

        Returns:
            `None` if cancelled by `is_cancelled_callback`; otherwise a
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, or a `tuple`.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analogous to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            max_embeddings_multiples,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )
        dtype = prompt_embeds.dtype

        # 4. Preprocess image and mask
        if isinstance(image, PIL.Image.Image):
            image = preprocess_image(image, batch_size)
        if image is not None:
            image = image.to(device=self.device, dtype=dtype)
        if isinstance(mask_image, PIL.Image.Image):
            mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
        if mask_image is not None:
            mask = mask_image.to(device=self.device, dtype=dtype)
            mask = torch.cat([mask] * num_images_per_prompt)
        else:
            mask = None

        # 5. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        # 6. Prepare latent variables
        latents, init_latents_orig, noise = self.prepare_latents(
            image,
            latent_timestep,
            num_images_per_prompt,
            batch_size,
            self.unet.config.in_channels,
            height,
            width,
            dtype,
            device,
            generator,
            latents,
        )

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if mask is not None:
                    # masking
                    if add_predicted_noise:
                        init_latents_proper = self.scheduler.add_noise(
                            init_latents_orig, noise_pred_uncond, torch.tensor([t])
                        )
                    else:
                        init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
                    latents = (init_latents_proper * mask) + (latents * (1 - mask))

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if i % callback_steps == 0:
                        if callback is not None:
                            callback(i, t, latents)
                        if is_cancelled_callback is not None and is_cancelled_callback():
                            return None

        if output_type == "latent":
            image = latents
            has_nsfw_concept = None
        elif output_type == "pil":
            # 9. Post-processing
            image = self.decode_latents(latents)

            # 10. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)

            # 11. Convert to PIL
            image = self.numpy_to_pil(image)
        else:
            # 9. Post-processing
            image = self.decode_latents(latents)

            # 10. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return image, has_nsfw_concept

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

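    # Dispatch sketch for the three modes of __call__ (variable names are illustrative):
    #   pipe(prompt="a (photorealistic:1.3) cat")                             # text2img
    #   pipe(prompt="a cat", image=init_img, strength=0.6)                    # img2img
    #   pipe(prompt="a cat", image=init_img, mask_image=mask, strength=0.75)  # inpaint
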
+ def text2img(
+ self,
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ height: int = 512,
+ width: int = 512,
+ num_inference_steps: int = 50,
+ guidance_scale: float = 7.5,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_embeddings_multiples: Optional[int] = 3,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
+ callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ r"""
1176
+ Function for text-to-image generation.
1177
+ Args:
1178
+ prompt (`str` or `List[str]`):
1179
+ The prompt or prompts to guide the image generation.
1180
+ negative_prompt (`str` or `List[str]`, *optional*):
1181
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1182
+ if `guidance_scale` is less than `1`).
1183
+ height (`int`, *optional*, defaults to 512):
1184
+ The height in pixels of the generated image.
1185
+ width (`int`, *optional*, defaults to 512):
1186
+ The width in pixels of the generated image.
1187
+ num_inference_steps (`int`, *optional*, defaults to 50):
1188
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1189
+ expense of slower inference.
1190
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1191
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1192
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1193
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1194
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1195
+ usually at the expense of lower image quality.
1196
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1197
+ The number of images to generate per prompt.
1198
+ eta (`float`, *optional*, defaults to 0.0):
1199
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1200
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1201
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1202
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1203
+ to make generation deterministic.
1204
+ latents (`torch.FloatTensor`, *optional*):
1205
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1206
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1207
+ tensor will ge generated by sampling using the supplied random `generator`.
1208
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1209
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1210
+ provided, text embeddings will be generated from `prompt` input argument.
1211
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1212
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1213
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1214
+ argument.
1215
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1216
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1217
+ output_type (`str`, *optional*, defaults to `"pil"`):
1218
+ The output format of the generate image. Choose between
1219
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1220
+ return_dict (`bool`, *optional*, defaults to `True`):
1221
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1222
+ plain tuple.
1223
+ callback (`Callable`, *optional*):
1224
+ A function that will be called every `callback_steps` steps during inference. The function will be
1225
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1226
+ is_cancelled_callback (`Callable`, *optional*):
1227
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1228
+ `True`, the inference will be cancelled.
1229
+ callback_steps (`int`, *optional*, defaults to 1):
1230
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1231
+ called at every step.
1232
+ cross_attention_kwargs (`dict`, *optional*):
1233
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1234
+ `self.processor` in
1235
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1236
+
1237
+ Returns:
1238
+ `None` if cancelled by `is_cancelled_callback`,
1239
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1240
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
1241
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1242
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1243
+ (nsfw) content, according to the `safety_checker`.
1244
+ """
1245
+ return self.__call__(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ height=height,
+ width=width,
+ num_inference_steps=num_inference_steps,
+ guidance_scale=guidance_scale,
+ num_images_per_prompt=num_images_per_prompt,
+ eta=eta,
+ generator=generator,
+ latents=latents,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ max_embeddings_multiples=max_embeddings_multiples,
+ output_type=output_type,
+ return_dict=return_dict,
+ callback=callback,
+ is_cancelled_callback=is_cancelled_callback,
+ callback_steps=callback_steps,
+ cross_attention_kwargs=cross_attention_kwargs,
+ )
+
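+ # Usage sketch (comments only, not executed): assumes `pipe` is an instance of this
+ # pipeline class, e.g. loaded via DiffusionPipeline.from_pretrained with this file as
+ # a custom pipeline; the checkpoint id and file names below are illustrative.
+ #
+ # pipe = DiffusionPipeline.from_pretrained(
+ #     "runwayml/stable-diffusion-v1-5", custom_pipeline="pipeline.py"
+ # ).to("cuda")
+ # result = pipe.text2img("a photo of an astronaut riding a horse", num_inference_steps=30)
+ # result.images[0].save("astronaut.png")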
+ def img2img(
+ self,
+ image: Union[torch.FloatTensor, PIL.Image.Image],
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ strength: float = 0.8,
+ num_inference_steps: Optional[int] = 50,
+ guidance_scale: Optional[float] = 7.5,
+ num_images_per_prompt: Optional[int] = 1,
+ eta: Optional[float] = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_embeddings_multiples: Optional[int] = 3,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
+ callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ r"""
1289
+ Function for image-to-image generation.
1290
+ Args:
1291
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
1292
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1293
+ process.
1294
+ prompt (`str` or `List[str]`):
1295
+ The prompt or prompts to guide the image generation.
1296
+ negative_prompt (`str` or `List[str]`, *optional*):
1297
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1298
+ if `guidance_scale` is less than `1`).
1299
+ strength (`float`, *optional*, defaults to 0.8):
1300
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
1301
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
1302
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
1303
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
1304
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
1305
+ num_inference_steps (`int`, *optional*, defaults to 50):
1306
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1307
+ expense of slower inference. This parameter will be modulated by `strength`.
1308
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1309
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1310
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1311
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1312
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1313
+ usually at the expense of lower image quality.
1314
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1315
+ The number of images to generate per prompt.
1316
+ eta (`float`, *optional*, defaults to 0.0):
1317
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1318
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1319
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1320
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1321
+ to make generation deterministic.
1322
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1323
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1324
+ provided, text embeddings will be generated from `prompt` input argument.
1325
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1326
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1327
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1328
+ argument.
1329
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1330
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1331
+ output_type (`str`, *optional*, defaults to `"pil"`):
1332
+ The output format of the generate image. Choose between
1333
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1334
+ return_dict (`bool`, *optional*, defaults to `True`):
1335
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1336
+ plain tuple.
1337
+ callback (`Callable`, *optional*):
1338
+ A function that will be called every `callback_steps` steps during inference. The function will be
1339
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1340
+ is_cancelled_callback (`Callable`, *optional*):
1341
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1342
+ `True`, the inference will be cancelled.
1343
+ callback_steps (`int`, *optional*, defaults to 1):
1344
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1345
+ called at every step.
1346
+ cross_attention_kwargs (`dict`, *optional*):
1347
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1348
+ `self.processor` in
1349
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1350
+
1351
+ Returns:
1352
+ `None` if cancelled by `is_cancelled_callback`,
1353
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
1354
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1355
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1356
+ (nsfw) content, according to the `safety_checker`.
1357
+ """
1358
+ return self.__call__(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ image=image,
+ num_inference_steps=num_inference_steps,
+ guidance_scale=guidance_scale,
+ strength=strength,
+ num_images_per_prompt=num_images_per_prompt,
+ eta=eta,
+ generator=generator,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ max_embeddings_multiples=max_embeddings_multiples,
+ output_type=output_type,
+ return_dict=return_dict,
+ callback=callback,
+ is_cancelled_callback=is_cancelled_callback,
+ callback_steps=callback_steps,
+ cross_attention_kwargs=cross_attention_kwargs,
+ )
+
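+ # Usage sketch (comments only, not executed): `init.png` is an illustrative path;
+ # `strength` controls how far the result departs from the input image.
+ #
+ # from PIL import Image
+ # init_image = Image.open("init.png").convert("RGB").resize((512, 512))
+ # result = pipe.img2img(image=init_image, prompt="a watercolor painting", strength=0.6)
+ # result.images[0].save("watercolor.png")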
+ def inpaint(
+ self,
+ image: Union[torch.FloatTensor, PIL.Image.Image],
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
+ prompt: Union[str, List[str]],
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ strength: float = 0.8,
+ num_inference_steps: Optional[int] = 50,
+ guidance_scale: Optional[float] = 7.5,
+ num_images_per_prompt: Optional[int] = 1,
+ add_predicted_noise: Optional[bool] = False,
+ eta: Optional[float] = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ max_embeddings_multiples: Optional[int] = 3,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
+ callback_steps: int = 1,
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ r"""
1403
+ Function for inpaint.
1404
+ Args:
1405
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
1406
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1407
+ process. This is the image whose masked region will be inpainted.
1408
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
1409
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1410
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1411
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1412
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1413
+ prompt (`str` or `List[str]`):
1414
+ The prompt or prompts to guide the image generation.
1415
+ negative_prompt (`str` or `List[str]`, *optional*):
1416
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1417
+ if `guidance_scale` is less than `1`).
1418
+ strength (`float`, *optional*, defaults to 0.8):
1419
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1420
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
1421
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1422
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1423
+ num_inference_steps (`int`, *optional*, defaults to 50):
1424
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1425
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1426
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1427
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1428
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1429
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1430
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1431
+ usually at the expense of lower image quality.
1432
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1433
+ The number of images to generate per prompt.
1434
+ add_predicted_noise (`bool`, *optional*, defaults to True):
1435
+ Use predicted noise instead of random noise when constructing noisy versions of the original image in
1436
+ the reverse diffusion process
1437
+ eta (`float`, *optional*, defaults to 0.0):
1438
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1439
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1440
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1441
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1442
+ to make generation deterministic.
1443
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1444
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1445
+ provided, text embeddings will be generated from `prompt` input argument.
1446
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1447
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1448
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1449
+ argument.
1450
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1451
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1452
+ output_type (`str`, *optional*, defaults to `"pil"`):
1453
+ The output format of the generate image. Choose between
1454
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1455
+ return_dict (`bool`, *optional*, defaults to `True`):
1456
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1457
+ plain tuple.
1458
+ callback (`Callable`, *optional*):
1459
+ A function that will be called every `callback_steps` steps during inference. The function will be
1460
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1461
+ is_cancelled_callback (`Callable`, *optional*):
1462
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1463
+ `True`, the inference will be cancelled.
1464
+ callback_steps (`int`, *optional*, defaults to 1):
1465
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1466
+ called at every step.
1467
+ cross_attention_kwargs (`dict`, *optional*):
1468
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1469
+ `self.processor` in
1470
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
1471
+
1472
+ Returns:
1473
+ `None` if cancelled by `is_cancelled_callback`,
1474
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
1475
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1476
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1477
+ (nsfw) content, according to the `safety_checker`.
1478
+ """
1479
+ return self.__call__(
+ prompt=prompt,
+ negative_prompt=negative_prompt,
+ image=image,
+ mask_image=mask_image,
+ num_inference_steps=num_inference_steps,
+ guidance_scale=guidance_scale,
+ strength=strength,
+ num_images_per_prompt=num_images_per_prompt,
+ add_predicted_noise=add_predicted_noise,
+ eta=eta,
+ generator=generator,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ max_embeddings_multiples=max_embeddings_multiples,
+ output_type=output_type,
+ return_dict=return_dict,
+ callback=callback,
+ is_cancelled_callback=is_cancelled_callback,
+ callback_steps=callback_steps,
+ cross_attention_kwargs=cross_attention_kwargs,
+ )
+
+
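+ # Usage sketch (comments only, not executed): `photo.png` and `mask.png` are
+ # illustrative paths; white mask pixels mark the region to repaint.
+ #
+ # from PIL import Image
+ # init_image = Image.open("photo.png").convert("RGB").resize((512, 512))
+ # mask_image = Image.open("mask.png").convert("L").resize((512, 512))
+ # result = pipe.inpaint(image=init_image, mask_image=mask_image, prompt="a red sofa", strength=0.75)
+ # result.images[0].save("inpainted.png")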
+ # Borrowed from https://github.com/csaluski/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
+ def get_text_latent_space(self, prompt, guidance_scale=7.5):
+ # get prompt text embeddings
+ text_input = self.tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=self.tokenizer.model_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
+
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier-free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+ # get unconditional embeddings for classifier-free guidance
+ if do_classifier_free_guidance:
+ max_length = text_input.input_ids.shape[-1]
+ uncond_input = self.tokenizer(
+ [""], padding="max_length", max_length=max_length, return_tensors="pt"
+ )
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
+
+ # For classifier-free guidance, we need to do two forward passes.
+ # Here we concatenate the unconditional and text embeddings into a single batch
+ # to avoid doing two forward passes.
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+ return text_embeddings
+
+ def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
+ """Helper function to spherically interpolate two arrays v0, v1,
+ from https://gist.github.com/karpathy/00103b0037c5aaea32fe1da1af553355
+ this should be better than lerping for moving between noise spaces."""
+
+ inputs_are_torch = False
+ if not isinstance(v0, np.ndarray):
+ inputs_are_torch = True
+ input_device = v0.device
+ v0 = v0.cpu().numpy()
+ v1 = v1.cpu().numpy()
+
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
+ if np.abs(dot) > DOT_THRESHOLD:
+ # vectors are nearly parallel: fall back to linear interpolation
+ v2 = (1 - t) * v0 + t * v1
+ else:
+ theta_0 = np.arccos(dot)
+ sin_theta_0 = np.sin(theta_0)
+ theta_t = theta_0 * t
+ sin_theta_t = np.sin(theta_t)
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
+ s1 = sin_theta_t / sin_theta_0
+ v2 = s0 * v0 + s1 * v1
+
+ if inputs_are_torch:
+ v2 = torch.from_numpy(v2).to(input_device)
+
+ return v2
+
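+ # Illustrative check (comments only, not executed): slerp follows the great circle
+ # between the endpoints, so interpolating unit vectors preserves their norm, unlike
+ # torch.lerp, which cuts a chord through the sphere.
+ #
+ # a = torch.tensor([1.0, 0.0])
+ # b = torch.tensor([0.0, 1.0])
+ # mid = pipe.slerp(0.5, a, b)  # ~tensor([0.7071, 0.7071]), still unit norm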
+ def lerp_between_prompts(self, first_prompt, second_prompt, seed=None, length=10, save=False, guidance_scale: Optional[float] = 7.5, **kwargs):
+ first_embedding = self.get_text_latent_space(first_prompt)
+ second_embedding = self.get_text_latent_space(second_prompt)
+ if not seed:
+ seed = random.randint(0, sys.maxsize)
+ generator = torch.Generator(self.device)
+ generator.manual_seed(seed)
+ generator_state = generator.get_state()
+ lerp_embed_points = []
+ for i in range(length):
+ weight = i / length
+ tensor_lerp = torch.lerp(first_embedding, second_embedding, weight)
+ lerp_embed_points.append(tensor_lerp)
+ images = []
+ for idx, latent_point in enumerate(lerp_embed_points):
+ # restore the saved generator state so every frame shares the same randomness
+ generator.set_state(generator_state)
+ image = self.diffuse_from_inits(latent_point, generator=generator, **kwargs)["image"][0]
+ images.append(image)
+ if save:
+ image.save(f"{first_prompt}-{second_prompt}-{idx:02d}.png", "PNG")
+ return {"images": images, "latent_points": lerp_embed_points, "generator_state": generator_state}
+
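+ # Usage sketch (comments only, not executed): renders a 10-frame interpolation
+ # between two prompts with a shared seed.
+ #
+ # result = pipe.lerp_between_prompts("a photo of a cat", "a photo of a dog", seed=42, save=True)
+ # frames = result["images"]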
+ def slerp_through_seeds(
+ self,
+ prompt,
+ height: Optional[int] = 512,
+ width: Optional[int] = 512,
+ save=False,
+ seed=None,
+ steps=10,
+ **kwargs,
+ ):
+
+ if not seed:
+ seed = random.randint(0, sys.maxsize)
+ generator = torch.Generator(self.device)
+ generator.manual_seed(seed)
+ init_start = torch.randn(
+ (1, self.unet.in_channels, height // 8, width // 8),
+ generator=generator, device=self.device)
+ init_end = torch.randn(
+ (1, self.unet.in_channels, height // 8, width // 8),
+ generator=generator, device=self.device)
+ generator_state = generator.get_state()
+ slerp_embed_points = []
+ # weights run from 0 up to (steps - 2) / steps; init_end is appended explicitly
+ # so that len(images) == steps
+ for i in range(steps - 1):
+ weight = i / steps
+ tensor_slerp = self.slerp(weight, init_start, init_end)
+ slerp_embed_points.append(tensor_slerp)
+ slerp_embed_points.append(init_end)
+ images = []
+ embed_point = self.get_text_latent_space(prompt)
+ for idx, noise_point in enumerate(slerp_embed_points):
+ generator.set_state(generator_state)
+ image = self.diffuse_from_inits(embed_point, init=noise_point, **kwargs)["image"][0]
+ images.append(image)
+ if save:
+ image.save(f"{seed}-{idx:02d}.png", "PNG")
+ return {"images": images, "noise_samples": slerp_embed_points, "generator_state": generator_state}
+
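+ # Usage sketch (comments only, not executed): walks a fixed prompt through
+ # spherically interpolated initial noise tensors.
+ #
+ # result = pipe.slerp_through_seeds("a misty forest at dawn", steps=8, save=True)
+ # frames = result["images"]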
+ @torch.no_grad()
+ def diffuse_from_inits(
+ self,
+ text_embeddings,
+ init=None,
+ height: Optional[int] = 512,
+ width: Optional[int] = 512,
+ num_inference_steps: Optional[int] = 50,
+ guidance_scale: Optional[float] = 7.5,
+ eta: Optional[float] = 0.0,
+ generator: Optional[torch.Generator] = None,
+ output_type: Optional[str] = "pil",
+ **kwargs,
+ ):
+
+ from diffusers.schedulers import LMSDiscreteScheduler
+ batch_size = 1
+
+ if generator is None:
+ generator = torch.Generator(self.device)
+ generator_state = generator.get_state()
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier-free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+ # get the initial random noise
+ latents = init if init is not None else torch.randn(
+ (batch_size, self.unet.in_channels, height // 8, width // 8),
+ generator=generator,
+ device=self.device,)
+
+ # set timesteps
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
+ extra_set_kwargs = {}
+ if accepts_offset:
+ extra_set_kwargs["offset"] = 1
+
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
+
+ # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
+ latents = latents * self.scheduler.sigmas[0]
+
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+ # and should be between [0, 1]
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ extra_step_kwargs = {}
+ if accepts_eta:
+ extra_step_kwargs["eta"] = eta
+
+ for i, t in tqdm(enumerate(self.scheduler.timesteps)):
+ # expand the latents if we are doing classifier-free guidance
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
+ sigma = self.scheduler.sigmas[i]
+ latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)
+
+ # predict the noise residual
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, return_dict=False)[0]
+
+ # perform guidance
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
+ latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs, return_dict=False)[0]
+ else:
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+ # scale and decode the image latents with the vae
+ latents = 1 / 0.18215 * latents
+ image = self.vae.decode(latents).sample
+
+ image = (image / 2 + 0.5).clamp(0, 1)
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
+
+ if output_type == "pil":
+ image = self.numpy_to_pil(image)
+
+ return {"image": image, "generator_state": generator_state}
+
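+ # Usage sketch (comments only, not executed): diffuse directly from a text
+ # embedding produced by the helper above.
+ #
+ # embeddings = pipe.get_text_latent_space("a lighthouse at night")
+ # result = pipe.diffuse_from_inits(embeddings, num_inference_steps=30)
+ # result["image"][0].save("lighthouse.png")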
+ def variation(self, text_embeddings, generator_state, variation_magnitude=100, **kwargs):
+ # random vector to move in latent space, rescaled so its L1 norm equals `variation_magnitude`
+ rand_t = (torch.rand(text_embeddings.shape, device=self.device) * 2) - 1
+ rand_mag = torch.sum(torch.abs(rand_t)) / variation_magnitude
+ scaled_rand_t = rand_t / rand_mag
+ variation_embedding = text_embeddings + scaled_rand_t
+
+ generator = torch.Generator(self.device)
+ generator.set_state(generator_state)
+ result = self.diffuse_from_inits(variation_embedding, generator=generator, **kwargs)
+ result.update({"latent_point": variation_embedding})
+ return result
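+ # Usage sketch (comments only, not executed): re-run a previous generation with a
+ # random nudge in embedding space; a larger `variation_magnitude` gives a larger nudge.
+ #
+ # embeddings = pipe.get_text_latent_space("a stone bridge")
+ # base = pipe.diffuse_from_inits(embeddings)
+ # tweaked = pipe.variation(embeddings, base["generator_state"])
+ # tweaked["image"][0].save("variation.png")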