charlielidbury committed
Commit 79e77b8 · verified · 1 Parent(s): 0899efe

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. golden-examples.json +300 -0
  2. huggingface_diffusers/.github/ISSUE_TEMPLATE/bug-report.yml +51 -0
  3. huggingface_diffusers/.github/ISSUE_TEMPLATE/config.yml +4 -0
  4. huggingface_diffusers/.github/ISSUE_TEMPLATE/feature_request.md +20 -0
  5. huggingface_diffusers/.github/ISSUE_TEMPLATE/feedback.md +12 -0
  6. huggingface_diffusers/.github/ISSUE_TEMPLATE/new-model-addition.yml +31 -0
  7. huggingface_diffusers/.github/actions/setup-miniconda/action.yml +146 -0
  8. huggingface_diffusers/.github/workflows/build_docker_images.yml +50 -0
  9. huggingface_diffusers/.github/workflows/build_documentation.yml +18 -0
  10. huggingface_diffusers/.github/workflows/build_pr_documentation.yml +17 -0
  11. huggingface_diffusers/.github/workflows/delete_doc_comment.yml +13 -0
  12. huggingface_diffusers/.github/workflows/nightly_tests.yml +162 -0
  13. huggingface_diffusers/.github/workflows/pr_quality.yml +50 -0
  14. huggingface_diffusers/.github/workflows/pr_tests.yml +155 -0
  15. huggingface_diffusers/.github/workflows/push_tests.yml +156 -0
  16. huggingface_diffusers/.github/workflows/stale.yml +27 -0
  17. huggingface_diffusers/.github/workflows/typos.yml +14 -0
  18. huggingface_diffusers/.gitignore +171 -0
  19. huggingface_diffusers/CODE_OF_CONDUCT.md +129 -0
  20. huggingface_diffusers/CONTRIBUTING.md +294 -0
  21. huggingface_diffusers/LICENSE +201 -0
  22. huggingface_diffusers/MANIFEST.in +2 -0
  23. huggingface_diffusers/Makefile +98 -0
  24. huggingface_diffusers/README.md +563 -0
  25. huggingface_diffusers/_typos.toml +13 -0
  26. huggingface_diffusers/docker/diffusers-flax-cpu/Dockerfile +44 -0
  27. huggingface_diffusers/docker/diffusers-flax-tpu/Dockerfile +46 -0
  28. huggingface_diffusers/docker/diffusers-onnxruntime-cpu/Dockerfile +44 -0
  29. huggingface_diffusers/docker/diffusers-onnxruntime-cuda/Dockerfile +44 -0
  30. huggingface_diffusers/docker/diffusers-pytorch-cpu/Dockerfile +43 -0
  31. huggingface_diffusers/docker/diffusers-pytorch-cuda/Dockerfile +43 -0
  32. huggingface_diffusers/examples/README.md +70 -0
  33. huggingface_diffusers/examples/community/README.md +953 -0
  34. huggingface_diffusers/examples/community/bit_diffusion.py +264 -0
  35. huggingface_diffusers/examples/community/checkpoint_merger.py +285 -0
  36. huggingface_diffusers/examples/community/clip_guided_stable_diffusion.py +351 -0
  37. huggingface_diffusers/examples/community/composable_stable_diffusion.py +583 -0
  38. huggingface_diffusers/examples/community/imagic_stable_diffusion.py +502 -0
  39. huggingface_diffusers/examples/community/img2img_inpainting.py +463 -0
  40. huggingface_diffusers/examples/community/interpolate_stable_diffusion.py +524 -0
  41. huggingface_diffusers/examples/community/lpw_stable_diffusion.py +1162 -0
  42. huggingface_diffusers/examples/community/lpw_stable_diffusion_onnx.py +1148 -0
  43. huggingface_diffusers/examples/community/magic_mix.py +152 -0
  44. huggingface_diffusers/examples/community/multilingual_stable_diffusion.py +436 -0
  45. huggingface_diffusers/examples/community/one_step_unet.py +24 -0
  46. huggingface_diffusers/examples/community/sd_text2img_k_diffusion.py +476 -0
  47. huggingface_diffusers/examples/community/seed_resize_stable_diffusion.py +367 -0
  48. huggingface_diffusers/examples/community/speech_to_image_diffusion.py +261 -0
  49. huggingface_diffusers/examples/community/stable_diffusion_comparison.py +405 -0
  50. huggingface_diffusers/examples/community/stable_diffusion_mega.py +228 -0
golden-examples.json ADDED
@@ -0,0 +1,300 @@
+ [
+   {
+     "prompt": "Here is some code that is missing a line denoted with \"# <MISSING LINE>\". What should go in the missing line?\n\n\nimport os\nimport shutil\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport numpy as np\n\nfrom huggingface_hub import hf_hub_download\n\nfrom ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging\n\n\nif is_onnx_available():\n import onnxruntime as ort\n\n\nlogger = logging.get_logger(__name__)\n\nORT_TO_NP_TYPE = {\n \"tensor(bool)\": np.bool_,\n \"tensor(int8)\": np.int8,\n \"tensor(uint8)\": np.uint8,\n \"tensor(int16)\": np.int16,\n \"tensor(uint16)\": np.uint16,\n \"tensor(int32)\": np.int32,\n \"tensor(uint32)\": np.uint32,\n \"tensor(int64)\": np.int64,\n \"tensor(uint64)\": np.uint64,\n \"tensor(float16)\": np.float16,\n \"tensor(float)\": np.float32,\n \"tensor(double)\": np.float64,\n}\n\n\nclass OnnxRuntimeModel:\n def __init__(self, model=None, **kwargs):\n logger.info(\"`diffusers.OnnxRuntimeModel` is experimental and might change in the future.\")\n self.model = model\n # <MISSING LINE>\n self.latest_model_name = kwargs.get(\"latest_model_name\", ONNX_WEIGHTS_NAME)\n\n def __call__(self, **kwargs):\n inputs = {k: np.array(v) for k, v in kwargs.items()}\n return self.model.run(None, inputs)\n\n @staticmethod\n def load_model(path: Union[str, Path], provider=None, sess_options=None):\n \"\"\"\n Loads an ONNX Inference session with an ExecutionProvider. Default provider is `CPUExecutionProvider`\n\n Arguments:\n path (`str` or `Path`):\n Directory from which to load\n provider(`str`, *optional*):\n Onnxruntime execution provider to use for loading the model, defaults to `CPUExecutionProvider`\n \"\"\"\n if provider is None:\n logger.info(\"No onnxruntime provider specified, using CPUExecutionProvider\")\n provider = \"CPUExecutionProvider\"\n\n return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)\n",
+     "response": " self.model_save_dir = kwargs.get(\"model_save_dir\", None)",
+     "example_id": 1,
+     "chunks": [
+       {
+         "start_lineno": 133,
+         "end_lineno": 191,
+         "document_path": "huggingface_diffusers/src/diffusers/pipelines/onnx_utils.py"
+       },
+       {
+         "start_lineno": 453,
+         "end_lineno": 486,
+         "document_path": "src/diffusers/models/modeling_utils.py"
+       },
+       {
+         "start_lineno": 486,
+         "end_lineno": 526,
+         "document_path": "src/diffusers/models/modeling_flax_utils.py"
+       },
+       {
+         "start_lineno": 109,
+         "end_lineno": 151,
+         "document_path": "scripts/convert_stable_diffusion_checkpoint_to_onnx.py"
+       },
+       {
+         "start_lineno": 1,
+         "end_lineno": 26,
+         "document_path": "scripts/convert_stable_diffusion_checkpoint_to_onnx.py"
+       },
+       {
+         "start_lineno": 169,
+         "document_path": "src/diffusers/pipelines/onnx_utils.py"
+       },
+       {
+         "start_lineno": 93,
+         "document_path": "src/diffusers/pipelines/onnx_utils.py"
+       },
+       {
+         "document_path": "scripts/convert_stable_diffusion_checkpoint_to_onnx.py"
+       },
+       {
+         "end_lineno": 225,
+         "document_path": "scripts/convert_stable_diffusion_checkpoint_to_onnx.py"
+       },
+       {
+         "end_lineno": 212,
+         "document_path": "src/diffusers/pipelines/onnx_utils.py"
+       }
+     ]
+   },
+   {
+     "prompt": null,
+     "response": " num_hidden_layers=4,",
+     "example_id": 2,
+     "chunks": [
+       {
+         "end_lineno": 279,
+         "document_path": "tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py"
+       }
+     ]
+   },
+   {
+     "prompt": "Here is some code that is missing a line denoted with \"# <MISSING LINE>\". What should go in the missing line? \n\n\n if self.state_in_first_order:\n # 2. Convert to an ODE derivative for 1st order\n derivative = (sample - pred_original_sample) / sigma_hat\n # 3. delta timestep\n dt = sigma_next - sigma_hat\n\n # store for 2nd order step\n self.prev_derivative = derivative\n self.dt = dt\n self.sample = sample\n else:\n # 2. 2nd order / Heun's method\n derivative = (sample - pred_original_sample) / sigma_next\n derivative = (self.prev_derivative + derivative) / 2\n\n # 3. take prev timestep & sample\n dt = self.dt\n sample = self.sample\n\n # free dt and derivative\n # Note, this puts the scheduler in \"first order mode\"\n self.prev_derivative = None\n self.dt = None\n self.sample = None\n\n prev_sample = sample + derivative * dt\n\n if not return_dict:\n return (prev_sample,)\n\n return SchedulerOutput(prev_sample=prev_sample)\n\n def add_noise(\n self,\n original_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.FloatTensor,\n ) -> torch.FloatTensor:\n # Make sure sigmas and timesteps have the same device and dtype as original_samples\n self.sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)\n if original_samples.device.type == \"mps\" and torch.is_floating_point(timesteps):\n # mps does not support float64\n # <MISSING LINE>\n timesteps = timesteps.to(original_samples.device, dtype=torch.float32)\n else:\n self.timesteps = self.timesteps.to(original_samples.device)\n timesteps = timesteps.to(original_samples.device)\n\n step_indices = [self.index_for_timestep(t) for t in timesteps]\n\n sigma = self.sigmas[step_indices].flatten()\n while len(sigma.shape) < len(original_samples.shape):\n sigma = sigma.unsqueeze(-1)\n\n noisy_samples = original_samples + noise * sigma\n return noisy_samples\n\n def __len__(self):\n return self.config.num_train_timesteps\n",
+     "response": " self.timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)",
+     "example_id": 3,
+     "chunks": [
+       {
+         "document_path": "src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py",
+         "start_lineno": 286,
+         "end_lineno": 314
+       },
+       {
+         "document_path": "src/diffusers/schedulers/scheduling_euler_ancestral_discrete.py",
+         "start_lineno": 242,
+         "end_lineno": 271
+       },
+       {
+         "document_path": "src/diffusers/schedulers/scheduling_k_dpm_2_discrete.py",
+         "start_lineno": 268,
+         "end_lineno": 297
+       },
+       {
+         "document_path": "src/diffusers/schedulers/scheduling_lms_discrete.py",
+         "start_lineno": 248,
+         "end_lineno": 277
+       },
+       {
+         "document_path": "tests/test_scheduler.py",
+         "start_lineno": 2693,
+         "end_lineno": 2727
+       },
+       {
+         "document_path": "src/diffusers/schedulers/scheduling_heun_discrete.py",
+         "start_lineno": 134,
+         "end_lineno": 172
+       },
+       {
+         "document_path": "src/diffusers/schedulers/scheduling_euler_discrete.py",
+         "start_lineno": 239,
+         "end_lineno": 279
+       },
+       {
+         "document_path": "src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py",
+         "start_lineno": 137,
+         "end_lineno": 172
+       }
+     ]
+   },
+   {
+     "prompt": "Here is some code that is missing a line denoted with \"# <MISSING LINE>\". What should go in the missing line?\n\n\n\nimport PIL\nfrom diffusers import DiffusionPipeline\nfrom diffusers.configuration_utils import FrozenDict\nfrom diffusers.models import AutoencoderKL, UNet2DConditionModel\nfrom diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput\nfrom diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker\nfrom diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler\nfrom diffusers.utils import deprecate, logging\nfrom transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer\n\n\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n\ndef prepare_mask_and_masked_image(image, mask):\n image = np.array(image.convert(\"RGB\"))\n image = image[None].transpose(0, 3, 1, 2)\n # <MISSING LINE>\n\n mask = np.array(mask.convert(\"L\"))\n mask = mask.astype(np.float32) / 255.0\n mask = mask[None, None]\n mask[mask < 0.5] = 0\n mask[mask >= 0.5] = 1\n mask = torch.from_numpy(mask)\n\n masked_image = image * (mask < 0.5)\n\n return mask, masked_image\n\n\ndef check_size(image, height, width):\n if isinstance(image, PIL.Image.Image):\n w, h = image.size\n elif isinstance(image, torch.Tensor):\n *_, h, w = image.shape\n\n if h != height or w != width:\n raise ValueError(f\"Image size should be {height}x{width}, but got {h}x{w}\")\n\n",
+     "response": " image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0",
+     "example_id": 4,
+     "chunks": [
+       [
+         {
+           "document_path": "examples/community/img2img_inpainting.py",
+           "start_lineno": 14,
+           "end_lineno": 35
+         },
+         {
+           "document_path": "examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py",
+           "start_lineno": 34,
+           "end_lineno": 55
+         },
+         {
+           "document_path": "src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py",
+           "start_lineno": 106,
+           "end_lineno": 127
+         },
+         {
+           "document_path": "src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py",
+           "start_lineno": 110,
+           "end_lineno": 131
+         },
+         {
+           "document_path": "tests/pipelines/paint_by_example/test_paint_by_example.py",
+           "start_lineno": 84,
+           "end_lineno": 105
+         },
+         {
+           "document_path": "src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py",
+           "start_lineno": 64,
+           "end_lineno": 108
+         },
+         {
+           "document_path": "src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py",
+           "start_lineno": 1,
+           "end_lineno": 57
+         },
+         {
+           "document_path": "src/diffusers/pipelines/repaint/pipeline_repaint.py",
+           "start_lineno": 51,
+           "end_lineno": 70
+         },
+         {
+           "document_path": "examples/community/lpw_stable_diffusion_onnx.py",
+           "start_lineno": 410,
+           "end_lineno": 421
+         },
+         {
+           "document_path": "src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py",
+           "start_lineno": 514,
+           "end_lineno": 524
+         }
+       ]
+     ]
+   },
+   {
+     "prompt": "Here is some code that is missing a line denoted with \"# <MISSING LINE>\". What should go in the missing line?\n\n\n # Create parallel version of the train step\n p_train_step = jax.pmap(train_step, \"batch\", donate_argnums=(0, 1))\n\n # Replicate the train state on each device\n unet_state = jax_utils.replicate(unet_state)\n text_encoder_state = jax_utils.replicate(text_encoder_state)\n vae_params = jax_utils.replicate(vae_params)\n\n # Train!\n num_update_steps_per_epoch = math.ceil(len(train_dataloader))\n\n # Scheduler and math around the number of training steps.\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel & distributed) = {total_train_batch_size}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n\n def checkpoint(step=None):\n # Create the pipeline using the trained modules and save it.\n scheduler, _ = FlaxPNDMScheduler.from_pretrained(\"CompVis/stable-diffusion-v1-4\", subfolder=\"scheduler\")\n safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(\n \"CompVis/stable-diffusion-safety-checker\", from_pt=True\n )\n pipeline = FlaxStableDiffusionPipeline(\n text_encoder=text_encoder,\n vae=vae,\n unet=unet,\n tokenizer=tokenizer,\n scheduler=scheduler,\n safety_checker=safety_checker,\n # <MISSING LINE>\n )\n\n outdir = os.path.join(args.output_dir, str(step)) if step else args.output_dir\n pipeline.save_pretrained(\n outdir,\n params={\n \"text_encoder\": get_params_to_save(text_encoder_state.params),\n \"vae\": get_params_to_save(vae_params),\n \"unet\": get_params_to_save(unet_state.params),\n \"safety_checker\": safety_checker.params,\n },\n )\n\n if args.push_to_hub:\n message = f\"checkpoint-{step}\" if step is not None else \"End of training\"\n repo.push_to_hub(commit_message=message, blocking=False, auto_lfs_prune=True)\n\n global_step = 0\n\n epochs = tqdm(range(args.num_train_epochs), desc=\"Epoch ... \", position=0)\n for epoch in epochs:\n # ======================== Training ================================\n\n train_metrics = []\n\n steps_per_epoch = len(train_dataset) // total_train_batch_size\n train_step_progress_bar = tqdm(total=steps_per_epoch, desc=\"Training...\", position=1, leave=False)\n # train\n for batch in train_dataloader:\n batch = shard(batch)\n unet_state, text_encoder_state, train_metric, train_rngs = p_train_step(\n unet_state, text_encoder_state, vae_params, batch, train_rngs\n )\n train_metrics.append(train_metric)\n\n train_step_progress_bar.update(jax.local_device_count())\n\n global_step += 1\n if jax.process_index() == 0 and args.save_steps and global_step % args.save_steps == 0:\n checkpoint(global_step)\n if global_step >= args.max_train_steps:\n break\n\n train_metric = jax_utils.unreplicate(train_metric)\n\n train_step_progress_bar.close()\n epochs.write(f\"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})\")\n\n if jax.process_index() == 0:\n checkpoint()\n\n\nif __name__ == \"__main__\":\n main()\n",
+     "response": " feature_extractor=CLIPFeatureExtractor.from_pretrained(\"openai/clip-vit-base-patch32\")",
+     "example_id": 5,
+     "chunks": [
+       {
+         "document_path": "examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py",
+         "start_lineno": 615,
+         "end_lineno": 655
+       },
+       {
+         "document_path": "examples/text_to_image/train_text_to_image_flax.py",
+         "start_lineno": 530,
+         "end_lineno": 590
+       },
+       {
+         "document_path": "examples/textual_inversion/textual_inversion_flax.py",
+         "start_lineno": 620,
+         "end_lineno": 690
+       },
+       {
+         "document_path": "src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py",
+         "start_lineno": 1,
+         "end_lineno": 50
+       },
+       {
+         "document_path": "src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py",
+         "start_lineno": 45,
+         "end_lineno": 80
+       },
+       {
+         "document_path": "src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py",
+         "start_lineno": 68,
+         "end_lineno": 114
+       },
+       {
+         "document_path": "src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py",
+         "start_lineno": 175,
+         "end_lineno": 225
+       },
+       {
+         "document_path": "src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py",
+         "start_lineno": 107,
+         "end_lineno": 135
+       },
+       {
+         "document_path": "src/diffusers/pipelines/stable_diffusion/__init__.py",
+         "start_lineno": 74,
+         "end_lineno": 105
+       }
+     ]
+   },
+   {
+     "prompt": "Here is some code that is missing a line denoted with \"# <MISSING LINE>\". What should go in the missing line?\n\n\n\n if args.with_prior_preservation:\n # Chunk the noise and noise_pred into two parts and compute the loss on each part separately.\n model_pred, model_pred_prior = jnp.split(model_pred, 2, axis=0)\n target, target_prior = jnp.split(target, 2, axis=0)\n\n # Compute instance loss\n loss = (target - model_pred) ** 2\n loss = loss.mean()\n\n # Compute prior loss\n prior_loss = (target_prior - model_pred_prior) ** 2\n prior_loss = prior_loss.mean()\n\n # Add the prior loss to the instance loss.\n loss = loss + args.prior_loss_weight * prior_loss\n else:\n loss = (target - model_pred) ** 2\n loss = loss.mean()\n\n return loss\n\n grad_fn = jax.value_and_grad(compute_loss)\n loss, grad = grad_fn(params)\n grad = jax.lax.pmean(grad, \"batch\")\n\n new_unet_state = unet_state.apply_gradients(grads=grad[\"unet\"])\n if args.train_text_encoder:\n new_text_encoder_state = text_encoder_state.apply_gradients(grads=grad[\"text_encoder\"])\n else:\n new_text_encoder_state = text_encoder_state\n\n metrics = {\"loss\": loss}\n metrics = jax.lax.pmean(metrics, axis_name=\"batch\")\n\n return new_unet_state, new_text_encoder_state, metrics, new_train_rng\n\n # Create parallel version of the train step\n p_train_step = jax.pmap(train_step, \"batch\", donate_argnums=(0, 1))\n\n # Replicate the train state on each device\n unet_state = jax_utils.replicate(unet_state)\n text_encoder_state = jax_utils.replicate(text_encoder_state)\n vae_params = jax_utils.replicate(vae_params)\n\n # Train!\n num_update_steps_per_epoch = math.ceil(len(train_dataloader))\n\n # Scheduler and math around the number of training steps.\n if args.max_train_steps is None:\n # <MISSING LINE>\n\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel & distributed) = {total_train_batch_size}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n\n def checkpoint(step=None):\n # Create the pipeline using the trained modules and save it.\n scheduler, _ = FlaxPNDMScheduler.from_pretrained(\"CompVis/stable-diffusion-v1-4\", subfolder=\"scheduler\")\n safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(\n \"CompVis/stable-diffusion-safety-checker\", from_pt=True\n )\n pipeline = FlaxStableDiffusionPipeline(\n text_encoder=text_encoder,\n vae=vae,\n unet=unet,\n tokenizer=tokenizer,\n scheduler=scheduler,\n safety_checker=safety_checker,\n # <MISSING LINE>\n )\n\n outdir = os.path.join(args.output_dir, str(step)) if step else args.output_dir\n pipeline.save_pretrained(\n outdir,\n params={\n \"text_encoder\": get_params_to_save(text_encoder_state.params),\n \"vae\": get_params_to_save(vae_params),\n \"unet\": get_params_to_save(unet_state.params),\n \"safety_checker\": safety_checker.params,\n },\n )\n\n if args.push_to_hub:\n message = f\"checkpoint-{step}\" if step is not None else \"End of training\"\n repo.push_to_hub(commit_message=message, blocking=False, auto_lfs_prune=True)\n\n global_step = 0\n\n epochs = tqdm(range(args.num_train_epochs), desc=\"Epoch ... \", position=0)\n for epoch in epochs:\n # ======================== Training ================================\n\n train_metrics = []\n\n steps_per_epoch = len(train_dataset) // total_train_batch_size\n train_step_progress_bar = tqdm(total=steps_per_epoch, desc=\"Training...\", position=1, leave=False)\n # train\n for batch in train_dataloader:\n batch = shard(batch)\n unet_state, text_encoder_state, train_metric, train_rngs = p_train_step(\n unet_state, text_encoder_state, vae_params, batch, train_rngs\n )\n train_metrics.append(train_metric)\n\n train_step_progress_bar.update(jax.local_device_count())\n\n global_step += 1\n if jax.process_index() == 0 and args.save_steps and global_step % args.save_steps == 0:\n checkpoint(global_step)\n if global_step >= args.max_train_steps:\n break\n\n train_metric = jax_utils.unreplicate(train_metric)\n\n train_step_progress_bar.close()\n epochs.write(f\"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})\")\n\n if jax.process_index() == 0:\n checkpoint()\n\n\nif __name__ == \"__main__\":\n main()\n",
+     "example_id": 6,
+     "response": " args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch",
+     "chunks": [
+       {
+         "document_path": "examples/dreambooth/train_dreambooth_flax.py",
+         "start_lineno": 605,
+         "end_lineno": 645
+       },
+
+       {
+         "document_path": "examples/dreambooth/train_dreambooth_lora.py",
+         "start_lineno": 751,
+         "end_lineno": 791
+       },
+
+       {
+         "document_path": "examples/dreambooth/train_dreambooth.py",
+         "start_lineno": 681,
+         "end_lineno": 721
+       },
+
+       {
+         "document_path": "examples/research_projects/colossalai/train_dreambooth_colossalai.py",
+         "start_lineno": 529,
+         "end_lineno": 569
+       },
+
+       {
+         "document_path": "examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py",
+         "start_lineno": 581,
+         "end_lineno": 621
+       },
+
+       {
+         "document_path": "examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py",
+         "start_lineno": 476,
+         "end_lineno": 516
+       },
+
+       {
+         "document_path": "examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py",
+         "start_lineno": 666,
+         "end_lineno": 706
+       },
+
+       {
+         "document_path": "examples/text_to_image/train_text_to_image_flax.py",
+         "start_lineno": 487,
+         "end_lineno": 527
+       },
+
+       {
+         "document_path": "examples/text_to_image/train_text_to_image_lora.py",
+         "start_lineno": 589,
+         "end_lineno": 629
+       },
+
+       {
+         "document_path": "examples/text_to_image/train_text_to_image.py",
+         "start_lineno": 520,
+         "end_lineno": 560
+       },
+
+       {
+         "document_path": "examples/textual_inversion/textual_inversion_flax.py",
+         "start_lineno": 569,
+         "end_lineno": 609
+       },
+
+       {
+         "document_path": "examples/textual_inversion/textual_inversion.py",
+         "start_lineno": 597,
+         "end_lineno": 637
+       }
+     ]
+   }
+ ]
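Every entry above follows one schema: an optional "prompt" (a fill-in-the-missing-line task, null in example 2), the expected "response", an "example_id", and a list of retrieved "chunks" pointing into diffusers source files. As a rough illustration only (not part of the commit), here is a minimal Python sketch of how such a file could be loaded and inspected; the relative path "golden-examples.json" and the iteration logic are assumptions based on the structure shown above:

import json

# Load the dataset added in this commit (path assumed relative to the repo root).
with open("golden-examples.json") as f:
    examples = json.load(f)

for ex in examples:
    # "prompt" may be null (see example_id 2); "response" is the expected missing line.
    print(f"example {ex['example_id']}: expected -> {ex['response'].strip()}")
    for chunk in ex["chunks"]:
        # example_id 4 nests its chunk list one level deeper, so guard on the type;
        # some chunks also omit start_lineno and/or end_lineno.
        if isinstance(chunk, dict):
            start = chunk.get("start_lineno", "?")
            end = chunk.get("end_lineno", "?")
            print(f"  {chunk['document_path']}:{start}-{end}")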
huggingface_diffusers/.github/ISSUE_TEMPLATE/bug-report.yml ADDED
@@ -0,0 +1,51 @@
+ name: "\U0001F41B Bug Report"
+ description: Report a bug on diffusers
+ labels: [ "bug" ]
+ body:
+   - type: markdown
+     attributes:
+       value: |
+         Thanks a lot for taking the time to file this issue 🤗.
+         Issues do not only help to improve the library, but also publicly document common problems, questions, workflows for the whole community!
+         Thus, issues are of the same importance as pull requests when contributing to this library ❤️.
+         In order to make your issue as **useful for the community as possible**, let's try to stick to some simple guidelines:
+         - 1. Please try to be as precise and concise as possible.
+           *Give your issue a fitting title. Assume that someone with very limited knowledge of diffusers can understand your issue. Add links to the source code, documentation, other issues, pull requests etc...*
+         - 2. If your issue is about something not working, **always** provide a reproducible code snippet. The reader should be able to reproduce your issue by **only copy-pasting your code snippet into a Python shell**.
+           *The community cannot solve your issue if it cannot reproduce it. If your bug is related to training, add your training script and make everything needed to train public. Otherwise, just add a simple Python code snippet.*
+         - 3. Add the **minimum amount of code / context that is needed to understand, reproduce your issue**.
+           *Make the life of maintainers easy. `diffusers` is getting many issues every day. Make sure your issue is about one bug and one bug only. Make sure you add only the context, code needed to understand your issue - nothing more. Generally, every issue is a way of documenting this library, try to make it a good documentation entry.*
+   - type: markdown
+     attributes:
+       value: |
+         For more in-detail information on how to write good issues you can have a look [here](https://huggingface.co/course/chapter8/5?fw=pt)
+   - type: textarea
+     id: bug-description
+     attributes:
+       label: Describe the bug
+       description: A clear and concise description of what the bug is. If you intend to submit a pull request for this issue, tell us in the description. Thanks!
+       placeholder: Bug description
+     validations:
+       required: true
+   - type: textarea
+     id: reproduction
+     attributes:
+       label: Reproduction
+       description: Please provide a minimal reproducible code which we can copy/paste and reproduce the issue.
+       placeholder: Reproduction
+     validations:
+       required: true
+   - type: textarea
+     id: logs
+     attributes:
+       label: Logs
+       description: "Please include the Python logs if you can."
+       render: shell
+   - type: textarea
+     id: system-info
+     attributes:
+       label: System Info
+       description: Please share your system info with us. You can run the command `diffusers-cli env` and copy-paste its output below.
+       placeholder: diffusers version, platform, python version, ...
+     validations:
+       required: true
huggingface_diffusers/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,4 @@
+ contact_links:
+   - name: Blank issue
+     url: https://github.com/huggingface/diffusers/issues/new
+     about: General usage questions and community discussions
huggingface_diffusers/.github/ISSUE_TEMPLATE/feature_request.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ name: "\U0001F680 Feature request"
+ about: Suggest an idea for this project
+ title: ''
+ labels: ''
+ assignees: ''
+
+ ---
+
+ **Is your feature request related to a problem? Please describe.**
+ A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+ **Describe the solution you'd like**
+ A clear and concise description of what you want to happen.
+
+ **Describe alternatives you've considered**
+ A clear and concise description of any alternative solutions or features you've considered.
+
+ **Additional context**
+ Add any other context or screenshots about the feature request here.
huggingface_diffusers/.github/ISSUE_TEMPLATE/feedback.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ name: "💬 Feedback about API Design"
+ about: Give feedback about the current API design
+ title: ''
+ labels: ''
+ assignees: ''
+
+ ---
+
+ **What API design would you like to have changed or added to the library? Why?**
+
+ **What use case would this enable or better enable? Can you give us a code example?**
huggingface_diffusers/.github/ISSUE_TEMPLATE/new-model-addition.yml ADDED
@@ -0,0 +1,31 @@
+ name: "\U0001F31F New model/pipeline/scheduler addition"
+ description: Submit a proposal/request to implement a new diffusion model / pipeline / scheduler
+ labels: [ "New model/pipeline/scheduler" ]
+
+ body:
+   - type: textarea
+     id: description-request
+     validations:
+       required: true
+     attributes:
+       label: Model/Pipeline/Scheduler description
+       description: |
+         Put any and all important information relative to the model/pipeline/scheduler
+
+   - type: checkboxes
+     id: information-tasks
+     attributes:
+       label: Open source status
+       description: |
+         Please note that if the model implementation isn't available or if the weights aren't open-source, we are less likely to implement it in `diffusers`.
+       options:
+         - label: "The model implementation is available"
+         - label: "The model weights are available (Only relevant if addition is not a scheduler)."
+
+   - type: textarea
+     id: additional-info
+     attributes:
+       label: Provide useful links for the implementation
+       description: |
+         Please provide information regarding the implementation, the weights, and the authors.
+         Please mention the authors by @gh-username if you're aware of their usernames.
huggingface_diffusers/.github/actions/setup-miniconda/action.yml ADDED
@@ -0,0 +1,146 @@
+ name: Set up conda environment for testing
+
+ description: Sets up miniconda in your ${RUNNER_TEMP} environment and gives you the ${CONDA_RUN} environment variable so you don't have to worry about polluting non-ephemeral runners anymore
+
+ inputs:
+   python-version:
+     description: If set to any value, don't use sudo to clean the workspace
+     required: false
+     type: string
+     default: "3.9"
+   miniconda-version:
+     description: Miniconda version to install
+     required: false
+     type: string
+     default: "4.12.0"
+   environment-file:
+     description: Environment file to install dependencies from
+     required: false
+     type: string
+     default: ""
+
+ runs:
+   using: composite
+   steps:
+     # Use the same trick from https://github.com/marketplace/actions/setup-miniconda
+     # to refresh the cache daily. This is kind of optional though
+     - name: Get date
+       id: get-date
+       shell: bash
+       run: echo "::set-output name=today::$(/bin/date -u '+%Y%m%d')d"
+     - name: Setup miniconda cache
+       id: miniconda-cache
+       uses: actions/cache@v2
+       with:
+         path: ${{ runner.temp }}/miniconda
+         key: miniconda-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}
+     - name: Install miniconda (${{ inputs.miniconda-version }})
+       if: steps.miniconda-cache.outputs.cache-hit != 'true'
+       env:
+         MINICONDA_VERSION: ${{ inputs.miniconda-version }}
+       shell: bash -l {0}
+       run: |
+         MINICONDA_INSTALL_PATH="${RUNNER_TEMP}/miniconda"
+         mkdir -p "${MINICONDA_INSTALL_PATH}"
+         case ${RUNNER_OS}-${RUNNER_ARCH} in
+           Linux-X64)
+             MINICONDA_ARCH="Linux-x86_64"
+             ;;
+           macOS-ARM64)
+             MINICONDA_ARCH="MacOSX-arm64"
+             ;;
+           macOS-X64)
+             MINICONDA_ARCH="MacOSX-x86_64"
+             ;;
+           *)
+             echo "::error::Platform ${RUNNER_OS}-${RUNNER_ARCH} currently unsupported using this action"
+             exit 1
+             ;;
+         esac
+         MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py39_${MINICONDA_VERSION}-${MINICONDA_ARCH}.sh"
+         curl -fsSL "${MINICONDA_URL}" -o "${MINICONDA_INSTALL_PATH}/miniconda.sh"
+         bash "${MINICONDA_INSTALL_PATH}/miniconda.sh" -b -u -p "${MINICONDA_INSTALL_PATH}"
+         rm -rf "${MINICONDA_INSTALL_PATH}/miniconda.sh"
+     - name: Update GitHub path to include miniconda install
+       shell: bash
+       run: |
+         MINICONDA_INSTALL_PATH="${RUNNER_TEMP}/miniconda"
+         echo "${MINICONDA_INSTALL_PATH}/bin" >> $GITHUB_PATH
+     - name: Setup miniconda env cache (with env file)
+       id: miniconda-env-cache-env-file
+       if: ${{ runner.os }} == 'macOS' && ${{ inputs.environment-file }} != ''
+       uses: actions/cache@v2
+       with:
+         path: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
+         key: miniconda-env-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}-${{ hashFiles(inputs.environment-file) }}
+     - name: Setup miniconda env cache (without env file)
+       id: miniconda-env-cache
+       if: ${{ runner.os }} == 'macOS' && ${{ inputs.environment-file }} == ''
+       uses: actions/cache@v2
+       with:
+         path: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
+         key: miniconda-env-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}
+     - name: Setup conda environment with python (v${{ inputs.python-version }})
+       if: steps.miniconda-env-cache-env-file.outputs.cache-hit != 'true' && steps.miniconda-env-cache.outputs.cache-hit != 'true'
+       shell: bash
+       env:
+         PYTHON_VERSION: ${{ inputs.python-version }}
+         ENV_FILE: ${{ inputs.environment-file }}
+       run: |
+         CONDA_BASE_ENV="${RUNNER_TEMP}/conda-python-${PYTHON_VERSION}"
+         ENV_FILE_FLAG=""
+         if [[ -f "${ENV_FILE}" ]]; then
+           ENV_FILE_FLAG="--file ${ENV_FILE}"
+         elif [[ -n "${ENV_FILE}" ]]; then
+           echo "::warning::Specified env file (${ENV_FILE}) not found, not going to include it"
+         fi
+         conda create \
+           --yes \
+           --prefix "${CONDA_BASE_ENV}" \
+           "python=${PYTHON_VERSION}" \
+           ${ENV_FILE_FLAG} \
+           cmake=3.22 \
+           conda-build=3.21 \
+           ninja=1.10 \
+           pkg-config=0.29 \
+           wheel=0.37
+     - name: Clone the base conda environment and update GitHub env
+       shell: bash
+       env:
+         PYTHON_VERSION: ${{ inputs.python-version }}
+         CONDA_BASE_ENV: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
+       run: |
+         CONDA_ENV="${RUNNER_TEMP}/conda_environment_${GITHUB_RUN_ID}"
+         conda create \
+           --yes \
+           --prefix "${CONDA_ENV}" \
+           --clone "${CONDA_BASE_ENV}"
+         # TODO: conda-build could not be cloned because it hardcodes the path, so it
+         # could not be cached
+         conda install --yes -p ${CONDA_ENV} conda-build=3.21
+         echo "CONDA_ENV=${CONDA_ENV}" >> "${GITHUB_ENV}"
+         echo "CONDA_RUN=conda run -p ${CONDA_ENV} --no-capture-output" >> "${GITHUB_ENV}"
+         echo "CONDA_BUILD=conda run -p ${CONDA_ENV} conda-build" >> "${GITHUB_ENV}"
+         echo "CONDA_INSTALL=conda install -p ${CONDA_ENV}" >> "${GITHUB_ENV}"
+     - name: Get disk space usage and throw an error for low disk space
+       shell: bash
+       run: |
+         echo "Print the available disk space for manual inspection"
+         df -h
+         # Set the minimum requirement space to 4GB
+         MINIMUM_AVAILABLE_SPACE_IN_GB=4
+         MINIMUM_AVAILABLE_SPACE_IN_KB=$(($MINIMUM_AVAILABLE_SPACE_IN_GB * 1024 * 1024))
+         # Use KB to avoid floating point warning like 3.1GB
+         df -k | tr -s ' ' | cut -d' ' -f 4,9 | while read -r LINE;
+         do
+           AVAIL=$(echo $LINE | cut -f1 -d' ')
+           MOUNT=$(echo $LINE | cut -f2 -d' ')
+           if [ "$MOUNT" = "/" ]; then
+             if [ "$AVAIL" -lt "$MINIMUM_AVAILABLE_SPACE_IN_KB" ]; then
+               echo "There is only ${AVAIL}KB free space left in $MOUNT, which is less than the minimum requirement of ${MINIMUM_AVAILABLE_SPACE_IN_KB}KB. Please help create an issue to PyTorch Release Engineering via https://github.com/pytorch/test-infra/issues and provide the link to the workflow run."
+               exit 1;
+             else
+               echo "There is ${AVAIL}KB free space left in $MOUNT, continue"
+             fi
+           fi
+         done
huggingface_diffusers/.github/workflows/build_docker_images.yml ADDED
@@ -0,0 +1,50 @@
+ name: Build Docker images (nightly)
+
+ on:
+   workflow_dispatch:
+   schedule:
+     - cron: "0 0 * * *" # every day at midnight
+
+ concurrency:
+   group: docker-image-builds
+   cancel-in-progress: false
+
+ env:
+   REGISTRY: diffusers
+
+ jobs:
+   build-docker-images:
+     runs-on: ubuntu-latest
+
+     permissions:
+       contents: read
+       packages: write
+
+     strategy:
+       fail-fast: false
+       matrix:
+         image-name:
+           - diffusers-pytorch-cpu
+           - diffusers-pytorch-cuda
+           - diffusers-flax-cpu
+           - diffusers-flax-tpu
+           - diffusers-onnxruntime-cpu
+           - diffusers-onnxruntime-cuda
+
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v3
+
+       - name: Login to Docker Hub
+         uses: docker/login-action@v2
+         with:
+           username: ${{ env.REGISTRY }}
+           password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+       - name: Build and push
+         uses: docker/build-push-action@v3
+         with:
+           no-cache: true
+           context: ./docker/${{ matrix.image-name }}
+           push: true
+           tags: ${{ env.REGISTRY }}/${{ matrix.image-name }}:latest
huggingface_diffusers/.github/workflows/build_documentation.yml ADDED
@@ -0,0 +1,18 @@
+ name: Build documentation
+
+ on:
+   push:
+     branches:
+       - main
+       - doc-builder*
+       - v*-release
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
+     with:
+       commit_sha: ${{ github.sha }}
+       package: diffusers
+       languages: en ko
+     secrets:
+       token: ${{ secrets.HUGGINGFACE_PUSH }}
huggingface_diffusers/.github/workflows/build_pr_documentation.yml ADDED
@@ -0,0 +1,17 @@
+ name: Build PR Documentation
+
+ on:
+   pull_request:
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
+     with:
+       commit_sha: ${{ github.event.pull_request.head.sha }}
+       pr_number: ${{ github.event.number }}
+       package: diffusers
+       languages: en ko
huggingface_diffusers/.github/workflows/delete_doc_comment.yml ADDED
@@ -0,0 +1,13 @@
+ name: Delete dev documentation
+
+ on:
+   pull_request:
+     types: [ closed ]
+
+
+ jobs:
+   delete:
+     uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
+     with:
+       pr_number: ${{ github.event.number }}
+       package: diffusers
huggingface_diffusers/.github/workflows/nightly_tests.yml ADDED
@@ -0,0 +1,162 @@
+ name: Nightly tests on main
+
+ on:
+   schedule:
+     - cron: "0 0 * * *" # every day at midnight
+
+ env:
+   DIFFUSERS_IS_CI: yes
+   HF_HOME: /mnt/cache
+   OMP_NUM_THREADS: 8
+   MKL_NUM_THREADS: 8
+   PYTEST_TIMEOUT: 600
+   RUN_SLOW: yes
+   RUN_NIGHTLY: yes
+
+ jobs:
+   run_nightly_tests:
+     strategy:
+       fail-fast: false
+       matrix:
+         config:
+           - name: Nightly PyTorch CUDA tests on Ubuntu
+             framework: pytorch
+             runner: docker-gpu
+             image: diffusers/diffusers-pytorch-cuda
+             report: torch_cuda
+           - name: Nightly Flax TPU tests on Ubuntu
+             framework: flax
+             runner: docker-tpu
+             image: diffusers/diffusers-flax-tpu
+             report: flax_tpu
+           - name: Nightly ONNXRuntime CUDA tests on Ubuntu
+             framework: onnxruntime
+             runner: docker-gpu
+             image: diffusers/diffusers-onnxruntime-cuda
+             report: onnx_cuda
+
+     name: ${{ matrix.config.name }}
+
+     runs-on: ${{ matrix.config.runner }}
+
+     container:
+       image: ${{ matrix.config.image }}
+       options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ ${{ matrix.config.runner == 'docker-tpu' && '--privileged' || '--gpus 0'}}
+
+     defaults:
+       run:
+         shell: bash
+
+     steps:
+       - name: Checkout diffusers
+         uses: actions/checkout@v3
+         with:
+           fetch-depth: 2
+
+       - name: NVIDIA-SMI
+         if: ${{ matrix.config.runner == 'docker-gpu' }}
+         run: |
+           nvidia-smi
+
+       - name: Install dependencies
+         run: |
+           python -m pip install -e .[quality,test]
+           python -m pip install -U git+https://github.com/huggingface/transformers
+           python -m pip install git+https://github.com/huggingface/accelerate
+
+       - name: Environment
+         run: |
+           python utils/print_env.py
+
+       - name: Run nightly PyTorch CUDA tests
+         if: ${{ matrix.config.framework == 'pytorch' }}
+         env:
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+             -s -v -k "not Flax and not Onnx" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Run nightly Flax TPU tests
+         if: ${{ matrix.config.framework == 'flax' }}
+         env:
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           python -m pytest -n 0 \
+             -s -v -k "Flax" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Run nightly ONNXRuntime CUDA tests
+         if: ${{ matrix.config.framework == 'onnxruntime' }}
+         env:
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+             -s -v -k "Onnx" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Failure short reports
+         if: ${{ failure() }}
+         run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
+
+       - name: Test suite reports artifacts
+         if: ${{ always() }}
+         uses: actions/upload-artifact@v2
+         with:
+           name: ${{ matrix.config.report }}_test_reports
+           path: reports
+
+   run_nightly_tests_apple_m1:
+     name: Nightly PyTorch MPS tests on MacOS
+     runs-on: [ self-hosted, apple-m1 ]
+
+     steps:
+       - name: Checkout diffusers
+         uses: actions/checkout@v3
+         with:
+           fetch-depth: 2
+
+       - name: Clean checkout
+         shell: arch -arch arm64 bash {0}
+         run: |
+           git clean -fxd
+
+       - name: Setup miniconda
+         uses: ./.github/actions/setup-miniconda
+         with:
+           python-version: 3.9
+
+       - name: Install dependencies
+         shell: arch -arch arm64 bash {0}
+         run: |
+           ${CONDA_RUN} python -m pip install --upgrade pip
+           ${CONDA_RUN} python -m pip install -e .[quality,test]
+           ${CONDA_RUN} python -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
+           ${CONDA_RUN} python -m pip install git+https://github.com/huggingface/accelerate
+
+       - name: Environment
+         shell: arch -arch arm64 bash {0}
+         run: |
+           ${CONDA_RUN} python utils/print_env.py
+
+       - name: Run nightly PyTorch tests on M1 (MPS)
+         shell: arch -arch arm64 bash {0}
+         env:
+           HF_HOME: /System/Volumes/Data/mnt/cache
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps tests/
+
+       - name: Failure short reports
+         if: ${{ failure() }}
+         run: cat reports/tests_torch_mps_failures_short.txt
+
+       - name: Test suite reports artifacts
+         if: ${{ always() }}
+         uses: actions/upload-artifact@v2
+         with:
+           name: torch_mps_test_reports
+           path: reports
huggingface_diffusers/.github/workflows/pr_quality.yml ADDED
@@ -0,0 +1,50 @@
+ name: Run code quality checks
+
+ on:
+   pull_request:
+     branches:
+       - main
+   push:
+     branches:
+       - main
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   check_code_quality:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: "3.7"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install .[quality]
+       - name: Check quality
+         run: |
+           black --check --preview examples tests src utils scripts
+           isort --check-only examples tests src utils scripts
+           flake8 examples tests src utils scripts
+           doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source
+
+   check_repository_consistency:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: "3.7"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install .[quality]
+       - name: Check quality
+         run: |
+           python utils/check_copies.py
+           python utils/check_dummies.py
huggingface_diffusers/.github/workflows/pr_tests.yml ADDED
@@ -0,0 +1,155 @@
+ name: Fast tests for PRs
+
+ on:
+   pull_request:
+     branches:
+       - main
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+   cancel-in-progress: true
+
+ env:
+   DIFFUSERS_IS_CI: yes
+   OMP_NUM_THREADS: 4
+   MKL_NUM_THREADS: 4
+   PYTEST_TIMEOUT: 60
+
+ jobs:
+   run_fast_tests:
+     strategy:
+       fail-fast: false
+       matrix:
+         config:
+           - name: Fast PyTorch CPU tests on Ubuntu
+             framework: pytorch
+             runner: docker-cpu
+             image: diffusers/diffusers-pytorch-cpu
+             report: torch_cpu
+           - name: Fast Flax CPU tests on Ubuntu
+             framework: flax
+             runner: docker-cpu
+             image: diffusers/diffusers-flax-cpu
+             report: flax_cpu
+           - name: Fast ONNXRuntime CPU tests on Ubuntu
+             framework: onnxruntime
+             runner: docker-cpu
+             image: diffusers/diffusers-onnxruntime-cpu
+             report: onnx_cpu
+
+     name: ${{ matrix.config.name }}
+
+     runs-on: ${{ matrix.config.runner }}
+
+     container:
+       image: ${{ matrix.config.image }}
+       options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
+
+     defaults:
+       run:
+         shell: bash
+
+     steps:
+       - name: Checkout diffusers
+         uses: actions/checkout@v3
+         with:
+           fetch-depth: 2
+
+       - name: Install dependencies
+         run: |
+           apt-get update && apt-get install libsndfile1-dev -y
+           python -m pip install -e .[quality,test]
+           python -m pip install -U git+https://github.com/huggingface/transformers
+           python -m pip install git+https://github.com/huggingface/accelerate
+
+       - name: Environment
+         run: |
+           python utils/print_env.py
+
+       - name: Run fast PyTorch CPU tests
+         if: ${{ matrix.config.framework == 'pytorch' }}
+         run: |
+           python -m pytest -n 2 --max-worker-restart=0 --dist=loadfile \
+             -s -v -k "not Flax and not Onnx" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Run fast Flax CPU tests
+         if: ${{ matrix.config.framework == 'flax' }}
+         run: |
+           python -m pytest -n 2 --max-worker-restart=0 --dist=loadfile \
+             -s -v -k "Flax" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Run fast ONNXRuntime CPU tests
+         if: ${{ matrix.config.framework == 'onnxruntime' }}
+         run: |
+           python -m pytest -n 2 --max-worker-restart=0 --dist=loadfile \
+             -s -v -k "Onnx" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Failure short reports
+         if: ${{ failure() }}
+         run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
+
+       - name: Test suite reports artifacts
+         if: ${{ always() }}
+         uses: actions/upload-artifact@v2
+         with:
+           name: pr_${{ matrix.config.report }}_test_reports
+           path: reports
+
+   run_fast_tests_apple_m1:
+     name: Fast PyTorch MPS tests on MacOS
+     runs-on: [ self-hosted, apple-m1 ]
+
+     steps:
+       - name: Checkout diffusers
+         uses: actions/checkout@v3
+         with:
+           fetch-depth: 2
+
+       - name: Clean checkout
+         shell: arch -arch arm64 bash {0}
+         run: |
+           git clean -fxd
+
+       - name: Setup miniconda
+         uses: ./.github/actions/setup-miniconda
+         with:
+           python-version: 3.9
+
+       - name: Install dependencies
+         shell: arch -arch arm64 bash {0}
+         run: |
+           ${CONDA_RUN} python -m pip install --upgrade pip
+           ${CONDA_RUN} python -m pip install -e .[quality,test]
+           ${CONDA_RUN} python -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
+           ${CONDA_RUN} python -m pip install git+https://github.com/huggingface/accelerate
+           ${CONDA_RUN} python -m pip install -U git+https://github.com/huggingface/transformers
+
+       - name: Environment
+         shell: arch -arch arm64 bash {0}
+         run: |
+           ${CONDA_RUN} python utils/print_env.py
+
+       - name: Run fast PyTorch tests on M1 (MPS)
+         shell: arch -arch arm64 bash {0}
+         env:
+           HF_HOME: /System/Volumes/Data/mnt/cache
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           ${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/
+
+       - name: Failure short reports
+         if: ${{ failure() }}
+         run: cat reports/tests_torch_mps_failures_short.txt
+
+       - name: Test suite reports artifacts
+         if: ${{ always() }}
+         uses: actions/upload-artifact@v2
+         with:
+           name: pr_torch_mps_test_reports
+           path: reports
huggingface_diffusers/.github/workflows/push_tests.yml ADDED
@@ -0,0 +1,156 @@
+ name: Slow tests on main
+
+ on:
+   push:
+     branches:
+       - main
+
+ env:
+   DIFFUSERS_IS_CI: yes
+   HF_HOME: /mnt/cache
+   OMP_NUM_THREADS: 8
+   MKL_NUM_THREADS: 8
+   PYTEST_TIMEOUT: 600
+   RUN_SLOW: yes
+
+ jobs:
+   run_slow_tests:
+     strategy:
+       fail-fast: false
+       matrix:
+         config:
+           - name: Slow PyTorch CUDA tests on Ubuntu
+             framework: pytorch
+             runner: docker-gpu
+             image: diffusers/diffusers-pytorch-cuda
+             report: torch_cuda
+           - name: Slow Flax TPU tests on Ubuntu
+             framework: flax
+             runner: docker-tpu
+             image: diffusers/diffusers-flax-tpu
+             report: flax_tpu
+           - name: Slow ONNXRuntime CUDA tests on Ubuntu
+             framework: onnxruntime
+             runner: docker-gpu
+             image: diffusers/diffusers-onnxruntime-cuda
+             report: onnx_cuda
+
+     name: ${{ matrix.config.name }}
+
+     runs-on: ${{ matrix.config.runner }}
+
+     container:
+       image: ${{ matrix.config.image }}
+       options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ ${{ matrix.config.runner == 'docker-tpu' && '--privileged' || '--gpus 0'}}
+
+     defaults:
+       run:
+         shell: bash
+
+     steps:
+       - name: Checkout diffusers
+         uses: actions/checkout@v3
+         with:
+           fetch-depth: 2
+
+       - name: NVIDIA-SMI
+         if: ${{ matrix.config.runner == 'docker-gpu' }}
+         run: |
+           nvidia-smi
+
+       - name: Install dependencies
+         run: |
+           python -m pip install -e .[quality,test]
+           python -m pip install -U git+https://github.com/huggingface/transformers
+           python -m pip install git+https://github.com/huggingface/accelerate
+
+       - name: Environment
+         run: |
+           python utils/print_env.py
+
+       - name: Run slow PyTorch CUDA tests
+         if: ${{ matrix.config.framework == 'pytorch' }}
+         env:
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+             -s -v -k "not Flax and not Onnx" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Run slow Flax TPU tests
+         if: ${{ matrix.config.framework == 'flax' }}
+         env:
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           python -m pytest -n 0 \
+             -s -v -k "Flax" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Run slow ONNXRuntime CUDA tests
+         if: ${{ matrix.config.framework == 'onnxruntime' }}
+         env:
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+             -s -v -k "Onnx" \
+             --make-reports=tests_${{ matrix.config.report }} \
+             tests/
+
+       - name: Failure short reports
+         if: ${{ failure() }}
+         run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
+
+       - name: Test suite reports artifacts
+         if: ${{ always() }}
+         uses: actions/upload-artifact@v2
+         with:
+           name: ${{ matrix.config.report }}_test_reports
+           path: reports
+
+   run_examples_tests:
+     name: Examples PyTorch CUDA tests on Ubuntu
+
+     runs-on: docker-gpu
+
+     container:
+       image: diffusers/diffusers-pytorch-cuda
+       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
+
+     steps:
+       - name: Checkout diffusers
+         uses: actions/checkout@v3
+         with:
+           fetch-depth: 2
+
+       - name: NVIDIA-SMI
+         run: |
+           nvidia-smi
+
+       - name: Install dependencies
+         run: |
+           python -m pip install -e .[quality,test,training]
+           python -m pip install git+https://github.com/huggingface/accelerate
+           python -m pip install -U git+https://github.com/huggingface/transformers
+
+       - name: Environment
+         run: |
+           python utils/print_env.py
+
+       - name: Run example tests on GPU
+         env:
+           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+         run: |
+           python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
+
+       - name: Failure short reports
+         if: ${{ failure() }}
+         run: cat reports/examples_torch_cuda_failures_short.txt
+
+       - name: Test suite reports artifacts
+         if: ${{ always() }}
+         uses: actions/upload-artifact@v2
+         with:
+           name: examples_test_reports
+           path: reports
huggingface_diffusers/.github/workflows/stale.yml ADDED
@@ -0,0 +1,27 @@
1
+ name: Stale Bot
2
+
3
+ on:
4
+ schedule:
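+ # runs once a day at 15:00 UTC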
5
+ - cron: "0 15 * * *"
6
+
7
+ jobs:
8
+ close_stale_issues:
9
+ name: Close Stale Issues
10
+ if: github.repository == 'huggingface/diffusers'
11
+ runs-on: ubuntu-latest
12
+ env:
13
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
14
+ steps:
15
+ - uses: actions/checkout@v2
16
+
17
+ - name: Setup Python
18
+ uses: actions/setup-python@v1
19
+ with:
20
+ python-version: 3.7
21
+
22
+ - name: Install requirements
23
+ run: |
24
+ pip install PyGithub
25
+ - name: Close stale issues
26
+ run: |
27
+ python utils/stale.py
huggingface_diffusers/.github/workflows/typos.yml ADDED
@@ -0,0 +1,14 @@
1
+ name: Check typos
2
+
3
+ on:
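+ # no automatic triggers: this workflow only runs when dispatched manually from the Actions tab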
4
+ workflow_dispatch:
5
+
6
+ jobs:
7
+ build:
8
+ runs-on: ubuntu-latest
9
+
10
+ steps:
11
+ - uses: actions/checkout@v3
12
+
13
+ - name: typos-action
14
+ uses: crate-ci/typos@v1.12.4
huggingface_diffusers/.gitignore ADDED
@@ -0,0 +1,171 @@
1
+ # Initially taken from Github's Python gitignore file
2
+
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+
8
+ # C extensions
9
+ *.so
10
+
11
+ # tests and logs
12
+ tests/fixtures/cached_*_text.txt
13
+ logs/
14
+ lightning_logs/
15
+ lang_code_data/
16
+
17
+ # Distribution / packaging
18
+ .Python
19
+ build/
20
+ develop-eggs/
21
+ dist/
22
+ downloads/
23
+ eggs/
24
+ .eggs/
25
+ lib/
26
+ lib64/
27
+ parts/
28
+ sdist/
29
+ var/
30
+ wheels/
31
+ *.egg-info/
32
+ .installed.cfg
33
+ *.egg
34
+ MANIFEST
35
+
36
+ # PyInstaller
37
+ # Usually these files are written by a python script from a template
38
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
39
+ *.manifest
40
+ *.spec
41
+
42
+ # Installer logs
43
+ pip-log.txt
44
+ pip-delete-this-directory.txt
45
+
46
+ # Unit test / coverage reports
47
+ htmlcov/
48
+ .tox/
49
+ .nox/
50
+ .coverage
51
+ .coverage.*
52
+ .cache
53
+ nosetests.xml
54
+ coverage.xml
55
+ *.cover
56
+ .hypothesis/
57
+ .pytest_cache/
58
+
59
+ # Translations
60
+ *.mo
61
+ *.pot
62
+
63
+ # Django stuff:
64
+ *.log
65
+ local_settings.py
66
+ db.sqlite3
67
+
68
+ # Flask stuff:
69
+ instance/
70
+ .webassets-cache
71
+
72
+ # Scrapy stuff:
73
+ .scrapy
74
+
75
+ # Sphinx documentation
76
+ docs/_build/
77
+
78
+ # PyBuilder
79
+ target/
80
+
81
+ # Jupyter Notebook
82
+ .ipynb_checkpoints
83
+
84
+ # IPython
85
+ profile_default/
86
+ ipython_config.py
87
+
88
+ # pyenv
89
+ .python-version
90
+
91
+ # celery beat schedule file
92
+ celerybeat-schedule
93
+
94
+ # SageMath parsed files
95
+ *.sage.py
96
+
97
+ # Environments
98
+ .env
99
+ .venv
100
+ env/
101
+ venv/
102
+ ENV/
103
+ env.bak/
104
+ venv.bak/
105
+
106
+ # Spyder project settings
107
+ .spyderproject
108
+ .spyproject
109
+
110
+ # Rope project settings
111
+ .ropeproject
112
+
113
+ # mkdocs documentation
114
+ /site
115
+
116
+ # mypy
117
+ .mypy_cache/
118
+ .dmypy.json
119
+ dmypy.json
120
+
121
+ # Pyre type checker
122
+ .pyre/
123
+
124
+ # vscode
125
+ .vs
126
+ .vscode
127
+
128
+ # Pycharm
129
+ .idea
130
+
131
+ # TF code
132
+ tensorflow_code
133
+
134
+ # Models
135
+ proc_data
136
+
137
+ # examples
138
+ runs
139
+ /runs_old
140
+ /wandb
141
+ /examples/runs
142
+ /examples/**/*.args
143
+ /examples/rag/sweep
144
+
145
+ # data
146
+ /data
147
+ serialization_dir
148
+
149
+ # emacs
150
+ *.*~
151
+ debug.env
152
+
153
+ # vim
154
+ .*.swp
155
+
156
+ #ctags
157
+ tags
158
+
159
+ # pre-commit
160
+ .pre-commit*
161
+
162
+ # .lock
163
+ *.lock
164
+
165
+ # DS_Store (MacOS)
166
+ .DS_Store
167
+ # RL pipelines may produce mp4 outputs
168
+ *.mp4
169
+
170
+ # dependencies
171
+ /transformers
huggingface_diffusers/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,129 @@
1
+
2
+ # Contributor Covenant Code of Conduct
3
+
4
+ ## Our Pledge
5
+
6
+ We as members, contributors, and leaders pledge to make participation in our
7
+ community a harassment-free experience for everyone, regardless of age, body
8
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
9
+ identity and expression, level of experience, education, socio-economic status,
10
+ nationality, personal appearance, race, religion, or sexual identity
11
+ and orientation.
12
+
13
+ We pledge to act and interact in ways that contribute to an open, welcoming,
14
+ diverse, inclusive, and healthy community.
15
+
16
+ ## Our Standards
17
+
18
+ Examples of behavior that contributes to a positive environment for our
19
+ community include:
20
+
21
+ * Demonstrating empathy and kindness toward other people
22
+ * Being respectful of differing opinions, viewpoints, and experiences
23
+ * Giving and gracefully accepting constructive feedback
24
+ * Accepting responsibility and apologizing to those affected by our mistakes,
25
+ and learning from the experience
26
+ * Focusing on what is best not just for us as individuals, but for the
27
+ overall community
28
+
29
+ Examples of unacceptable behavior include:
30
+
31
+ * The use of sexualized language or imagery, and sexual attention or
32
+ advances of any kind
33
+ * Trolling, insulting or derogatory comments, and personal or political attacks
34
+ * Public or private harassment
35
+ * Publishing others' private information, such as a physical or email
36
+ address, without their explicit permission
37
+ * Other conduct which could reasonably be considered inappropriate in a
38
+ professional setting
39
+
40
+ ## Enforcement Responsibilities
41
+
42
+ Community leaders are responsible for clarifying and enforcing our standards of
43
+ acceptable behavior and will take appropriate and fair corrective action in
44
+ response to any behavior that they deem inappropriate, threatening, offensive,
45
+ or harmful.
46
+
47
+ Community leaders have the right and responsibility to remove, edit, or reject
48
+ comments, commits, code, wiki edits, issues, and other contributions that are
49
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
50
+ decisions when appropriate.
51
+
52
+ ## Scope
53
+
54
+ This Code of Conduct applies within all community spaces, and also applies when
55
+ an individual is officially representing the community in public spaces.
56
+ Examples of representing our community include using an official e-mail address,
57
+ posting via an official social media account, or acting as an appointed
58
+ representative at an online or offline event.
59
+
60
+ ## Enforcement
61
+
62
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
63
+ reported to the community leaders responsible for enforcement at
64
+ feedback@huggingface.co.
65
+ All complaints will be reviewed and investigated promptly and fairly.
66
+
67
+ All community leaders are obligated to respect the privacy and security of the
68
+ reporter of any incident.
69
+
70
+ ## Enforcement Guidelines
71
+
72
+ Community leaders will follow these Community Impact Guidelines in determining
73
+ the consequences for any action they deem in violation of this Code of Conduct:
74
+
75
+ ### 1. Correction
76
+
77
+ **Community Impact**: Use of inappropriate language or other behavior deemed
78
+ unprofessional or unwelcome in the community.
79
+
80
+ **Consequence**: A private, written warning from community leaders, providing
81
+ clarity around the nature of the violation and an explanation of why the
82
+ behavior was inappropriate. A public apology may be requested.
83
+
84
+ ### 2. Warning
85
+
86
+ **Community Impact**: A violation through a single incident or series
87
+ of actions.
88
+
89
+ **Consequence**: A warning with consequences for continued behavior. No
90
+ interaction with the people involved, including unsolicited interaction with
91
+ those enforcing the Code of Conduct, for a specified period of time. This
92
+ includes avoiding interactions in community spaces as well as external channels
93
+ like social media. Violating these terms may lead to a temporary or
94
+ permanent ban.
95
+
96
+ ### 3. Temporary Ban
97
+
98
+ **Community Impact**: A serious violation of community standards, including
99
+ sustained inappropriate behavior.
100
+
101
+ **Consequence**: A temporary ban from any sort of interaction or public
102
+ communication with the community for a specified period of time. No public or
103
+ private interaction with the people involved, including unsolicited interaction
104
+ with those enforcing the Code of Conduct, is allowed during this period.
105
+ Violating these terms may lead to a permanent ban.
106
+
107
+ ### 4. Permanent Ban
108
+
109
+ **Community Impact**: Demonstrating a pattern of violation of community
110
+ standards, including sustained inappropriate behavior, harassment of an
111
+ individual, or aggression toward or disparagement of classes of individuals.
112
+
113
+ **Consequence**: A permanent ban from any sort of public interaction within
114
+ the community.
115
+
116
+ ## Attribution
117
+
118
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119
+ version 2.0, available at
120
+ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
121
+
122
+ Community Impact Guidelines were inspired by [Mozilla's code of conduct
123
+ enforcement ladder](https://github.com/mozilla/diversity).
124
+
125
+ [homepage]: https://www.contributor-covenant.org
126
+
127
+ For answers to common questions about this code of conduct, see the FAQ at
128
+ https://www.contributor-covenant.org/faq. Translations are available at
129
+ https://www.contributor-covenant.org/translations.
huggingface_diffusers/CONTRIBUTING.md ADDED
@@ -0,0 +1,294 @@
1
+ <!---
2
+ Copyright 2022 The HuggingFace Team. All rights reserved.
3
+
4
+ Licensed under the Apache License, Version 2.0 (the "License");
5
+ you may not use this file except in compliance with the License.
6
+ You may obtain a copy of the License at
7
+
8
+ http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ Unless required by applicable law or agreed to in writing, software
11
+ distributed under the License is distributed on an "AS IS" BASIS,
12
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ See the License for the specific language governing permissions and
14
+ limitations under the License.
15
+ -->
16
+
17
+ # How to contribute to diffusers?
18
+
19
+ Everyone is welcome to contribute, and we value everybody's contribution. Code
20
+ is thus not the only way to help the community. Answering questions, helping
21
+ others, reaching out and improving the documentation are immensely valuable to
22
+ the community.
23
+
24
+ It also helps us if you spread the word: reference the library from blog posts
25
+ on the awesome projects it made possible, shout out on Twitter every time it has
26
+ helped you, or simply star the repo to say "thank you".
27
+
28
+ Whichever way you choose to contribute, please be mindful to respect our
29
+ [code of conduct](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md).
30
+
31
+ ## You can contribute in so many ways!
32
+
33
+ There are 4 ways you can contribute to diffusers:
34
+ * Fixing outstanding issues with the existing code;
35
+ * Implementing [new diffusion pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines#contribution), [new schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers) or [new models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models);
36
+ * [Contributing to the examples](https://github.com/huggingface/diffusers/tree/main/examples) or to the documentation;
37
+ * Submitting issues related to bugs or desired new features.
38
+
39
+ In particular there is a special [Good First Issue](https://github.com/huggingface/diffusers/contribute) listing.
40
+ It gives you a list of open Issues that anybody can work on. Just comment on the issue that you'd like to work on it.
41
+ In that same listing you will also find some Issues with `Good Second Issue` label. These are
42
+ typically slightly more complicated than the Issues with just the `Good First Issue` label. But if you
43
+ feel you know what you're doing, go for it.
44
+
45
+ *All are equally valuable to the community.*
46
+
47
+ ## Submitting a new issue or feature request
48
+
49
+ Do your best to follow these guidelines when submitting an issue or a feature
50
+ request. It will make it easier for us to come back to you quickly and with good
51
+ feedback.
52
+
53
+ ### Did you find a bug?
54
+
55
+ The 🧨 Diffusers library is robust and reliable thanks to the users who notify us of
56
+ the problems they encounter. So thank you for reporting an issue.
57
+
58
+ First, we would really appreciate it if you could **make sure the bug was not
59
+ already reported** (use the search bar on Github under Issues).
60
+
61
+ ### Do you want to implement a new diffusion pipeline / diffusion model?
62
+
63
+ Awesome! Please provide the following information:
64
+
65
+ * Short description of the diffusion pipeline and link to the paper;
66
+ * Link to the implementation if it is open-source;
67
+ * Link to the model weights if they are available.
68
+
69
+ If you are willing to contribute the model yourself, let us know so we can best
70
+ guide you.
71
+
72
+ ### Do you want a new feature (that is not a model)?
73
+
74
+ A world-class feature request addresses the following points:
75
+
76
+ 1. Motivation first:
77
+ * Is it related to a problem/frustration with the library? If so, please explain
78
+ why. Providing a code snippet that demonstrates the problem is best.
79
+ * Is it related to something you would need for a project? We'd love to hear
80
+ about it!
81
+ * Is it something you worked on and think could benefit the community?
82
+ Awesome! Tell us what problem it solved for you.
83
+ 2. Write a *full paragraph* describing the feature;
84
+ 3. Provide a **code snippet** that demonstrates its future use;
85
+ 4. In case this is related to a paper, please attach a link;
86
+ 5. Attach any additional information (drawings, screenshots, etc.) you think may help.
87
+
88
+ If your issue is well written, we're already 80% of the way there by the time you
89
+ post it.
90
+
91
+ ## Start contributing! (Pull Requests)
92
+
93
+ Before writing code, we strongly advise you to search through the existing PRs or
94
+ issues to make sure that nobody is already working on the same thing. If you are
95
+ unsure, it is always a good idea to open an issue to get some feedback.
96
+
97
+ You will need basic `git` proficiency to be able to contribute to
98
+ 🧨 Diffusers. `git` is not the easiest tool to use but it has the greatest
99
+ manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
100
+ Git](https://git-scm.com/book/en/v2) is a very good reference.
101
+
102
+ Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/main/setup.py#L426)):
103
+
104
+ 1. Fork the [repository](https://github.com/huggingface/diffusers) by
105
+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code
106
+ under your GitHub user account.
107
+
108
+ 2. Clone your fork to your local disk, and add the base repository as a remote:
109
+
110
+ ```bash
111
+ $ git clone git@github.com:<your Github handle>/diffusers.git
112
+ $ cd diffusers
113
+ $ git remote add upstream https://github.com/huggingface/diffusers.git
114
+ ```
115
+
116
+ 3. Create a new branch to hold your development changes:
117
+
118
+ ```bash
119
+ $ git checkout -b a-descriptive-name-for-my-changes
120
+ ```
121
+
122
+ **Do not** work on the `main` branch.
123
+
124
+ 4. Set up a development environment by running the following command in a virtual environment:
125
+
126
+ ```bash
127
+ $ pip install -e ".[dev]"
128
+ ```
129
+
130
+ (If diffusers was already installed in the virtual environment, remove
131
+ it with `pip uninstall diffusers` before reinstalling it in editable
132
+ mode with the `-e` flag.)
133
+
134
+ To run the full test suite, you might need the additional `transformers` and `datasets` dependencies, which require a separate source
135
+ install:
136
+
137
+ ```bash
138
+ $ git clone https://github.com/huggingface/transformers
139
+ $ cd transformers
140
+ $ pip install -e .
141
+ ```
142
+
143
+ ```bash
144
+ $ git clone https://github.com/huggingface/datasets
145
+ $ cd datasets
146
+ $ pip install -e .
147
+ ```
148
+
149
+ If you have already cloned those repos, you might need to `git pull` to get the most recent changes in the `transformers` and `datasets`
150
+ libraries.
151
+
152
+ 5. Develop the features on your branch.
153
+
154
+ As you work on the features, you should make sure that the test suite
155
+ passes. You should run the tests impacted by your changes like this:
156
+
157
+ ```bash
158
+ $ pytest tests/<TEST_TO_RUN>.py
159
+ ```
160
+
161
+ You can also run the full suite, but now that Diffusers has grown a lot it takes
162
+ a beefy machine to produce a result in a decent amount of time. Here is the
163
+ command for it:
164
+
165
+ ```bash
166
+ $ make test
167
+ ```
168
+
169
+ For more information about tests, check out the
170
+ [dedicated documentation](https://huggingface.co/docs/diffusers/testing).
171
+
172
+ 🧨 Diffusers relies on `black` and `isort` to format its source code
173
+ consistently. After you make changes, apply automatic style corrections and code verifications
174
+ that can't be automated in one go with:
175
+
176
+ ```bash
177
+ $ make style
178
+ ```
179
+
180
+ 🧨 Diffusers also uses `flake8` and a few custom scripts to check for coding mistakes. Quality
181
+ control runs in CI; however, you can also run the same checks with:
182
+
183
+ ```bash
184
+ $ make quality
185
+ ```
186
+
187
+ Once you're happy with your changes, add changed files using `git add` and
188
+ make a commit with `git commit` to record your changes locally:
189
+
190
+ ```bash
191
+ $ git add modified_file.py
192
+ $ git commit
193
+ ```
194
+
195
+ It is a good idea to sync your copy of the code with the original
196
+ repository regularly. This way you can quickly account for changes:
197
+
198
+ ```bash
199
+ $ git fetch upstream
200
+ $ git rebase upstream/main
201
+ ```
202
+
203
+ Push the changes to your account using:
204
+
205
+ ```bash
206
+ $ git push -u origin a-descriptive-name-for-my-changes
207
+ ```
208
+
209
+ 6. Once you are satisfied (**and the checklist below is happy too**), go to the
210
+ webpage of your fork on GitHub. Click on 'Pull request' to send your changes
211
+ to the project maintainers for review.
212
+
213
+ 7. It's ok if maintainers ask you for changes. It happens to core contributors
214
+ too! So that everyone can see the changes in the Pull request, work in your local
215
+ branch and push the changes to your fork. They will automatically appear in
216
+ the pull request.
217
+
218
+
219
+ ### Checklist
220
+
221
+ 1. The title of your pull request should be a summary of its contribution;
222
+ 2. If your pull request addresses an issue, please mention the issue number in
223
+ the pull request description to make sure they are linked (and people
224
+ consulting the issue know you are working on it);
225
+ 3. To indicate a work in progress please prefix the title with `[WIP]`. These
226
+ are useful to avoid duplicated work, and to differentiate it from PRs ready
227
+ to be merged;
228
+ 4. Make sure existing tests pass;
229
+ 5. Add high-coverage tests. No quality testing = no merge.
230
+ - If you are adding new `@slow` tests, make sure they pass using
231
+ `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`.
232
+ - If you are adding a new tokenizer, write tests, and make sure
233
+ `RUN_SLOW=1 python -m pytest tests/test_tokenization_{your_model_name}.py` passes.
234
+ CircleCI does not run the slow tests, but GitHub Actions does every night!
235
+ 6. All public methods must have informative docstrings that work nicely with sphinx. See `modeling_bert.py` for an
236
+ example.
237
+ 7. Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to place such files in a hf.co hosted `dataset`, like
238
+ the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing), and to reference
239
+ them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
240
+ If yours is an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
241
+ to this dataset.
242
+
243
+ ### Tests
244
+
245
+ An extensive test suite is included to test the library behavior and several examples. Library tests can be found in
246
+ the [tests folder](https://github.com/huggingface/diffusers/tree/main/tests).
247
+
248
+ We like `pytest` and `pytest-xdist` because running tests in parallel is faster. From the root of the
249
+ repository, here's how to run tests with `pytest` for the library:
250
+
251
+ ```bash
252
+ $ python -m pytest -n auto --dist=loadfile -s -v ./tests/
253
+ ```
254
+
255
+ In fact, that's how `make test` is implemented (sans the `pip install` line)!
256
+
257
+ You can specify a smaller set of tests in order to test only the feature
258
+ you're working on.
259
+
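+ For example, you can run a single test file, or select tests by keyword with pytest's `-k` option (the file path and keyword below are illustrative; point them at whatever you are changing):
+
+ ```bash
+ $ python -m pytest tests/test_scheduler.py
+ $ python -m pytest -k "ddim" ./tests/
+ ```
+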
260
+ By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to
261
+ `yes` to run them. This will download many gigabytes of models — make sure you
262
+ have enough disk space and a good Internet connection, or a lot of patience!
263
+
264
+ ```bash
265
+ $ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/
266
+ ```
267
+
268
+ `unittest` is also fully supported. Here's how to run tests with
269
+ `unittest`:
270
+
271
+ ```bash
272
+ $ python -m unittest discover -s tests -t . -v
273
+ $ python -m unittest discover -s examples -t examples -v
274
+ ```
275
+
276
+
277
+ ### Style guide
278
+
279
+ For documentation strings, 🧨 Diffusers follows the [Google style](https://google.github.io/styleguide/pyguide.html).
280
+
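+ For instance, a Google-style docstring documents arguments, return values and raised errors in dedicated `Args:`, `Returns:` and `Raises:` sections. Here is a minimal sketch (the function below is purely illustrative and not part of the library):
+
+ ```python
+ def linear_beta_schedule(num_steps, beta_start=0.0001, beta_end=0.02):
+     """Builds a linear beta schedule for a diffusion process.
+
+     Args:
+         num_steps: Positive number of diffusion steps.
+         beta_start: Noise variance used at the first step.
+         beta_end: Noise variance used at the last step.
+
+     Returns:
+         A list of `num_steps` floats interpolated linearly from
+         `beta_start` to `beta_end`.
+
+     Raises:
+         ValueError: If `num_steps` is not positive.
+     """
+     if num_steps <= 0:
+         raise ValueError("`num_steps` must be positive.")
+     step = (beta_end - beta_start) / max(num_steps - 1, 1)
+     return [beta_start + i * step for i in range(num_steps)]
+ ```
+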
281
+ **This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md).**
282
+
283
+ ### Syncing forked main with upstream (HuggingFace) main
284
+
285
+ Syncing with a branch and PR pings the upstream repository, adds reference notes to each upstream PR, and sends unnecessary notifications to the developers involved in these PRs.
286
+ To avoid this when syncing the main branch of a forked repository, please follow these steps:
287
+ 1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main.
288
+ 2. If a PR is absolutely necessary, use the following steps after checking out your branch:
289
+ ```bash
290
+ $ git checkout -b your-branch-for-syncing
291
+ $ git pull --squash --no-commit upstream main
292
+ $ git commit -m '<your message without GitHub references>'
293
+ $ git push --set-upstream origin your-branch-for-syncing
294
+ ```
huggingface_diffusers/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
huggingface_diffusers/MANIFEST.in ADDED
@@ -0,0 +1,2 @@
1
+ include LICENSE
2
+ include src/diffusers/utils/model_card_template.md
huggingface_diffusers/Makefile ADDED
@@ -0,0 +1,98 @@
1
+ .PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples
2
+
3
+ # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
4
+ export PYTHONPATH = src
5
+
6
+ check_dirs := examples scripts src tests utils
7
+
8
+ modified_only_fixup:
9
+ $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
10
+ @if test -n "$(modified_py_files)"; then \
11
+ echo "Checking/fixing $(modified_py_files)"; \
12
+ black --preview $(modified_py_files); \
13
+ isort $(modified_py_files); \
14
+ flake8 $(modified_py_files); \
15
+ else \
16
+ echo "No library .py files were modified"; \
17
+ fi
18
+
19
+ # Update src/diffusers/dependency_versions_table.py
20
+
21
+ deps_table_update:
22
+ @python setup.py deps_table_update
23
+
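+ # Fail if the dependency table is stale: snapshot its checksum, regenerate it, and compare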
24
+ deps_table_check_updated:
25
+ @md5sum src/diffusers/dependency_versions_table.py > md5sum.saved
26
+ @python setup.py deps_table_update
27
+ @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1)
28
+ @rm md5sum.saved
29
+
30
+ # autogenerating code
31
+
32
+ autogenerate_code: deps_table_update
33
+
34
+ # Check that the repo is in a good state
35
+
36
+ repo-consistency:
37
+ python utils/check_dummies.py
38
+ python utils/check_repo.py
39
+ python utils/check_inits.py
40
+
41
+ # this target runs checks on all files
42
+
43
+ quality:
44
+ black --check --preview $(check_dirs)
45
+ isort --check-only $(check_dirs)
46
+ flake8 $(check_dirs)
47
+ doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source
48
+ python utils/check_doc_toc.py
49
+
50
+ # Format source code automatically and check if there are any problems left that need manual fixing
51
+
52
+ extra_style_checks:
53
+ python utils/custom_init_isort.py
54
+ doc-builder style src/diffusers docs/source --max_len 119 --path_to_docs docs/source
55
+ python utils/check_doc_toc.py --fix_and_overwrite
56
+
57
+ # this target runs checks on all files and potentially modifies some of them
58
+
59
+ style:
60
+ black --preview $(check_dirs)
61
+ isort $(check_dirs)
62
+ ${MAKE} autogenerate_code
63
+ ${MAKE} extra_style_checks
64
+
65
+ # Super fast fix and check target that only works on relevant modified files since the branch was made
66
+
67
+ fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency
68
+
69
+ # Make marked copies of code snippets conform to the original
70
+
71
+ fix-copies:
72
+ python utils/check_copies.py --fix_and_overwrite
73
+ python utils/check_dummies.py --fix_and_overwrite
74
+
75
+ # Run tests for the library
76
+
77
+ test:
78
+ python -m pytest -n auto --dist=loadfile -s -v ./tests/
79
+
80
+ # Run tests for examples
81
+
82
+ test-examples:
83
+ python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/
84
+
85
+
86
+ # Release stuff
87
+
88
+ pre-release:
89
+ python utils/release.py
90
+
91
+ pre-patch:
92
+ python utils/release.py --patch
93
+
94
+ post-release:
95
+ python utils/release.py --post_release
96
+
97
+ post-patch:
98
+ python utils/release.py --post_release --patch
huggingface_diffusers/README.md ADDED
@@ -0,0 +1,563 @@
1
+ <p align="center">
2
+ <br>
3
+ <img src="./docs/source/en/imgs/diffusers_library.jpg" width="400"/>
4
+ <br>
5
+ </p>
6
+ <p align="center">
7
+ <a href="https://github.com/huggingface/diffusers/blob/main/LICENSE">
8
+ <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue">
9
+ </a>
10
+ <a href="https://github.com/huggingface/diffusers/releases">
11
+ <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg">
12
+ </a>
13
+ <a href="CODE_OF_CONDUCT.md">
14
+ <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg">
15
+ </a>
16
+ </p>
17
+
18
+ 🤗 Diffusers provides pretrained diffusion models across multiple modalities, such as vision and audio, and serves
19
+ as a modular toolbox for inference and training of diffusion models.
20
+
21
+ More precisely, 🤗 Diffusers offers:
22
+
23
+ - State-of-the-art diffusion pipelines that can be run in inference with just a couple of lines of code (see the short sketch after this list, or [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)). Check [this overview](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/README.md#pipelines-summary) to see all supported pipelines and their corresponding official papers.
24
+ - Various noise schedulers that can be used interchangeably for the preferred speed vs. quality trade-off in inference (see [src/diffusers/schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers)).
25
+ - Multiple types of models, such as UNet, can be used as building blocks in an end-to-end diffusion system (see [src/diffusers/models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models)).
26
+ - Training examples to show how to train the most popular diffusion model tasks (see [examples](https://github.com/huggingface/diffusers/tree/main/examples), *e.g.* [unconditional-image-generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation)).
27
+
28
+ ## Installation
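+ As a quick illustration of the pipeline API, here is a minimal sketch of unconditional image generation. It assumes the library is installed (see below) and uses the public `google/ddpm-cat-256` checkpoint, but any DDPM checkpoint on the Hub works the same way:
+
+ ```python
+ from diffusers import DDPMPipeline
+
+ # download the pretrained denoising pipeline from the Hub
+ pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
+
+ # run the full reverse-diffusion loop and save the generated sample
+ image = pipe().images[0]
+ image.save("ddpm_generated_cat.png")
+ ```
+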
29
+
30
+ ### For PyTorch
31
+
32
+ **With `pip`** (official package)
33
+
34
+ ```bash
35
+ pip install --upgrade diffusers[torch]
36
+ ```
37
+
38
+ **With `conda`** (maintained by the community)
39
+
40
+ ```sh
41
+ conda install -c conda-forge diffusers
42
+ ```
43
+
44
+ ### For Flax
45
+
46
+ **With `pip`**
47
+
48
+ ```bash
49
+ pip install --upgrade diffusers[flax]
50
+ ```
51
+
52
+ **Apple Silicon (M1/M2) support**
53
+
54
+ Please refer to [the documentation](https://huggingface.co/docs/diffusers/optimization/mps).
55
+
56
+ ## Contributing
57
+
58
+ We ❤️ contributions from the open-source community!
59
+ If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md).
60
+ You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library.
61
+ - See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute
62
+ - See [New model/pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) to contribute exciting new diffusion models / diffusion pipelines
63
+ - See [New scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22)
64
+
65
+ Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions and personal projects, or
66
+ just hang out ☕.
67
+
68
+ ## Quickstart
69
+
70
+ In order to get started, we recommend taking a look at two notebooks:
71
+
72
+ - The [Getting started with Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) notebook, which showcases an end-to-end example of usage for diffusion models, schedulers and pipelines.
73
+ Take a look at this notebook to learn how to use the pipeline abstraction, which takes care of everything (model, scheduler, noise handling) for you, and also to understand each independent building block in the library.
74
+ - The [Training a diffusers model](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook summarizes diffusion models training methods. This notebook takes a step-by-step approach to training your
75
+ diffusion models on an image dataset, with explanatory graphics.
76
+
77
+ ## Stable Diffusion is fully compatible with `diffusers`!
78
+
79
+ Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), [LAION](https://laion.ai/) and [RunwayML](https://runwayml.com/). It's trained on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. This model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and runs on a GPU with at least 4GB VRAM.
80
+ See the [model card](https://huggingface.co/CompVis/stable-diffusion) for more information.
81
+
82
+
83
+ ### Text-to-Image generation with Stable Diffusion
84
+
85
+ First let's install the required dependencies:
86
+
87
+ ```bash
88
+ pip install --upgrade diffusers transformers accelerate
89
+ ```
90
+
91
+ We recommend using the model in [half-precision (`fp16`)](https://pytorch.org/blog/accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision/) as it almost always gives the same results as full
92
+ precision while being roughly twice as fast and requiring half the amount of GPU RAM.
93
+
94
+ ```python
95
+ import torch
96
+ from diffusers import StableDiffusionPipeline
97
+
98
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
99
+ pipe = pipe.to("cuda")
100
+
101
+ prompt = "a photo of an astronaut riding a horse on mars"
102
+ image = pipe(prompt).images[0]
103
+ ```
104
+
105
+ #### Running the model locally
106
+
107
+ You can also simply download the model folder and pass the path to the local folder to the `StableDiffusionPipeline`.
108
+
109
+ ```bash
110
+ git lfs install
111
+ git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
112
+ ```
113
+
114
+ Assuming the folder is stored locally under `./stable-diffusion-v1-5`, you can run Stable Diffusion
115
+ as follows:
116
+
117
+ ```python
118
+ pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
119
+ pipe = pipe.to("cuda")
120
+
121
+ prompt = "a photo of an astronaut riding a horse on mars"
122
+ image = pipe(prompt).images[0]
123
+ ```
124
+
125
+ If you are limited by GPU memory, you might want to consider chunking the attention computation in addition
126
+ to using `fp16`.
127
+ The following snippet should result in less than 4GB of VRAM usage.
128
+
129
+ ```python
130
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
131
+ pipe = pipe.to("cuda")
132
+
133
+ prompt = "a photo of an astronaut riding a horse on mars"
134
+ pipe.enable_attention_slicing()
135
+ image = pipe(prompt).images[0]
136
+ ```
137
+
138
+ If you wish to use a different scheduler (e.g. DDIM, LMS, PNDM/PLMS), you can either pass it
139
+ to `from_pretrained` or swap it in on an existing pipeline, as shown below.
140
+
141
+ ```python
142
+ from diffusers import LMSDiscreteScheduler
143
+
144
+ pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
145
+
146
+ prompt = "a photo of an astronaut riding a horse on mars"
147
+ image = pipe(prompt).images[0]
148
+
149
+ image.save("astronaut_rides_horse.png")
150
+ ```
151
+
152
+ If you want to run Stable Diffusion on CPU or you want to have maximum precision on GPU,
153
+ please run the model in the default *full-precision* setting:
154
+
155
+ ```python
156
+ from diffusers import StableDiffusionPipeline
157
+
158
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
159
+
160
+ # disable the following line if you run on CPU
161
+ pipe = pipe.to("cuda")
162
+
163
+ prompt = "a photo of an astronaut riding a horse on mars"
164
+ image = pipe(prompt).images[0]
165
+
166
+ image.save("astronaut_rides_horse.png")
167
+ ```
168
+
169
+ ### JAX/Flax
170
+
171
+ Diffusers offers a JAX / Flax implementation of Stable Diffusion for very fast inference. JAX shines especially on TPU hardware because each TPU server has 8 accelerators working in parallel, but it runs great on GPUs too.
172
+
173
+ Running the pipeline with the default PNDMScheduler:
174
+
175
+ ```python
176
+ import jax
177
+ import numpy as np
178
+ from flax.jax_utils import replicate
179
+ from flax.training.common_utils import shard
180
+
181
+ from diffusers import FlaxStableDiffusionPipeline
182
+
183
+ pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
184
+ "runwayml/stable-diffusion-v1-5", revision="flax", dtype=jax.numpy.bfloat16
185
+ )
186
+
187
+ prompt = "a photo of an astronaut riding a horse on mars"
188
+
189
+ prng_seed = jax.random.PRNGKey(0)
190
+ num_inference_steps = 50
191
+
192
+ num_samples = jax.device_count()
193
+ prompt = num_samples * [prompt]
194
+ prompt_ids = pipeline.prepare_inputs(prompt)
195
+
196
+ # shard inputs and rng
197
+ params = replicate(params)
198
+ prng_seed = jax.random.split(prng_seed, jax.device_count())
199
+ prompt_ids = shard(prompt_ids)
200
+
201
+ images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
202
+ images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
203
+ ```
204
+
205
+ **Note**:
206
+ If you are limited by TPU memory, please make sure to load the `FlaxStableDiffusionPipeline` in `bfloat16` precision instead of the default `float32` precision as done above. You can do so by telling diffusers to load the weights from the `bf16` branch.
207
+
208
+ ```python
209
+ import jax
210
+ import numpy as np
211
+ from flax.jax_utils import replicate
212
+ from flax.training.common_utils import shard
213
+
214
+ from diffusers import FlaxStableDiffusionPipeline
215
+
216
+ pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
217
+ "runwayml/stable-diffusion-v1-5", revision="bf16", dtype=jax.numpy.bfloat16
218
+ )
219
+
220
+ prompt = "a photo of an astronaut riding a horse on mars"
221
+
222
+ prng_seed = jax.random.PRNGKey(0)
223
+ num_inference_steps = 50
224
+
225
+ num_samples = jax.device_count()
226
+ prompt = num_samples * [prompt]
227
+ prompt_ids = pipeline.prepare_inputs(prompt)
228
+
229
+ # shard inputs and rng
230
+ params = replicate(params)
231
+ prng_seed = jax.random.split(prng_seed, jax.device_count())
232
+ prompt_ids = shard(prompt_ids)
233
+
234
+ images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
235
+ images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
236
+ ```
237
+
238
+ Diffusers also has an Image-to-Image generation pipeline with Flax/JAX:
239
+ ```python
240
+ import jax
241
+ import numpy as np
242
+ import jax.numpy as jnp
243
+ from flax.jax_utils import replicate
244
+ from flax.training.common_utils import shard
245
+ import requests
246
+ from io import BytesIO
247
+ from PIL import Image
248
+ from diffusers import FlaxStableDiffusionImg2ImgPipeline
249
+
250
+ def create_key(seed=0):
251
+ return jax.random.PRNGKey(seed)
252
+ rng = create_key(0)
253
+
254
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
255
+ response = requests.get(url)
256
+ init_img = Image.open(BytesIO(response.content)).convert("RGB")
257
+ init_img = init_img.resize((768, 512))
258
+
259
+ prompts = "A fantasy landscape, trending on artstation"
260
+
261
+ pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained(
262
+ "CompVis/stable-diffusion-v1-4", revision="flax",
263
+ dtype=jnp.bfloat16,
264
+ )
265
+
266
+ num_samples = jax.device_count()
267
+ rng = jax.random.split(rng, jax.device_count())
268
+ prompt_ids, processed_image = pipeline.prepare_inputs(prompt=[prompts] * num_samples, image=[init_img] * num_samples)
269
+ p_params = replicate(params)
270
+ prompt_ids = shard(prompt_ids)
271
+ processed_image = shard(processed_image)
272
+
273
+ output = pipeline(
274
+ prompt_ids=prompt_ids,
275
+ image=processed_image,
276
+ params=p_params,
277
+ prng_seed=rng,
278
+ strength=0.75,
279
+ num_inference_steps=50,
280
+ jit=True,
281
+ height=512,
282
+ width=768).images
283
+
284
+ output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
285
+ ```
286
+
287
+ Diffusers also has a Text-guided inpainting pipeline with Flax/JAX:
288
+
289
+ ```python
290
+ import jax
291
+ import numpy as np
292
+ from flax.jax_utils import replicate
293
+ from flax.training.common_utils import shard
294
+ import PIL
295
+ import requests
296
+ from io import BytesIO
297
+
298
+
299
+ from diffusers import FlaxStableDiffusionInpaintPipeline
300
+
301
+ def download_image(url):
302
+ response = requests.get(url)
303
+ return PIL.Image.open(BytesIO(response.content)).convert("RGB")
304
+ img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
305
+ mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
306
+
307
+ init_image = download_image(img_url).resize((512, 512))
308
+ mask_image = download_image(mask_url).resize((512, 512))
309
+
310
+ pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained("xvjiarui/stable-diffusion-2-inpainting")
311
+
312
+ prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
313
+ prng_seed = jax.random.PRNGKey(0)
314
+ num_inference_steps = 50
315
+
316
+ num_samples = jax.device_count()
317
+ prompt = num_samples * [prompt]
318
+ init_image = num_samples * [init_image]
319
+ mask_image = num_samples * [mask_image]
320
+ prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
321
+
322
+
323
+ # shard inputs and rng
324
+ params = replicate(params)
325
+ prng_seed = jax.random.split(prng_seed, jax.device_count())
326
+ prompt_ids = shard(prompt_ids)
327
+ processed_masked_images = shard(processed_masked_images)
328
+ processed_masks = shard(processed_masks)
329
+
330
+ images = pipeline(prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True).images
331
+ images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
332
+ ```
333
+
334
+ ### Image-to-Image text-guided generation with Stable Diffusion
335
+
336
+ The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images.
337
+
338
+ ```python
339
+ import requests
340
+ import torch
341
+ from PIL import Image
342
+ from io import BytesIO
343
+
344
+ from diffusers import StableDiffusionImg2ImgPipeline
345
+
346
+ # load the pipeline
347
+ device = "cuda"
348
+ model_id_or_path = "runwayml/stable-diffusion-v1-5"
349
+ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
350
+
351
+ # or download via git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
352
+ # and pass `model_id_or_path="./stable-diffusion-v1-5"`.
353
+ pipe = pipe.to(device)
354
+
355
+ # let's download an initial image
356
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
357
+
358
+ response = requests.get(url)
359
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
360
+ init_image = init_image.resize((768, 512))
361
+
362
+ prompt = "A fantasy landscape, trending on artstation"
363
+
364
+ images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
365
+
366
+ images[0].save("fantasy_landscape.png")
367
+ ```
368
+ You can also run this example on Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
369
+
370
+ ### In-painting using Stable Diffusion
371
+
372
+ The `StableDiffusionInpaintPipeline` lets you edit specific parts of an image by providing a mask and a text prompt.
373
+
374
+ ```python
375
+ import PIL
376
+ import requests
377
+ import torch
378
+ from io import BytesIO
379
+
380
+ from diffusers import StableDiffusionInpaintPipeline
381
+
382
+ def download_image(url):
383
+     response = requests.get(url)
384
+     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
385
+
386
+ img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
387
+ mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
388
+
389
+ init_image = download_image(img_url).resize((512, 512))
390
+ mask_image = download_image(mask_url).resize((512, 512))
391
+
392
+ pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16)
393
+ pipe = pipe.to("cuda")
394
+
395
+ prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
396
+ image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
397
+ ```
398
+
399
+ ### Tweak prompts reusing seeds and latents
400
+
401
+ You can generate your own latents to reproduce results, or tweak the prompt for a specific result you liked.
402
+ Please have a look at [Reusing seeds for deterministic generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/reusing_seeds).
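+
+ As a minimal sketch (the seed value and the prompt tweak are illustrative):
+
+ ```python
+ import torch
+ from diffusers import StableDiffusionPipeline
+
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+ pipe = pipe.to("cuda")
+
+ # fixing the generator seed makes the sampled latents, and hence the image, reproducible
+ generator = torch.Generator(device="cuda").manual_seed(1024)
+ image = pipe("a photo of an astronaut riding a horse on mars", generator=generator).images[0]
+
+ # re-seed with the same value and tweak only the prompt to refine a result you liked
+ generator = torch.Generator(device="cuda").manual_seed(1024)
+ image = pipe("a photo of an astronaut riding a horse on the moon", generator=generator).images[0]
+ ```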
403
+
404
+ ## Fine-Tuning Stable Diffusion
405
+
406
+ Fine-tuning techniques make it possible to adapt Stable Diffusion to your own dataset, or add new subjects to it. These are some of the techniques supported in `diffusers`:
407
+
408
410
+ - Textual Inversion. Capture novel concepts from a small set of sample images, and associate them with new "words" in the embedding space of the text encoder. These special words can then be used within text prompts to achieve very fine-grained control of the resulting images. Please refer to [our training examples](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) or [documentation](https://huggingface.co/docs/diffusers/training/text_inversion) to try it for yourself.
411
+
412
+ - Dreambooth. Another technique to capture new concepts in Stable Diffusion. This method fine-tunes the UNet (and, optionally, also the text encoder) of the pipeline to achieve impressive results. Please refer to [our training example](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) and [training report](https://huggingface.co/blog/dreambooth) for additional details and training recommendations.
413
+
414
+ - Full Stable Diffusion fine-tuning. If you have a more sizable dataset with a specific look or style, you can fine-tune Stable Diffusion so that it outputs images following those examples. This was the approach taken to create [a Pokémon Stable Diffusion model](https://huggingface.co/justinpinkney/pokemon-stable-diffusion) (by Justin Pinkney / Lambda Labs) and [a Japanese-specific version of Stable Diffusion](https://huggingface.co/spaces/rinna/japanese-stable-diffusion) (by [Rinna Co.](https://github.com/rinnakk/japanese-stable-diffusion/) and others). You can start at [our text-to-image fine-tuning example](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) and go from there; once trained, the checkpoint loads like any other pipeline, as sketched below.
415
+
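+ A minimal loading sketch (the model path and the `sks` concept token are hypothetical placeholders for the output of one of the training runs above):
+
+ ```python
+ import torch
+ from diffusers import StableDiffusionPipeline
+
+ # "path-to-your-trained-model" stands in for the output_dir of a training run
+ pipe = StableDiffusionPipeline.from_pretrained("path-to-your-trained-model", torch_dtype=torch.float16)
+ pipe = pipe.to("cuda")
+
+ # for Dreambooth or Textual Inversion, reference the learned concept token in the prompt
+ image = pipe("a photo of sks dog in a bucket").images[0]
+ image.save("finetuned-result.png")
+ ```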
416
+
417
+ ## Stable Diffusion Community Pipelines
418
+
419
+ The release of Stable Diffusion as an open source model has fostered a lot of interesting ideas and experimentation.
420
+ Our [Community Examples folder](https://github.com/huggingface/diffusers/tree/main/examples/community) contains many ideas worth exploring, like interpolating to create animated videos, using CLIP Guidance for additional prompt fidelity, term weighting, and much more! [Take a look](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview) and [contribute your own](https://huggingface.co/docs/diffusers/using-diffusers/contribute_pipeline).
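+
+ For example, the dummy "one-step-unet" community pipeline can be loaded by passing its file name via the `custom_pipeline` argument:
+
+ ```python
+ from diffusers import DiffusionPipeline
+
+ # loads examples/community/one_step_unet.py on top of the base checkpoint
+ pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
+ pipe()
+ ```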
421
+
422
+ ## Other Examples
423
+
424
+ There are many ways to try running Diffusers! Here we outline code-focused tools (primarily using `DiffusionPipeline`s and Google Colab) and interactive web tools.
425
+
426
+ ### Running Code
427
+
428
+ If you want to run the code yourself 💻, you can try out:
429
+ - [Text-to-Image Latent Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256)
430
+ ```python
431
+ # !pip install diffusers["torch"] transformers
432
+ from diffusers import DiffusionPipeline
433
+
434
+ device = "cuda"
435
+ model_id = "CompVis/ldm-text2im-large-256"
436
+
437
+ # load model and scheduler
438
+ ldm = DiffusionPipeline.from_pretrained(model_id)
439
+ ldm = ldm.to(device)
440
+
441
+ # run pipeline in inference (sample random noise and denoise)
442
+ prompt = "A painting of a squirrel eating a burger"
443
+ image = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images[0]
444
+
445
+ # save image
446
+ image.save("squirrel.png")
447
+ ```
448
+ - [Unconditional Diffusion with discrete scheduler](https://huggingface.co/google/ddpm-celebahq-256)
449
+ ```python
450
+ # !pip install diffusers["torch"]
451
+ from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline
452
+
453
+ model_id = "google/ddpm-celebahq-256"
454
+ device = "cuda"
455
+
456
+ # load model and scheduler
457
+ ddpm = DDPMPipeline.from_pretrained(model_id) # you can replace DDPMPipeline with DDIMPipeline or PNDMPipeline for faster inference
458
+ ddpm.to(device)
459
+
460
+ # run pipeline in inference (sample random noise and denoise)
461
+ image = ddpm().images[0]
462
+
463
+ # save image
464
+ image.save("ddpm_generated_image.png")
465
+ ```
466
+ - [Unconditional Latent Diffusion](https://huggingface.co/CompVis/ldm-celebahq-256)
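+ ```python
+ # !pip install diffusers["torch"]
+ # a short sketch for sampling the unconditional latent diffusion checkpoint above;
+ # the step count is illustrative
+ from diffusers import DiffusionPipeline
+
+ pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-celebahq-256")
+ pipe = pipe.to("cuda")
+
+ # run pipeline in inference (sample random noise and denoise)
+ image = pipe(num_inference_steps=200).images[0]
+
+ # save image
+ image.save("ldm_generated_image.png")
+ ```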
467
+ - [Unconditional Diffusion with continuous scheduler](https://huggingface.co/google/ncsnpp-ffhq-1024)
468
+
469
+ **Other Image Notebooks**:
470
+ * [image-to-image generation with Stable Diffusion](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb) ![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg),
471
+ * [tweak images via repeated Stable Diffusion seeds](https://colab.research.google.com/github/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb) ![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg),
472
+
473
+ **Diffusers for Other Modalities**:
474
+ * [Molecule conformation generation](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/geodiff_molecule_conformation.ipynb) ![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg),
475
+ * [Model-based reinforcement learning](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb) ![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg),
476
+
477
+ ### Web Demos
478
+ If you just want to play around with some web demos, you can try out the following 🚀 Spaces:
479
+ | Model | Hugging Face Spaces |
480
+ |-------------------------------- |------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
481
+ | Text-to-Image Latent Diffusion | [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/CompVis/text2img-latent-diffusion) |
482
+ | Faces generator | [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/CompVis/celeba-latent-diffusion) |
483
+ | DDPM with different schedulers | [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/fusing/celeba-diffusion) |
484
+ | Conditional generation from sketch | [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/huggingface/diffuse-the-rest) |
485
+ | Composable diffusion | [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Shuang59/Composable-Diffusion) |
486
+
487
+ ## Definitions
488
+
489
+ **Models**: Neural network that models $p_\theta(\mathbf{x}_{t-1}|\mathbf{x}_t)$ (see image below) and is trained end-to-end to *denoise* a noisy input to an image.
490
+ *Examples*: UNet, Conditioned UNet, 3D UNet, Transformer UNet
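+
+ As a rough sketch of this interface (shapes follow the `google/ddpm-celebahq-256` checkpoint used earlier in this README):
+
+ ```python
+ import torch
+ from diffusers import UNet2DModel
+
+ model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
+
+ sample = torch.randn(1, 3, 256, 256)  # a noisy input x_t
+ timestep = torch.tensor([50])         # the diffusion step t
+ with torch.no_grad():
+     noise_pred = model(sample, timestep).sample  # the model's denoising prediction
+ ```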
491
+
492
+ <p align="center">
493
+ <img src="https://user-images.githubusercontent.com/10695622/174349667-04e9e485-793b-429a-affe-096e8199ad5b.png" width="800"/>
494
+ <br>
495
+ <em> Figure from DDPM paper (https://arxiv.org/abs/2006.11239). </em>
496
+ </p>
497
+
498
+ **Schedulers**: Algorithm class for both **inference** and **training**.
499
+ The class provides functionality to compute the previous image according to the alpha/beta schedule, as well as to predict the noise for training. Also known as **Samplers**.
500
+ *Examples*: [DDPM](https://arxiv.org/abs/2006.11239), [DDIM](https://arxiv.org/abs/2010.02502), [PNDM](https://arxiv.org/abs/2202.09778), [DEIS](https://arxiv.org/abs/2204.13902)
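+
+ A minimal sketch of both roles (random tensors stand in for real images and model outputs):
+
+ ```python
+ import torch
+ from diffusers import DDPMScheduler
+
+ scheduler = DDPMScheduler(num_train_timesteps=1000)
+
+ # training: corrupt a clean sample x_0 into x_t according to the beta schedule
+ clean_sample = torch.randn(1, 3, 256, 256)  # stand-in for a real image batch
+ noise = torch.randn_like(clean_sample)
+ timesteps = torch.randint(0, 1000, (1,))
+ noisy_sample = scheduler.add_noise(clean_sample, noise, timesteps)
+
+ # inference: compute x_{t-1} from x_t given the model's noise prediction
+ scheduler.set_timesteps(50)
+ model_output = torch.randn_like(noisy_sample)  # stand-in for unet(x_t, t).sample
+ prev_sample = scheduler.step(model_output, scheduler.timesteps[0], noisy_sample).prev_sample
+ ```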
501
+
502
+ <p align="center">
503
+ <img src="https://user-images.githubusercontent.com/10695622/174349706-53d58acc-a4d1-4cda-b3e8-432d9dc7ad38.png" width="800"/>
504
+ <br>
505
+ <em> Sampling and training algorithms. Figure from DDPM paper (https://arxiv.org/abs/2006.11239). </em>
506
+ </p>
507
+
508
+
509
+ **Diffusion Pipeline**: End-to-end pipeline that includes multiple diffusion models, possibly text encoders, ...
510
+ *Examples*: Glide, Latent-Diffusion, Imagen, DALL-E 2
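+
+ As a quick sketch, the components bundled by a Stable Diffusion pipeline can be inspected directly:
+
+ ```python
+ from diffusers import StableDiffusionPipeline
+
+ pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+
+ print(pipe.text_encoder.__class__.__name__)  # CLIPTextModel (from transformers)
+ print(pipe.unet.__class__.__name__)          # UNet2DConditionModel
+ print(pipe.vae.__class__.__name__)           # AutoencoderKL
+ print(pipe.scheduler.__class__.__name__)     # the default scheduler, e.g. PNDMScheduler
+ ```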
511
+
512
+ <p align="center">
513
+ <img src="https://user-images.githubusercontent.com/10695622/174348898-481bd7c2-5457-4830-89bc-f0907756f64c.jpeg" width="550"/>
514
+ <br>
515
+ <em> Figure from Imagen (https://imagen.research.google/). </em>
516
+ </p>
517
+
518
+ ## Philosophy
519
+
520
+ - Readability and clarity are preferred over highly optimized code. Strong importance is placed on providing readable, intuitive and elementary code design. *E.g.*, the provided [schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers) are separated from the provided [models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and provide well-commented code that can be read alongside the original paper.
521
+ - Diffusers is **modality independent** and focuses on providing pretrained models and tools to build systems that generate **continuous outputs**, *e.g.* vision and audio.
522
+ - Diffusion models and schedulers are provided as concise, elementary building blocks. In contrast, diffusion pipelines are a collection of end-to-end diffusion systems that can be used out-of-the-box, should stay as close as possible to their original implementation and can include components of another library, such as text-encoders. Examples for diffusion pipelines are [Glide](https://github.com/openai/glide-text2im) and [Latent Diffusion](https://github.com/CompVis/latent-diffusion).
523
+
524
+ ## In the works
525
+
526
+ For the first release, 🤗 Diffusers focuses on text-to-image diffusion techniques. However, diffusion models can be used for much more than that! Over the upcoming releases, we'll be focusing on:
527
+
528
+ - Diffusers for audio
529
+ - Diffusers for reinforcement learning (initial work happening in https://github.com/huggingface/diffusers/pull/105).
530
+ - Diffusers for video generation
531
+ - Diffusers for molecule generation (initial work happening in https://github.com/huggingface/diffusers/pull/54)
532
+
533
+ A few pipeline components are already being worked on, namely:
534
+
535
+ - BDDMPipeline for spectrogram-to-sound vocoding
536
+ - GLIDEPipeline to support OpenAI's GLIDE model
537
+ - Grad-TTS for text-to-audio generation / conditional audio generation
538
+
539
+ We want diffusers to be a toolbox useful for diffusion models in general; if you find yourself limited in any way by the current API, or would like to see additional models, schedulers, or techniques, please open a [GitHub issue](https://github.com/huggingface/diffusers/issues) mentioning what you would like to see.
540
+
541
+ ## Credits
542
+
543
+ This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API could not be as polished as it is today:
544
+
545
+ - @CompVis' latent diffusion models library, available [here](https://github.com/CompVis/latent-diffusion)
546
+ - @hojonathanho's original DDPM implementation, available [here](https://github.com/hojonathanho/diffusion), as well as the extremely useful translation into PyTorch by @pesser, available [here](https://github.com/pesser/pytorch_diffusion)
547
+ - @ermongroup's DDIM implementation, available [here](https://github.com/ermongroup/ddim).
548
+ - @yang-song's Score-VE and Score-VP implementations, available [here](https://github.com/yang-song/score_sde_pytorch)
549
+
550
+ We also want to thank @heejkoo for the very helpful overview of papers, code and resources on diffusion models, available [here](https://github.com/heejkoo/Awesome-Diffusion-Models), as well as @crowsonkb and @rromb for useful discussions and insights.
551
+
552
+ ## Citation
553
+
554
+ ```bibtex
555
+ @misc{von-platen-etal-2022-diffusers,
556
+ author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Thomas Wolf},
557
+ title = {Diffusers: State-of-the-art diffusion models},
558
+ year = {2022},
559
+ publisher = {GitHub},
560
+ journal = {GitHub repository},
561
+ howpublished = {\url{https://github.com/huggingface/diffusers}}
562
+ }
563
+ ```
huggingface_diffusers/_typos.toml ADDED
@@ -0,0 +1,13 @@
1
+ # Files for typos
2
+ # Instruction: https://github.com/marketplace/actions/typos-action#getting-started
3
+
4
+ [default.extend-identifiers]
5
+
6
+ [default.extend-words]
7
+ NIN="NIN" # NIN is used in scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py
8
+ nd="np" # nd may be np (numpy)
9
+ parms="parms" # parms is used in scripts/convert_original_stable_diffusion_to_diffusers.py
10
+
11
+
12
+ [files]
13
+ extend-exclude = ["_typos.toml"]
huggingface_diffusers/docker/diffusers-flax-cpu/Dockerfile ADDED
@@ -0,0 +1,44 @@
1
+ FROM ubuntu:20.04
2
+ LABEL maintainer="Hugging Face"
3
+ LABEL repository="diffusers"
4
+
5
+ ENV DEBIAN_FRONTEND=noninteractive
6
+
7
+ RUN apt update && \
8
+ apt install -y bash \
9
+ build-essential \
10
+ git \
11
+ git-lfs \
12
+ curl \
13
+ ca-certificates \
14
+ libsndfile1-dev \
15
+ python3.8 \
16
+ python3-pip \
17
+ python3.8-venv && \
18
+ rm -rf /var/lib/apt/lists
19
+
20
+ # make sure to use venv
21
+ RUN python3 -m venv /opt/venv
22
+ ENV PATH="/opt/venv/bin:$PATH"
23
+
24
+ # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
25
+ # follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
26
+ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
27
+ python3 -m pip install --upgrade --no-cache-dir \
28
+ clu \
29
+ "jax[cpu]>=0.2.16,!=0.3.2" \
30
+ "flax>=0.4.1" \
31
+ "jaxlib>=0.1.65" && \
32
+ python3 -m pip install --no-cache-dir \
33
+ accelerate \
34
+ datasets \
35
+ hf-doc-builder \
36
+ huggingface-hub \
37
+ Jinja2 \
38
+ librosa \
39
+ numpy \
40
+ scipy \
41
+ tensorboard \
42
+ transformers
43
+
44
+ CMD ["/bin/bash"]
huggingface_diffusers/docker/diffusers-flax-tpu/Dockerfile ADDED
@@ -0,0 +1,46 @@
1
+ FROM ubuntu:20.04
2
+ LABEL maintainer="Hugging Face"
3
+ LABEL repository="diffusers"
4
+
5
+ ENV DEBIAN_FRONTEND=noninteractive
6
+
7
+ RUN apt update && \
8
+ apt install -y bash \
9
+ build-essential \
10
+ git \
11
+ git-lfs \
12
+ curl \
13
+ ca-certificates \
14
+ libsndfile1-dev \
15
+ python3.8 \
16
+ python3-pip \
17
+ python3.8-venv && \
18
+ rm -rf /var/lib/apt/lists
19
+
20
+ # make sure to use venv
21
+ RUN python3 -m venv /opt/venv
22
+ ENV PATH="/opt/venv/bin:$PATH"
23
+
24
+ # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
25
+ # follow the instructions here: https://cloud.google.com/tpu/docs/run-in-container#train_a_jax_model_in_a_docker_container
26
+ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
27
+ python3 -m pip install --no-cache-dir \
28
+ "jax[tpu]>=0.2.16,!=0.3.2" \
29
+ -f https://storage.googleapis.com/jax-releases/libtpu_releases.html && \
30
+ python3 -m pip install --upgrade --no-cache-dir \
31
+ clu \
32
+ "flax>=0.4.1" \
33
+ "jaxlib>=0.1.65" && \
34
+ python3 -m pip install --no-cache-dir \
35
+ accelerate \
36
+ datasets \
37
+ hf-doc-builder \
38
+ huggingface-hub \
39
+ Jinja2 \
40
+ librosa \
41
+ numpy \
42
+ scipy \
43
+ tensorboard \
44
+ transformers
45
+
46
+ CMD ["/bin/bash"]
huggingface_diffusers/docker/diffusers-onnxruntime-cpu/Dockerfile ADDED
@@ -0,0 +1,44 @@
1
+ FROM ubuntu:20.04
2
+ LABEL maintainer="Hugging Face"
3
+ LABEL repository="diffusers"
4
+
5
+ ENV DEBIAN_FRONTEND=noninteractive
6
+
7
+ RUN apt update && \
8
+ apt install -y bash \
9
+ build-essential \
10
+ git \
11
+ git-lfs \
12
+ curl \
13
+ ca-certificates \
14
+ libsndfile1-dev \
15
+ python3.8 \
16
+ python3-pip \
17
+ python3.8-venv && \
18
+ rm -rf /var/lib/apt/lists
19
+
20
+ # make sure to use venv
21
+ RUN python3 -m venv /opt/venv
22
+ ENV PATH="/opt/venv/bin:$PATH"
23
+
24
+ # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
25
+ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
26
+ python3 -m pip install --no-cache-dir \
27
+ torch \
28
+ torchvision \
29
+ torchaudio \
30
+ onnxruntime \
31
+ --extra-index-url https://download.pytorch.org/whl/cpu && \
32
+ python3 -m pip install --no-cache-dir \
33
+ accelerate \
34
+ datasets \
35
+ hf-doc-builder \
36
+ huggingface-hub \
37
+ Jinja2 \
38
+ librosa \
39
+ numpy \
40
+ scipy \
41
+ tensorboard \
42
+ transformers
43
+
44
+ CMD ["/bin/bash"]
huggingface_diffusers/docker/diffusers-onnxruntime-cuda/Dockerfile ADDED
@@ -0,0 +1,44 @@
1
+ FROM nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04
2
+ LABEL maintainer="Hugging Face"
3
+ LABEL repository="diffusers"
4
+
5
+ ENV DEBIAN_FRONTEND=noninteractive
6
+
7
+ RUN apt update && \
8
+ apt install -y bash \
9
+ build-essential \
10
+ git \
11
+ git-lfs \
12
+ curl \
13
+ ca-certificates \
14
+ libsndfile1-dev \
15
+ python3.8 \
16
+ python3-pip \
17
+ python3.8-venv && \
18
+ rm -rf /var/lib/apt/lists
19
+
20
+ # make sure to use venv
21
+ RUN python3 -m venv /opt/venv
22
+ ENV PATH="/opt/venv/bin:$PATH"
23
+
24
+ # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
25
+ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
26
+ python3 -m pip install --no-cache-dir \
27
+ torch \
28
+ torchvision \
29
+ torchaudio \
30
+ "onnxruntime-gpu>=1.13.1" \
31
+ --extra-index-url https://download.pytorch.org/whl/cu117 && \
32
+ python3 -m pip install --no-cache-dir \
33
+ accelerate \
34
+ datasets \
35
+ hf-doc-builder \
36
+ huggingface-hub \
37
+ Jinja2 \
38
+ librosa \
39
+ numpy \
40
+ scipy \
41
+ tensorboard \
42
+ transformers
43
+
44
+ CMD ["/bin/bash"]
huggingface_diffusers/docker/diffusers-pytorch-cpu/Dockerfile ADDED
@@ -0,0 +1,43 @@
1
+ FROM ubuntu:20.04
2
+ LABEL maintainer="Hugging Face"
3
+ LABEL repository="diffusers"
4
+
5
+ ENV DEBIAN_FRONTEND=noninteractive
6
+
7
+ RUN apt update && \
8
+ apt install -y bash \
9
+ build-essential \
10
+ git \
11
+ git-lfs \
12
+ curl \
13
+ ca-certificates \
14
+ libsndfile1-dev \
15
+ python3.8 \
16
+ python3-pip \
17
+ python3.8-venv && \
18
+ rm -rf /var/lib/apt/lists
19
+
20
+ # make sure to use venv
21
+ RUN python3 -m venv /opt/venv
22
+ ENV PATH="/opt/venv/bin:$PATH"
23
+
24
+ # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
25
+ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
26
+ python3 -m pip install --no-cache-dir \
27
+ torch \
28
+ torchvision \
29
+ torchaudio \
30
+ --extra-index-url https://download.pytorch.org/whl/cpu && \
31
+ python3 -m pip install --no-cache-dir \
32
+ accelerate \
33
+ datasets \
34
+ hf-doc-builder \
35
+ huggingface-hub \
36
+ Jinja2 \
37
+ librosa \
38
+ numpy \
39
+ scipy \
40
+ tensorboard \
41
+ transformers
42
+
43
+ CMD ["/bin/bash"]
huggingface_diffusers/docker/diffusers-pytorch-cuda/Dockerfile ADDED
@@ -0,0 +1,43 @@
1
+ FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04
2
+ LABEL maintainer="Hugging Face"
3
+ LABEL repository="diffusers"
4
+
5
+ ENV DEBIAN_FRONTEND=noninteractive
6
+
7
+ RUN apt update && \
8
+ apt install -y bash \
9
+ build-essential \
10
+ git \
11
+ git-lfs \
12
+ curl \
13
+ ca-certificates \
14
+ libsndfile1-dev \
15
+ python3.8 \
16
+ python3-pip \
17
+ python3.8-venv && \
18
+ rm -rf /var/lib/apt/lists
19
+
20
+ # make sure to use venv
21
+ RUN python3 -m venv /opt/venv
22
+ ENV PATH="/opt/venv/bin:$PATH"
23
+
24
+ # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
25
+ RUN python3 -m pip install --no-cache-dir --upgrade pip && \
26
+ python3 -m pip install --no-cache-dir \
27
+ torch \
28
+ torchvision \
29
+ torchaudio \
30
+ --extra-index-url https://download.pytorch.org/whl/cu117 && \
31
+ python3 -m pip install --no-cache-dir \
32
+ accelerate \
33
+ datasets \
34
+ hf-doc-builder \
35
+ huggingface-hub \
36
+ Jinja2 \
37
+ librosa \
38
+ numpy \
39
+ scipy \
40
+ tensorboard \
41
+ transformers
42
+
43
+ CMD ["/bin/bash"]
huggingface_diffusers/examples/README.md ADDED
@@ -0,0 +1,70 @@
1
+ <!---
2
+ Copyright 2022 The HuggingFace Team. All rights reserved.
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ -->
15
+
16
+ # 🧨 Diffusers Examples
17
+
18
+ Diffusers examples are a collection of scripts to demonstrate how to effectively use the `diffusers` library
19
+ for a variety of use cases involving training or fine-tuning.
20
+
21
+ **Note**: If you are looking for **official** examples on how to use `diffusers` for inference,
22
+ please have a look at [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines).
23
+
24
+ Our examples aspire to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and **one-purpose-only**.
25
+ More specifically, this means:
26
+
27
+ - **Self-contained**: An example script shall only depend on "pip-install-able" Python packages that can be found in a `requirements.txt` file. Example scripts shall **not** depend on any local files. This means that one can simply download an example script, *e.g.* [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), install the required dependencies, *e.g.* [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt) and execute the example script.
28
+ - **Easy-to-tweak**: While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out of the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data and the training loop to allow you to tweak and edit them as required.
29
+ - **Beginner-friendly**: We do not aim to provide state-of-the-art training scripts for the newest models, but rather examples that can be used as a way to better understand diffusion models and how to use them with the `diffusers` library. We often purposefully leave out certain state-of-the-art methods if we consider them too complex for beginners.
30
+ - **One-purpose-only**: Examples should show one task and one task only. Even if tasks are very similar from a modeling
31
+ point of view, *e.g.* image super-resolution and image modification tend to use the same model and training method, we want examples to showcase only one task to keep them as readable and easy-to-understand as possible.
32
+
33
+ We provide **official** examples that cover the most popular tasks of diffusion models.
34
+ *Official* examples are **actively** maintained by the `diffusers` maintainers and we try to rigorously follow our example philosophy as defined above.
35
+ If you feel like another important example should exist, we are more than happy to welcome a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or directly a [Pull Request](https://github.com/huggingface/diffusers/compare) from you!
36
+
37
+ Training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. Currently we support:
38
+
39
+ | Task | 🤗 Accelerate | 🤗 Datasets | Colab
40
+ |---|---|:---:|:---:|
41
+ | [**Unconditional Image Generation**](./unconditional_image_generation) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
42
+ | [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ |
43
+ | [**Textual Inversion**](./textual_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
44
+ | [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb)
45
+ | [**Reinforcement Learning for Control**](https://github.com/huggingface/diffusers/blob/main/examples/rl/run_diffusers_locomotion.py) | - | - | coming soon.
46
+
47
+ ## Community
48
+
49
+ In addition, we provide **community** examples, which are examples added and maintained by our community.
50
+ Community examples can consist of both *training* examples or *inference* pipelines.
51
+ For such examples, we are more lenient regarding the philosophy defined above and also cannot guarantee to provide maintenance for every issue.
52
+ Examples that are useful for the community, but are either not yet deemed popular or not yet following our above philosophy should go into the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder. The community folder therefore includes training examples and inference pipelines.
53
+ **Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) to show to the community how you like to use `diffusers` 🪄.
54
+
55
+ ## Research Projects
56
+
57
+ We also provide **research_projects** examples that are maintained by the community as defined in the respective research project folders. These examples are useful and offer extended capabilities that are complementary to the official examples. You may refer to [research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) for details.
58
+
59
+ ## Important note
60
+
61
+ To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
62
+ ```bash
63
+ git clone https://github.com/huggingface/diffusers
64
+ cd diffusers
65
+ pip install .
66
+ ```
67
+ Then cd into the example folder of your choice and run
68
+ ```bash
69
+ pip install -r requirements.txt
70
+ ```
huggingface_diffusers/examples/community/README.md ADDED
@@ -0,0 +1,953 @@
1
+ # Community Examples
2
+
3
+ > **For more information about community pipelines, please have a look at [this issue](https://github.com/huggingface/diffusers/issues/841).**
4
+
5
+ **Community** examples consist of both inference and training examples that have been added by the community.
6
+ Please have a look at the following table to get an overview of all community examples. Click on the **Code Example** to get a copy-and-paste ready code example that you can try out.
7
+ If a community pipeline doesn't work as expected, please open an issue and ping the author on it.
8
+
9
+ | Example | Description | Code Example | Colab | Author |
10
+ |:---------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------:|
11
+ | CLIP Guided Stable Diffusion | Doing CLIP guidance for text to image generation with Stable Diffusion | [CLIP Guided Stable Diffusion](#clip-guided-stable-diffusion) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/CLIP_Guided_Stable_diffusion_with_diffusers.ipynb) | [Suraj Patil](https://github.com/patil-suraj/) |
12
+ | One Step U-Net (Dummy) | Example showcasing how to use community pipelines (see https://github.com/huggingface/diffusers/issues/841) | [One Step U-Net](#one-step-unet) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
13
+ | Stable Diffusion Interpolation | Interpolate the latent space of Stable Diffusion between different prompts/seeds | [Stable Diffusion Interpolation](#stable-diffusion-interpolation) | - | [Nate Raw](https://github.com/nateraw/) |
14
+ | Stable Diffusion Mega | **One** Stable Diffusion Pipeline with all functionalities of [Text2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py), [Image2Image](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) and [Inpainting](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | [Stable Diffusion Mega](#stable-diffusion-mega) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
15
+ | Long Prompt Weighting Stable Diffusion | **One** Stable Diffusion Pipeline without tokens length limit, and support parsing weighting in prompt. | [Long Prompt Weighting Stable Diffusion](#long-prompt-weighting-stable-diffusion) | - | [SkyTNT](https://github.com/SkyTNT) |
16
+ | Speech to Image | Using automatic-speech-recognition to transcribe text and Stable Diffusion to generate images | [Speech to Image](#speech-to-image) | - | [Mikail Duzenli](https://github.com/MikailINTech) |
17
+ | Wild Card Stable Diffusion | Stable Diffusion Pipeline that supports prompts that contain wildcard terms (indicated by surrounding double underscores), with values instantiated randomly from a corresponding txt file or a dictionary of possible values | [Wildcard Stable Diffusion](#wildcard-stable-diffusion) | - | [Shyam Sudhakaran](https://github.com/shyamsn97) |
18
+ | [Composable Stable Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/) | Stable Diffusion Pipeline that supports prompts that contain "&#124;" in prompts (as an AND condition) and weights (separated by "&#124;" as well) to positively / negatively weight prompts. | [Composable Stable Diffusion](#composable-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
19
+ | Seed Resizing Stable Diffusion| Stable Diffusion Pipeline that supports resizing an image and retaining the concepts of the 512 by 512 generation. | [Seed Resizing](#seed-resizing) | - | [Mark Rich](https://github.com/MarkRich) |
20
+ | Imagic Stable Diffusion | Stable Diffusion Pipeline that enables writing a text prompt to edit an existing image| [Imagic Stable Diffusion](#imagic-stable-diffusion) | - | [Mark Rich](https://github.com/MarkRich) |
21
+ | Multilingual Stable Diffusion| Stable Diffusion Pipeline that supports prompts in 50 different languages. | [Multilingual Stable Diffusion](#multilingual-stable-diffusion-pipeline) | - | [Juan Carlos Piñeros](https://github.com/juancopi81) |
22
+ | Image to Image Inpainting Stable Diffusion | Stable Diffusion Pipeline that enables the overlaying of two images and subsequent inpainting| [Image to Image Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Alex McKinney](https://github.com/vvvm23) |
23
+ | Text Based Inpainting Stable Diffusion | Stable Diffusion Inpainting Pipeline that enables passing a text prompt to generate the mask for inpainting| [Text Based Inpainting Stable Diffusion](#image-to-image-inpainting-stable-diffusion) | - | [Dhruv Karan](https://github.com/unography) |
24
+ | Bit Diffusion | Diffusion on discrete data | [Bit Diffusion](#bit-diffusion) | - |[Stuti R.](https://github.com/kingstut) |
25
+ | K-Diffusion Stable Diffusion | Run Stable Diffusion with any of [K-Diffusion's samplers](https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py) | [Stable Diffusion with K Diffusion](#stable-diffusion-with-k-diffusion) | - | [Patrick von Platen](https://github.com/patrickvonplaten/) |
26
+ | Checkpoint Merger Pipeline | Diffusion Pipeline that enables merging of saved model checkpoints | [Checkpoint Merger Pipeline](#checkpoint-merger-pipeline) | - | [Naga Sai Abhinay Devarinti](https://github.com/Abhinay1997/) |
27
+ | Stable Diffusion v1.1-1.4 Comparison | Run all 4 model checkpoints for Stable Diffusion and compare their results together | [Stable Diffusion Comparison](#stable-diffusion-comparisons) | - | [Suvaditya Mukherjee](https://github.com/suvadityamuk) |
28
+ | MagicMix | Diffusion Pipeline for semantic mixing of an image and a text prompt | [MagicMix](#magic-mix) | - | [Partho Das](https://github.com/daspartho) |
29
+ | Stable UnCLIP | Diffusion Pipeline for combining prior model (generate clip image embedding from text, UnCLIPPipeline `"kakaobrain/karlo-v1-alpha"`) and decoder pipeline (decode clip image embedding to image, StableDiffusionImageVariationPipeline `"lambdalabs/sd-image-variations-diffusers"` ). | [Stable UnCLIP](#stable-unclip) | - |[Ray Wang](https://wrong.wang) |
30
+
31
+
32
+
33
+ To load a custom pipeline, just pass the name of one of the files in `diffusers/examples/community` via the `custom_pipeline` argument to `DiffusionPipeline`. Feel free to send a PR with your own pipelines; we will merge them quickly.
34
+ ```py
35
+ pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="filename_in_the_community_folder")
36
+ ```
37
+
38
+ ## Example usages
39
+
40
+ ### CLIP Guided Stable Diffusion
41
+
42
+ CLIP guided stable diffusion can help to generate more realistic images
43
+ by guiding stable diffusion at every denoising step with an additional CLIP model.
44
+
45
+ The following code requires roughly 12GB of GPU RAM.
46
+
47
+ ```python
48
+ from diffusers import DiffusionPipeline
49
+ from transformers import CLIPFeatureExtractor, CLIPModel
50
+ import torch
51
+
52
+
53
+ feature_extractor = CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
54
+ clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)
55
+
56
+
57
+ guided_pipeline = DiffusionPipeline.from_pretrained(
58
+ "runwayml/stable-diffusion-v1-5",
59
+ custom_pipeline="clip_guided_stable_diffusion",
60
+ clip_model=clip_model,
61
+ feature_extractor=feature_extractor,
62
+
63
+ torch_dtype=torch.float16,
64
+ )
65
+ guided_pipeline.enable_attention_slicing()
66
+ guided_pipeline = guided_pipeline.to("cuda")
67
+
68
+ prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
69
+
70
+ generator = torch.Generator(device="cuda").manual_seed(0)
71
+ images = []
72
+ for i in range(4):
73
+     image = guided_pipeline(
74
+         prompt,
75
+         num_inference_steps=50,
76
+         guidance_scale=7.5,
77
+         clip_guidance_scale=100,
78
+         num_cutouts=4,
79
+         use_cutouts=False,
80
+         generator=generator,
81
+     ).images[0]
82
+     images.append(image)
83
+
84
+ # save images locally (make sure the output folder exists first)
85
+ import os
+ os.makedirs("clip_guided_sd", exist_ok=True)
+ for i, img in enumerate(images):
86
+     img.save(f"./clip_guided_sd/image_{i}.png")
87
+ ```
88
+
89
+ The `images` list contains a list of PIL images that can be saved locally or displayed directly in a Google Colab.
90
+ Generated images tend to be of higher quality than those obtained natively with Stable Diffusion. E.g. the above script generates the following images:
91
+
92
+ ![clip_guidance](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/clip_guidance/merged_clip_guidance.jpg).
93
+
94
+ ### One Step Unet
95
+
96
+ The dummy "one-step-unet" can be run as follows:
97
+
98
+ ```python
99
+ from diffusers import DiffusionPipeline
100
+
101
+ pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
102
+ pipe()
103
+ ```
104
+
105
+ **Note**: This community pipeline is not useful as a feature, but rather just serves as an example of how community pipelines can be added (see https://github.com/huggingface/diffusers/issues/841).
106
+
107
+ ### Stable Diffusion Interpolation
108
+
109
+ The following code can be run on a GPU of at least 8GB VRAM and should take approximately 5 minutes.
110
+
111
+ ```python
112
+ from diffusers import DiffusionPipeline
113
+ import torch
114
+
115
+ pipe = DiffusionPipeline.from_pretrained(
116
+ "CompVis/stable-diffusion-v1-4",
117
+ revision='fp16',
118
+ torch_dtype=torch.float16,
119
+ safety_checker=None, # Very important for videos...lots of false positives while interpolating
120
+ custom_pipeline="interpolate_stable_diffusion",
121
+ ).to('cuda')
122
+ pipe.enable_attention_slicing()
123
+
124
+ frame_filepaths = pipe.walk(
125
+ prompts=['a dog', 'a cat', 'a horse'],
126
+ seeds=[42, 1337, 1234],
127
+ num_interpolation_steps=16,
128
+ output_dir='./dreams',
129
+ batch_size=4,
130
+ height=512,
131
+ width=512,
132
+ guidance_scale=8.5,
133
+ num_inference_steps=50,
134
+ )
135
+ ```
136
+
137
+ The `walk(...)` function returns a list of images saved under the folder defined by `output_dir`. You can use these images to create videos with Stable Diffusion.
138
+
139
+ > **Please have a look at https://github.com/nateraw/stable-diffusion-videos for more in-detail information on how to create videos using stable diffusion as well as more feature-complete functionality.**
140
+
141
+ ### Stable Diffusion Mega
142
+
143
+ The Stable Diffusion Mega Pipeline lets you use the main use cases of the Stable Diffusion pipeline in a single class.
144
+
145
+ ```python
146
+ #!/usr/bin/env python3
147
+ from diffusers import DiffusionPipeline
148
+ import PIL
149
+ import requests
150
+ from io import BytesIO
151
+ import torch
152
+
153
+
154
+ def download_image(url):
155
+     response = requests.get(url)
156
+     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
157
+
158
+ pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float16, revision="fp16")
159
+ pipe.to("cuda")
160
+ pipe.enable_attention_slicing()
161
+
162
+
163
+ ### Text-to-Image
164
+
165
+ images = pipe.text2img("An astronaut riding a horse").images
166
+
167
+ ### Image-to-Image
168
+
169
+ init_image = download_image("https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
170
+
171
+ prompt = "A fantasy landscape, trending on artstation"
172
+
173
+ images = pipe.img2img(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
174
+
175
+ ### Inpainting
176
+
177
+ img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
178
+ mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
179
+ init_image = download_image(img_url).resize((512, 512))
180
+ mask_image = download_image(mask_url).resize((512, 512))
181
+
182
+ prompt = "a cat sitting on a bench"
183
+ images = pipe.inpaint(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75).images
184
+ ```
185
+
186
+ As shown above, this single pipeline can run text-to-image, image-to-image, and inpainting.
187
+
188
+ ### Long Prompt Weighting Stable Diffusion
189
+ Features of this custom pipeline:
190
+ - Input a prompt without the 77 token length limit.
191
+ - Includes text2img, img2img, and inpainting pipelines.
192
+ - Emphasize/weigh part of your prompt with parentheses like so: `a baby deer with (big eyes)`
193
+ - De-emphasize part of your prompt like so: `a [baby] deer with big eyes`
194
+ - Precisely weigh part of your prompt like so: `a baby deer with (big eyes:1.3)`
195
+
196
+ Prompt weighting equivalents:
197
+ - `a baby deer with` == `(a baby deer with:1.0)`
198
+ - `(big eyes)` == `(big eyes:1.1)`
199
+ - `((big eyes))` == `(big eyes:1.21)`
200
+ - `[big eyes]` == `(big eyes:0.91)`
201
+
202
+ You can run this custom pipeline as follows:
203
+
204
+ #### pytorch
205
+
206
+ ```python
207
+ from diffusers import DiffusionPipeline
208
+ import torch
209
+
210
+ pipe = DiffusionPipeline.from_pretrained(
211
+ 'hakurei/waifu-diffusion',
212
+ custom_pipeline="lpw_stable_diffusion",
213
+
214
+ torch_dtype=torch.float16
215
+ )
216
+ pipe=pipe.to("cuda")
217
+
218
+ prompt = "best_quality (1girl:1.3) bow bride brown_hair closed_mouth frilled_bow frilled_hair_tubes frills (full_body:1.3) fox_ear hair_bow hair_tubes happy hood japanese_clothes kimono long_sleeves red_bow smile solo tabi uchikake white_kimono wide_sleeves cherry_blossoms"
219
+ neg_prompt = "lowres, bad_anatomy, error_body, error_hair, error_arm, error_hands, bad_hands, error_fingers, bad_fingers, missing_fingers, error_legs, bad_legs, multiple_legs, missing_legs, error_lighting, error_shadow, error_reflection, text, error, extra_digit, fewer_digits, cropped, worst_quality, low_quality, normal_quality, jpeg_artifacts, signature, watermark, username, blurry"
220
+
221
+ pipe.text2img(prompt, negative_prompt=neg_prompt, width=512,height=512,max_embeddings_multiples=3).images[0]
222
+
223
+ ```
224
+
225
+ #### onnxruntime
226
+
227
+ ```python
228
+ from diffusers import DiffusionPipeline
229
+ import torch
230
+
231
+ pipe = DiffusionPipeline.from_pretrained(
232
+ 'CompVis/stable-diffusion-v1-4',
233
+ custom_pipeline="lpw_stable_diffusion_onnx",
234
+ revision="onnx",
235
+ provider="CUDAExecutionProvider"
236
+ )
237
+
238
+ prompt = "a photo of an astronaut riding a horse on mars, best quality"
239
+ neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry"
240
+
241
+ pipe.text2img(prompt,negative_prompt=neg_prompt, width=512, height=512, max_embeddings_multiples=3).images[0]
242
+
243
+ ```
244
+
245
+ If you see the warning `Token indices sequence length is longer than the specified maximum sequence length for this model ( *** > 77 ) . Running this sequence through the model will result in indexing errors`, do not worry: it is expected, and this pipeline handles prompts longer than the 77-token limit correctly.
246
+
247
+ ### Speech to Image
248
+
249
+ The following code can generate an image from an audio sample using the pre-trained OpenAI `whisper-small` model and Stable Diffusion.
250
+
251
+ ```Python
252
+ import torch
253
+
254
+ import matplotlib.pyplot as plt
255
+ from datasets import load_dataset
256
+ from diffusers import DiffusionPipeline
257
+ from transformers import (
258
+ WhisperForConditionalGeneration,
259
+ WhisperProcessor,
260
+ )
261
+
262
+
263
+ device = "cuda" if torch.cuda.is_available() else "cpu"
264
+
265
+ ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
266
+
267
+ audio_sample = ds[3]
268
+
269
+ text = audio_sample["text"].lower()
270
+ speech_data = audio_sample["audio"]["array"]
271
+
272
+ model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
273
+ processor = WhisperProcessor.from_pretrained("openai/whisper-small")
274
+
275
+ diffuser_pipeline = DiffusionPipeline.from_pretrained(
276
+ "CompVis/stable-diffusion-v1-4",
277
+ custom_pipeline="speech_to_image_diffusion",
278
+ speech_model=model,
279
+ speech_processor=processor,
280
+
281
+ torch_dtype=torch.float16,
282
+ )
283
+
284
+ diffuser_pipeline.enable_attention_slicing()
285
+ diffuser_pipeline = diffuser_pipeline.to(device)
286
+
287
+ output = diffuser_pipeline(speech_data)
288
+ plt.imshow(output.images[0])
289
+ ```
290
+ This example produces the following image:
291
+
292
+ ![image](https://user-images.githubusercontent.com/45072645/196901736-77d9c6fc-63ee-4072-90b0-dc8b903d63e3.png)
293
+
294
+ ### Wildcard Stable Diffusion
295
+ Following the great examples from https://github.com/jtkelm2/stable-diffusion-webui-1/blob/master/scripts/wildcards.py and https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts#wildcards, here's a minimal implementation that allows users to add "wildcards", denoted by `__wildcard__`, to prompts; these act as placeholders for randomly sampled values given by either a dictionary or a `.txt` file. For example:
296
+
297
+ Say we have a prompt:
298
+
299
+ ```
300
+ prompt = "__animal__ sitting on a __object__ wearing a __clothing__"
301
+ ```
302
+
303
+ We can then define possible values to be sampled for `animal`, `object`, and `clothing`. These can come from a `.txt` file with the same name as the category.
304
+
305
+ The possible values can also be defined / combined by using a dictionary like: `{"animal":["dog", "cat", "mouse"]}`.
306
+
307
+ The actual pipeline works just like `StableDiffusionPipeline`, except the `__call__` method takes in:
308
+
309
+ - `wildcard_files`: list of file paths for wildcard replacement
310
+ - `wildcard_option_dict`: dict with a wildcard as key and a list of possible replacements as value
311
+ - `num_prompt_samples`: number of prompts to sample, uniformly sampling wildcards
312
+
313
+ A full example:
314
+
315
+ create `animal.txt`, with contents like:
316
+
317
+ ```
318
+ dog
319
+ cat
320
+ mouse
321
+ ```
322
+
323
+ create `object.txt`, with contents like:
324
+
325
+ ```
326
+ chair
327
+ sofa
328
+ bench
329
+ ```
330
+
331
+ ```python
332
+ from diffusers import DiffusionPipeline
333
+ import torch
334
+
335
+ pipe = DiffusionPipeline.from_pretrained(
336
+ "CompVis/stable-diffusion-v1-4",
337
+ custom_pipeline="wildcard_stable_diffusion",
338
+
339
+ torch_dtype=torch.float16,
340
+ )
341
+ prompt = "__animal__ sitting on a __object__ wearing a __clothing__"
342
+ out = pipe(
343
+ prompt,
344
+ wildcard_option_dict={
345
+ "clothing":["hat", "shirt", "scarf", "beret"]
346
+ },
347
+ wildcard_files=["object.txt", "animal.txt"],
348
+ num_prompt_samples=1
349
+ )
350
+ ```
351
+
352
+ ### Composable Stable Diffusion
353
+
354
+ [Composable Stable Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/) proposes conjunction and negation (negative prompts) operators for compositional generation with conditional diffusion models.
355
+
356
+ ```python
357
+ import torch as th
358
+ import numpy as np
359
+ import torchvision.utils as tvu
360
+
361
+ from diffusers import DiffusionPipeline
362
+
363
+ import argparse
364
+
365
+ parser = argparse.ArgumentParser()
366
+ parser.add_argument("--prompt", type=str, default="mystical trees | A magical pond | dark",
367
+ help="use '|' as the delimiter to compose separate sentences.")
368
+ parser.add_argument("--steps", type=int, default=50)
369
+ parser.add_argument("--scale", type=float, default=7.5)
370
+ parser.add_argument("--weights", type=str, default="7.5 | 7.5 | -7.5")
371
+ parser.add_argument("--seed", type=int, default=2)
372
+ parser.add_argument("--model_path", type=str, default="CompVis/stable-diffusion-v1-4")
373
+ parser.add_argument("--num_images", type=int, default=1)
374
+ args = parser.parse_args()
375
+
376
+ has_cuda = th.cuda.is_available()
377
+ device = th.device('cpu' if not has_cuda else 'cuda')
378
+
379
+ prompt = args.prompt
380
+ scale = args.scale
381
+ steps = args.steps
382
+
383
+ pipe = DiffusionPipeline.from_pretrained(
384
+ args.model_path,
385
+ custom_pipeline="composable_stable_diffusion",
386
+ ).to(device)
387
+
388
+ pipe.safety_checker = None
389
+
390
+ images = []
391
+ generator = th.Generator("cuda").manual_seed(args.seed)
392
+ for i in range(args.num_images):
393
+ image = pipe(prompt, guidance_scale=scale, num_inference_steps=steps,
394
+ weights=args.weights, generator=generator).images[0]
395
+ images.append(th.from_numpy(np.array(image)).permute(2, 0, 1) / 255.)
396
+ grid = tvu.make_grid(th.stack(images, dim=0), nrow=4, padding=0)
397
+ tvu.save_image(grid, f'{prompt}_{args.weights}' + '.png')
398
+ ```
400
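+
+ Each '|'-separated sub-prompt is paired with the corresponding entry in `--weights`; a negative weight (here `-7.5` for "dark") acts as the negation operator, steering the sample away from that concept.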
+
401
+ ### Imagic Stable Diffusion
402
+ Allows you to edit an image using Stable Diffusion, following the approach of the [Imagic paper](https://arxiv.org/abs/2210.09276).
403
+
404
+ ```python
405
+ import requests
406
+ from PIL import Image
407
+ from io import BytesIO
408
+ import torch
409
+ import os
410
+ from diffusers import DiffusionPipeline, DDIMScheduler
411
+ has_cuda = torch.cuda.is_available()
412
+ device = torch.device('cpu' if not has_cuda else 'cuda')
413
+ pipe = DiffusionPipeline.from_pretrained(
414
+ "CompVis/stable-diffusion-v1-4",
415
+ safety_checker=None,
416
+ use_auth_token=True,
417
+ custom_pipeline="imagic_stable_diffusion",
418
+ scheduler=DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False),
419
+ ).to(device)
420
+ generator = torch.Generator("cuda").manual_seed(0)
421
+ seed = 0
422
+ prompt = "A photo of Barack Obama smiling with a big grin"
423
+ url = 'https://www.dropbox.com/s/6tlwzr73jd1r9yk/obama.png?dl=1'
424
+ response = requests.get(url)
425
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
426
+ init_image = init_image.resize((512, 512))
427
+ res = pipe.train(
428
+ prompt,
429
+ image=init_image,
430
+ generator=generator)
431
+ res = pipe(alpha=1, guidance_scale=7.5, num_inference_steps=50)
432
+ os.makedirs("imagic", exist_ok=True)
433
+ image = res.images[0]
434
+ image.save('./imagic/imagic_image_alpha_1.png')
435
+ res = pipe(alpha=1.5, guidance_scale=7.5, num_inference_steps=50)
436
+ image = res.images[0]
437
+ image.save('./imagic/imagic_image_alpha_1_5.png')
438
+ res = pipe(alpha=2, guidance_scale=7.5, num_inference_steps=50)
439
+ image = res.images[0]
440
+ image.save('./imagic/imagic_image_alpha_2.png')
441
+ ```
442
+
443
+ ### Seed Resizing
444
+ Test seed resizing: first generate an image at 512x512, then generate an image with the same seed at 512x592 using seed resizing, and finally generate a 512x592 image with the original Stable Diffusion pipeline for comparison.
445
+
446
+ ```python
447
+ import torch as th
448
+ import os
449
+ from diffusers import DiffusionPipeline
450
+
451
+ has_cuda = th.cuda.is_available()
452
+ device = th.device('cpu' if not has_cuda else 'cuda')
453
+
454
+ pipe = DiffusionPipeline.from_pretrained(
455
+ "CompVis/stable-diffusion-v1-4",
456
+ use_auth_token=True,
457
+ custom_pipeline="seed_resize_stable_diffusion"
458
+ ).to(device)
459
+
460
+ def dummy(images, **kwargs):
461
+ return images, False
462
+
463
+ pipe.safety_checker = dummy
464
+
465
+
466
+ images = []
467
+ th.manual_seed(0)
468
+ generator = th.Generator("cuda").manual_seed(0)
469
+
470
+ seed = 0
471
+ prompt = "A painting of a futuristic cop"
472
+
473
+ width = 512
474
+ height = 512
475
+
476
+ res = pipe(
477
+ prompt,
478
+ guidance_scale=7.5,
479
+ num_inference_steps=50,
480
+ height=height,
481
+ width=width,
482
+ generator=generator)
483
+ image = res.images[0]
484
+ os.makedirs("seed_resize", exist_ok=True)
+ image.save('./seed_resize/seed_resize_{w}_{h}_image.png'.format(w=width, h=height))
485
+
486
+
487
+ th.manual_seed(0)
488
+ generator = th.Generator("cuda").manual_seed(0)
489
+
490
+ pipe = DiffusionPipeline.from_pretrained(
491
+ "CompVis/stable-diffusion-v1-4",
492
+ use_auth_token=True,
493
+ custom_pipeline="/home/mark/open_source/diffusers/examples/community/"
494
+ ).to(device)
495
+
496
+ width = 512
497
+ height = 592
498
+
499
+ res = pipe(
500
+ prompt,
501
+ guidance_scale=7.5,
502
+ num_inference_steps=50,
503
+ height=height,
504
+ width=width,
505
+ generator=generator)
506
+ image = res.images[0]
507
+ image.save('./seed_resize/seed_resize_{w}_{h}_image.png'.format(w=width, h=height))
508
+
509
+ pipe_compare = DiffusionPipeline.from_pretrained(
510
+ "CompVis/stable-diffusion-v1-4",
511
+ use_auth_token=True,
512
+ custom_pipeline="/home/mark/open_source/diffusers/examples/community/"
513
+ ).to(device)
514
+
515
+ res = pipe_compare(
516
+ prompt,
517
+ guidance_scale=7.5,
518
+ num_inference_steps=50,
519
+ height=height,
520
+ width=width,
521
+ generator=generator
522
+ )
523
+
524
+ image = res.images[0]
525
+ image.save('./seed_resize/seed_resize_{w}_{h}_image_compare.png'.format(w=width, h=height))
526
+ ```
527
+
528
+ ### Multilingual Stable Diffusion Pipeline
529
+
530
+ The following code can generate images from text in different languages using the pre-trained [mBART-50 many-to-one multilingual machine translation model](https://huggingface.co/facebook/mbart-large-50-many-to-one-mmt) and Stable Diffusion.
531
+
532
+ ```python
533
+ from PIL import Image
534
+
535
+ import torch
536
+
537
+ from diffusers import DiffusionPipeline
538
+ from transformers import (
539
+ pipeline,
540
+ MBart50TokenizerFast,
541
+ MBartForConditionalGeneration,
542
+ )
543
+ device = "cuda" if torch.cuda.is_available() else "cpu"
544
+ device_dict = {"cuda": 0, "cpu": -1}
545
+
546
+ # helper function taken from: https://huggingface.co/blog/stable_diffusion
547
+ def image_grid(imgs, rows, cols):
548
+ assert len(imgs) == rows*cols
549
+
550
+ w, h = imgs[0].size
551
+ grid = Image.new('RGB', size=(cols*w, rows*h))
552
+ grid_w, grid_h = grid.size
553
+
554
+ for i, img in enumerate(imgs):
555
+ grid.paste(img, box=(i%cols*w, i//cols*h))
556
+ return grid
557
+
558
+ # Add language detection pipeline
559
+ language_detection_model_ckpt = "papluca/xlm-roberta-base-language-detection"
560
+ language_detection_pipeline = pipeline("text-classification",
561
+ model=language_detection_model_ckpt,
562
+ device=device_dict[device])
563
+
564
+ # Add model for language translation
565
+ trans_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
566
+ trans_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt").to(device)
567
+
568
+ diffuser_pipeline = DiffusionPipeline.from_pretrained(
569
+ "CompVis/stable-diffusion-v1-4",
570
+ custom_pipeline="multilingual_stable_diffusion",
571
+ detection_pipeline=language_detection_pipeline,
572
+ translation_model=trans_model,
573
+ translation_tokenizer=trans_tokenizer,
574
+ torch_dtype=torch.float16,
576
+ )
577
+
578
+ diffuser_pipeline.enable_attention_slicing()
579
+ diffuser_pipeline = diffuser_pipeline.to(device)
580
+
581
+ prompt = ["a photograph of an astronaut riding a horse",
582
+ "Una casa en la playa",
583
+ "Ein Hund, der Orange isst",
584
+ "Un restaurant parisien"]
585
+
586
+ output = diffuser_pipeline(prompt)
587
+
588
+ images = output.images
589
+
590
+ grid = image_grid(images, rows=2, cols=2)
591
+ ```
592
+
593
+ This example produces the following images:
594
+ ![image](https://user-images.githubusercontent.com/4313860/198328706-295824a4-9856-4ce5-8e66-278ceb42fd29.png)
595
+
596
+ ### Image to Image Inpainting Stable Diffusion
597
+
598
+ Similar to the standard stable diffusion inpainting example, except with the addition of an `inner_image` argument.
599
+
600
+ `image`, `inner_image`, and `mask` should have the same dimensions. `inner_image` should have an alpha (transparency) channel.
601
+
602
+ The aim is to overlay two images, then mask out the boundary between `image` and `inner_image` to allow stable diffusion to make the connection more seamless.
603
+ For example, this could be used to place a logo on a shirt and make it blend seamlessly.
604
+
605
+ ```python
606
+ import PIL
607
+ import torch
608
+
609
+ from diffusers import DiffusionPipeline
610
+
611
+ image_path = "./path-to-image.png"
612
+ inner_image_path = "./path-to-inner-image.png"
613
+ mask_path = "./path-to-mask.png"
614
+
615
+ init_image = PIL.Image.open(image_path).convert("RGB").resize((512, 512))
616
+ inner_image = PIL.Image.open(inner_image_path).convert("RGBA").resize((512, 512))
617
+ mask_image = PIL.Image.open(mask_path).convert("RGB").resize((512, 512))
618
+
619
+ pipe = DiffusionPipeline.from_pretrained(
620
+ "runwayml/stable-diffusion-inpainting",
621
+ custom_pipeline="img2img_inpainting",
622
+ torch_dtype=torch.float16
624
+ )
625
+ pipe = pipe.to("cuda")
626
+
627
+ prompt = "Your prompt here!"
628
+ image = pipe(prompt=prompt, image=init_image, inner_image=inner_image, mask_image=mask_image).images[0]
629
+ ```
630
+
631
+ ![2 by 2 grid demonstrating image to image inpainting.](https://user-images.githubusercontent.com/44398246/203506577-ec303be4-887e-4ebd-a773-c83fcb3dd01a.png)
632
+
633
+ ### Text Based Inpainting Stable Diffusion
634
+
635
+ Use a text prompt to generate the mask for the area to be inpainted.
636
+ Currently uses the CLIPSeg model for mask generation, then calls the standard Stable Diffusion Inpainting pipeline to perform the inpainting.
637
+
638
+ ```python
639
+ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
640
+ from diffusers import DiffusionPipeline
641
+
642
+ from PIL import Image
643
+ import requests
644
+
645
+ processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
646
+ model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
647
+
648
+ pipe = DiffusionPipeline.from_pretrained(
649
+ "runwayml/stable-diffusion-inpainting",
650
+ custom_pipeline="text_inpainting",
651
+ segmentation_model=model,
652
+ segmentation_processor=processor
653
+ )
654
+ pipe = pipe.to("cuda")
655
+
656
+
657
+ url = "https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true"
658
+ image = Image.open(requests.get(url, stream=True).raw).resize((512, 512))
659
+ text = "a glass" # will mask out this text
660
+ prompt = "a cup" # the masked out region will be replaced with this
661
+
662
+ image = pipe(image=image, text=text, prompt=prompt).images[0]
663
+ ```
664
+
665
+ ### Bit Diffusion
666
+ Based on https://arxiv.org/abs/2208.04202, this pipeline is used for diffusion on discrete data - e.g., discrete image data or DNA sequence data. An unconditional discrete image can be generated like this:
667
+
668
+ ```python
669
+ from diffusers import DiffusionPipeline
670
+ pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="bit_diffusion")
671
+ image = pipe().images[0]
672
+ ```
674
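+
+ Under the hood, images are mapped to and from a {-1, 1}-valued bit representation (8 bit planes per channel, so an RGB image becomes 24 channels). A minimal sketch of the round trip, assuming the helper functions in `examples/community/bit_diffusion.py` are importable:
+
+ ```python
+ import torch
+
+ from bit_diffusion import bits_to_decimal, decimal_to_bits  # helpers from bit_diffusion.py
+
+ image = torch.rand(1, 3, 32, 32)   # a fake RGB batch in [0, 1]
+ bits = decimal_to_bits(image)      # -> (1, 24, 32, 32), values in {-1, 1}
+ recovered = bits_to_decimal(bits)  # -> (1, 3, 32, 32), values in [0, 1]
+
+ # 8-bit quantization is the only loss in the round trip
+ assert torch.allclose(image, recovered, atol=1 / 255)
+ ```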
+
675
+ ### Stable Diffusion with K Diffusion
676
+
677
+ Make sure you have @crowsonkb's https://github.com/crowsonkb/k-diffusion installed:
678
+
679
+ ```
680
+ pip install k-diffusion
681
+ ```
682
+
683
+ You can use the community pipeline as follows:
684
+
685
+ ```python
686
+ import torch
+
+ from diffusers import DiffusionPipeline
687
+
688
+ pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="sd_text2img_k_diffusion")
689
+ pipe = pipe.to("cuda")
690
+
691
+ prompt = "an astronaut riding a horse on mars"
692
+ pipe.set_scheduler("sample_heun")
693
+ seed = 33
+ generator = torch.Generator(device="cuda").manual_seed(seed)
694
+ image = pipe(prompt, generator=generator, num_inference_steps=20).images[0]
695
+
696
+ image.save("./astronaut_heun_k_diffusion.png")
697
+ ```
698
+
699
+ To make sure that K Diffusion and `diffusers` yield the same results:
700
+
701
+ **Diffusers**:
702
+ ```python
703
+ import torch
+
+ from diffusers import DiffusionPipeline, EulerDiscreteScheduler
704
+
705
+ seed = 33
706
+
707
+ pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
708
+ pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
709
+ pipe = pipe.to("cuda")
710
+
711
+ prompt = "an astronaut riding a horse on mars"
+ generator = torch.Generator(device="cuda").manual_seed(seed)
712
+ image = pipe(prompt, generator=generator, num_inference_steps=50).images[0]
713
+ ```
714
+
715
+ ![diffusers_euler](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/k_diffusion/astronaut_euler.png)
716
+
717
+ **K Diffusion**:
718
+ ```python
719
+ import torch
+
+ from diffusers import DiffusionPipeline, EulerDiscreteScheduler
720
+
721
+ seed = 33
722
+
723
+ pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="sd_text2img_k_diffusion")
724
+ pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
725
+ pipe = pipe.to("cuda")
726
+
727
+ pipe.set_scheduler("sample_euler")
728
+ prompt = "an astronaut riding a horse on mars"
+ generator = torch.Generator(device="cuda").manual_seed(seed)
729
+ image = pipe(prompt, generator=generator, num_inference_steps=50).images[0]
730
+ ```
731
+
732
+ ![k_diffusion_euler](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/k_diffusion/astronaut_euler_k_diffusion.png)
733
+
734
+ ### Checkpoint Merger Pipeline
735
+ Based on the checkpoint merging in AUTOMATIC1111/webui. This is a custom pipeline that merges up to 3 pretrained model checkpoints as long as they are in the HuggingFace model_index.json format.
736
+
737
+ The checkpoint merging is currently memory intensive as it modifies the weights of a DiffusionPipeline object in place. Expect at least 13GB of RAM usage on Kaggle GPU kernels, and
738
+ on Colab you might run out of the 12GB memory even while merging two checkpoints.
739
+
740
+ Usage:
741
+ ```python
742
+ from diffusers import DiffusionPipeline
743
+
744
+ # Returns a CheckpointMergerPipeline class that allows you to merge checkpoints.
745
+ # The checkpoint passed here is ignored, but still pass one of the checkpoints you plan to
746
+ # merge, for convenience.
747
+ pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger")
748
+
749
+ # There are multiple possible scenarios:
750
+ # The pipeline with the merged checkpoints is returned in all the scenarios.
751
+
752
+ # Compatible checkpoints, i.e. matching model_index.json files. Meta attributes (those prefixed with "_") are ignored during the comparison.
753
+ merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4", "CompVis/stable-diffusion-v1-2"], interp="sigmoid", alpha=0.4)
754
+
755
+ # Incompatible model_index.json files, but the merge might still be possible. Use force=True to skip the compatibility check.
756
+ merged_pipe_1 = pipe.merge(["CompVis/stable-diffusion-v1-4", "hakurei/waifu-diffusion"], force=True, interp="sigmoid", alpha=0.4)
757
+
758
+ # Three-checkpoint merging. Only the "add_difference" method actually uses all three checkpoints; any other option ignores the 3rd one.
759
+ merged_pipe_2 = pipe.merge(["CompVis/stable-diffusion-v1-4", "hakurei/waifu-diffusion", "prompthero/openjourney"], force=True, interp="add_difference", alpha=0.4)
760
+
761
+ prompt = "An astronaut riding a horse on Mars"
762
+
763
+ image = merged_pipe(prompt).images[0]
764
+
765
+ ```
766
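+
+ When `interp` is left unset, the merge reduces to a per-tensor weighted sum over each module's `state_dict`, the rule implemented by `CheckpointMergerPipeline.weighted_sum` (which additionally takes an unused `theta2` argument). A minimal sketch:
+
+ ```python
+ import torch
+
+ def weighted_sum(theta0: torch.Tensor, theta1: torch.Tensor, alpha: float) -> torch.Tensor:
+     # alpha = 0 keeps the first checkpoint unchanged, alpha = 1 returns the second
+     return (1 - alpha) * theta0 + alpha * theta1
+
+ sd0 = {"weight": torch.zeros(2, 2)}  # stand-ins for two matching state_dicts
+ sd1 = {"weight": torch.ones(2, 2)}
+ merged = {k: weighted_sum(sd0[k], sd1[k], alpha=0.4) for k in sd0}
+ print(merged["weight"])  # every entry is 0.4
+ ```
+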
+ Some examples along with the merge details:
767
+
768
+ 1. "CompVis/stable-diffusion-v1-4" + "hakurei/waifu-diffusion" ; Sigmoid interpolation; alpha = 0.8
769
+
770
+ ![Stable plus Waifu Sigmoid 0.8](https://huggingface.co/datasets/NagaSaiAbhinay/CheckpointMergerSamples/resolve/main/stability_v1_4_waifu_sig_0.8.png)
771
+
772
+ 2. "hakurei/waifu-diffusion" + "prompthero/openjourney" ; Inverse Sigmoid interpolation; alpha = 0.8
773
+
774
+ ![Stable plus Waifu Sigmoid 0.8](https://huggingface.co/datasets/NagaSaiAbhinay/CheckpointMergerSamples/resolve/main/waifu_openjourney_inv_sig_0.8.png)
775
+
776
+
777
+ 3. "CompVis/stable-diffusion-v1-4" + "hakurei/waifu-diffusion" + "prompthero/openjourney"; Add Difference interpolation; alpha = 0.5
778
+
779
+ ![Stable plus Waifu plus openjourney add_diff 0.5](https://huggingface.co/datasets/NagaSaiAbhinay/CheckpointMergerSamples/resolve/main/stable_waifu_openjourney_add_diff_0.5.png)
780
+
781
+
782
+ ### Stable Diffusion Comparisons
783
+
784
+ This Community Pipeline enables the comparison between the 4 checkpoints that exist for Stable Diffusion. They can be found through the following links:
785
+ 1. [Stable Diffusion v1.1](https://huggingface.co/CompVis/stable-diffusion-v1-1)
786
+ 2. [Stable Diffusion v1.2](https://huggingface.co/CompVis/stable-diffusion-v1-2)
787
+ 3. [Stable Diffusion v1.3](https://huggingface.co/CompVis/stable-diffusion-v1-3)
788
+ 4. [Stable Diffusion v1.4](https://huggingface.co/CompVis/stable-diffusion-v1-4)
789
+
790
+ ```python
791
+ from diffusers import DiffusionPipeline
792
+ import matplotlib.pyplot as plt
793
+
794
+ pipe = DiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', custom_pipeline='suvadityamuk/StableDiffusionComparison')
795
+ pipe.enable_attention_slicing()
796
+ pipe = pipe.to('cuda')
797
+ prompt = "an astronaut riding a horse on mars"
798
+ output = pipe(prompt)
799
+
800
+ plt.subplot(2, 2, 1)
801
+ plt.imshow(output.images[0])
802
+ plt.title('Stable Diffusion v1.1')
803
+ plt.axis('off')
804
+ plt.subplot(2, 2, 2)
805
+ plt.imshow(output.images[1])
806
+ plt.title('Stable Diffusion v1.2')
807
+ plt.axis('off')
808
+ plt.subplot(2, 2, 3)
809
+ plt.imshow(output.images[2])
810
+ plt.title('Stable Diffusion v1.3')
811
+ plt.axis('off')
812
+ plt.subplot(2, 2, 4)
813
+ plt.imshow(output.images[3])
814
+ plt.title('Stable Diffusion v1.4')
815
+ plt.axis('off')
816
+
817
+ plt.show()
818
+ ```
819
+
820
+ As a result, you get a grid of all 4 generated images shown together, which captures the differences in training progress between the 4 checkpoints.
821
+
822
+ ### Magic Mix
823
+
824
+ Implementation of the [MagicMix: Semantic Mixing with Diffusion Models](https://arxiv.org/abs/2210.16056) paper. This is a Diffusion Pipeline for semantic mixing of an image and a text prompt to create a new concept while preserving the spatial layout and geometry of the subject in the image. The pipeline takes an image that provides the layout semantics and a prompt that provides the content semantics for the mixing process.
825
+
826
+ There are 3 parameters for the method:
827
+ - `mix_factor`: It is the interpolation constant used in the layout generation phase. The greater the value of `mix_factor`, the greater the influence of the prompt on the layout generation process.
828
+ - `kmax` and `kmin`: These determine the range for the layout and content generation process. A higher value of `kmax` results in more of the original image's layout information being lost, and a higher value of `kmin` results in more steps for the content generation process.
829
+
830
+ Here is an example usage:
831
+
832
+ ```python
833
+ from diffusers import DiffusionPipeline, DDIMScheduler
834
+ from PIL import Image
835
+
836
+ pipe = DiffusionPipeline.from_pretrained(
837
+ "CompVis/stable-diffusion-v1-4",
838
+ custom_pipeline="magic_mix",
839
+ scheduler=DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler"),
840
+ ).to('cuda')
841
+
842
+ img = Image.open('phone.jpg')
843
+ mix_img = pipe(
844
+ img,
845
+ prompt='bed',
846
+ kmin=0.3,
847
+ kmax=0.5,
848
+ mix_factor=0.5,
849
+ )
850
+ mix_img.save('phone_bed_mix.jpg')
851
+ ```
852
+ `mix_img` is a PIL image that can be saved locally or displayed directly in a Google Colab. The generated image is a mix of the layout semantics of the given image and the content semantics of the prompt.
853
+
854
+ E.g. the above script generates the following image:
855
+
856
+ `phone.jpg`
857
+
858
+ ![206903102-34e79b9f-9ed2-4fac-bb38-82871343c655](https://user-images.githubusercontent.com/59410571/209578593-141467c7-d831-4792-8b9a-b17dc5e47816.jpg)
859
+
860
+ `phone_bed_mix.jpg`
861
+
862
+ ![206903104-913a671d-ef53-4ae4-919d-64c3059c8f67](https://user-images.githubusercontent.com/59410571/209578602-70f323fa-05b7-4dd6-b055-e40683e37914.jpg)
863
+
864
+ For more example generations check out this [demo notebook](https://github.com/daspartho/MagicMix/blob/main/demo.ipynb).
865
+
866
+
867
+ ### Stable UnCLIP
868
+
869
+ UnCLIPPipeline("kakaobrain/karlo-v1-alpha") provides a prior model that can generate CLIP image embeddings from text.
870
+ StableDiffusionImageVariationPipeline("lambdalabs/sd-image-variations-diffusers") provides a decoder model that can generate images from CLIP image embeddings.
871
+
872
+ ```python
873
+ import torch
874
+ from diffusers import DiffusionPipeline
875
+
876
+ device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
877
+
878
+ pipeline = DiffusionPipeline.from_pretrained(
879
+ "kakaobrain/karlo-v1-alpha",
880
+ torch_dtype=torch.float16,
881
+ custom_pipeline="stable_unclip",
882
+ decoder_pipe_kwargs=dict(
883
+ image_encoder=None,
884
+ ),
885
+ )
886
+ pipeline.to(device)
887
+
888
+ prompt = "a shiba inu wearing a beret and black turtleneck"
889
+ random_generator = torch.Generator(device=device).manual_seed(1000)
890
+ output = pipeline(
891
+ prompt=prompt,
892
+ width=512,
893
+ height=512,
894
+ generator=random_generator,
895
+ prior_guidance_scale=4,
896
+ prior_num_inference_steps=25,
897
+ decoder_guidance_scale=8,
898
+ decoder_num_inference_steps=50,
899
+ )
900
+
901
+ image = output.images[0]
902
+ image.save("./shiba-inu.jpg")
903
+
904
+ # debug
905
+
906
+ # `pipeline.decoder_pipe` is a regular StableDiffusionImageVariationPipeline instance.
907
+ # It converts CLIP image embeddings to latents, which are then fed into the VAE decoder.
908
+ print(pipeline.decoder_pipe.__class__)
909
+ # <class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline'>
910
+
911
+ # this pipeline only uses the prior module of "kakaobrain/karlo-v1-alpha"
912
+ # It converts CLIP text embeddings to CLIP image embeddings.
913
+ print(pipeline)
914
+ # StableUnCLIPPipeline {
915
+ # "_class_name": "StableUnCLIPPipeline",
916
+ # "_diffusers_version": "0.12.0.dev0",
917
+ # "prior": [
918
+ # "diffusers",
919
+ # "PriorTransformer"
920
+ # ],
921
+ # "prior_scheduler": [
922
+ # "diffusers",
923
+ # "UnCLIPScheduler"
924
+ # ],
925
+ # "text_encoder": [
926
+ # "transformers",
927
+ # "CLIPTextModelWithProjection"
928
+ # ],
929
+ # "tokenizer": [
930
+ # "transformers",
931
+ # "CLIPTokenizer"
932
+ # ]
933
+ # }
934
+
935
+ # pipeline.prior_scheduler is the scheduler used for prior in UnCLIP.
936
+ print(pipeline.prior_scheduler)
937
+ # UnCLIPScheduler {
938
+ # "_class_name": "UnCLIPScheduler",
939
+ # "_diffusers_version": "0.12.0.dev0",
940
+ # "clip_sample": true,
941
+ # "clip_sample_range": 5.0,
942
+ # "num_train_timesteps": 1000,
943
+ # "prediction_type": "sample",
944
+ # "variance_type": "fixed_small_log"
945
+ # }
946
+ ```
947
+
948
+
949
+ `shiba-inu.jpg`
950
+
951
+
952
+ ![shiba-inu](https://user-images.githubusercontent.com/16448529/209185639-6e5ec794-ce9d-4883-aa29-bd6852a2abad.jpg)
953
+
huggingface_diffusers/examples/community/bit_diffusion.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Optional, Tuple, Union
2
+
3
+ import torch
4
+
5
+ from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
6
+ from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
7
+ from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
8
+ from einops import rearrange, reduce
9
+
10
+
11
+ BITS = 8
12
+
13
+
14
+ # convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
15
+ def decimal_to_bits(x, bits=BITS):
16
+ """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
17
+ device = x.device
18
+
19
+ x = (x * 255).int().clamp(0, 255)
20
+
21
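+ # bit-plane masks: 2^(bits-1), ..., 2^1, 2^0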
+ mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
22
+ mask = rearrange(mask, "d -> d 1 1")
23
+ x = rearrange(x, "b c h w -> b c 1 h w")
24
+
25
+ bits = ((x & mask) != 0).float()
26
+ bits = rearrange(bits, "b c d h w -> b (c d) h w")
27
+ bits = bits * 2 - 1
28
+ return bits
29
+
30
+
31
+ def bits_to_decimal(x, bits=BITS):
32
+ """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
33
+ device = x.device
34
+
35
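+ # threshold the "analog bits" back to hard {0, 1} values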
+ x = (x > 0).int()
36
+ mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
37
+
38
+ mask = rearrange(mask, "d -> d 1 1")
39
+ x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
40
+ dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
41
+ return (dec / 255).clamp(0.0, 1.0)
42
+
43
+
44
+ # modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
45
+ def ddim_bit_scheduler_step(
46
+ self,
47
+ model_output: torch.FloatTensor,
48
+ timestep: int,
49
+ sample: torch.FloatTensor,
50
+ eta: float = 0.0,
51
+ use_clipped_model_output: bool = True,
52
+ generator=None,
53
+ return_dict: bool = True,
54
+ ) -> Union[DDIMSchedulerOutput, Tuple]:
55
+ """
56
+ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
57
+ process from the learned model outputs (most often the predicted noise).
58
+ Args:
59
+ model_output (`torch.FloatTensor`): direct output from learned diffusion model.
60
+ timestep (`int`): current discrete timestep in the diffusion chain.
61
+ sample (`torch.FloatTensor`):
62
+ current instance of sample being created by diffusion process.
63
+ eta (`float`): weight of noise for added noise in diffusion step.
64
+ use_clipped_model_output (`bool`): TODO
65
+ generator: random number generator.
66
+ return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
67
+ Returns:
68
+ [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
69
+ [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
70
+ returning a tuple, the first element is the sample tensor.
71
+ """
72
+ if self.num_inference_steps is None:
73
+ raise ValueError(
74
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
75
+ )
76
+
77
+ # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
78
+ # Ideally, read DDIM paper in-detail understanding
79
+
80
+ # Notation (<variable name> -> <name in paper>
81
+ # - pred_noise_t -> e_theta(x_t, t)
82
+ # - pred_original_sample -> f_theta(x_t, t) or x_0
83
+ # - std_dev_t -> sigma_t
84
+ # - eta -> η
85
+ # - pred_sample_direction -> "direction pointing to x_t"
86
+ # - pred_prev_sample -> "x_t-1"
87
+
88
+ # 1. get previous step value (=t-1)
89
+ prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
90
+
91
+ # 2. compute alphas, betas
92
+ alpha_prod_t = self.alphas_cumprod[timestep]
93
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
94
+
95
+ beta_prod_t = 1 - alpha_prod_t
96
+
97
+ # 3. compute predicted original sample from predicted noise also called
98
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
99
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
100
+
101
+ # 4. Clip "predicted x_0"
102
+ scale = self.bit_scale
103
+ if self.config.clip_sample:
104
+ pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
105
+
106
+ # 5. compute variance: "sigma_t(η)" -> see formula (16)
107
+ # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
108
+ variance = self._get_variance(timestep, prev_timestep)
109
+ std_dev_t = eta * variance ** (0.5)
110
+
111
+ if use_clipped_model_output:
112
+ # the model_output is always re-derived from the clipped x_0 in Glide
113
+ model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
114
+
115
+ # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
116
+ pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
117
+
118
+ # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
119
+ prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
120
+
121
+ if eta > 0:
122
+ # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
123
+ device = model_output.device if torch.is_tensor(model_output) else "cpu"
124
+ noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
125
+ variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise
126
+
127
+ prev_sample = prev_sample + variance
128
+
129
+ if not return_dict:
130
+ return (prev_sample,)
131
+
132
+ return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
133
+
134
+
135
+ def ddpm_bit_scheduler_step(
136
+ self,
137
+ model_output: torch.FloatTensor,
138
+ timestep: int,
139
+ sample: torch.FloatTensor,
140
+ prediction_type="epsilon",
141
+ generator=None,
142
+ return_dict: bool = True,
143
+ ) -> Union[DDPMSchedulerOutput, Tuple]:
144
+ """
145
+ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
146
+ process from the learned model outputs (most often the predicted noise).
147
+ Args:
148
+ model_output (`torch.FloatTensor`): direct output from learned diffusion model.
149
+ timestep (`int`): current discrete timestep in the diffusion chain.
150
+ sample (`torch.FloatTensor`):
151
+ current instance of sample being created by diffusion process.
152
+ prediction_type (`str`, default `epsilon`):
153
+ indicates whether the model predicts the noise (epsilon), or the samples (`sample`).
154
+ generator: random number generator.
155
+ return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
156
+ Returns:
157
+ [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
158
+ [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
159
+ returning a tuple, the first element is the sample tensor.
160
+ """
161
+ t = timestep
162
+
163
+ if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
164
+ model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
165
+ else:
166
+ predicted_variance = None
167
+
168
+ # 1. compute alphas, betas
169
+ alpha_prod_t = self.alphas_cumprod[t]
170
+ alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
171
+ beta_prod_t = 1 - alpha_prod_t
172
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
173
+
174
+ # 2. compute predicted original sample from predicted noise also called
175
+ # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
176
+ if prediction_type == "epsilon":
177
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
178
+ elif prediction_type == "sample":
179
+ pred_original_sample = model_output
180
+ else:
181
+ raise ValueError(f"Unsupported prediction_type {prediction_type}.")
182
+
183
+ # 3. Clip "predicted x_0"
184
+ scale = self.bit_scale
185
+ if self.config.clip_sample:
186
+ pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
187
+
188
+ # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
189
+ # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
190
+ pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
191
+ current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
192
+
193
+ # 5. Compute predicted previous sample µ_t
194
+ # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
195
+ pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
196
+
197
+ # 6. Add noise
198
+ variance = 0
199
+ if t > 0:
200
+ noise = torch.randn(
201
+ model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
202
+ ).to(model_output.device)
203
+ variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
204
+
205
+ pred_prev_sample = pred_prev_sample + variance
206
+
207
+ if not return_dict:
208
+ return (pred_prev_sample,)
209
+
210
+ return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
211
+
212
+
213
+ class BitDiffusion(DiffusionPipeline):
214
+ def __init__(
215
+ self,
216
+ unet: UNet2DConditionModel,
217
+ scheduler: Union[DDIMScheduler, DDPMScheduler],
218
+ bit_scale: Optional[float] = 1.0,
219
+ ):
220
+ super().__init__()
221
+ self.bit_scale = bit_scale
+ # the patched step functions read `self.bit_scale` with `self` bound to the scheduler,
+ # so expose bit_scale on the scheduler instance as well
+ scheduler.bit_scale = bit_scale
222
+ # patch the local scheduler object: self.scheduler is only set by register_modules below,
+ # and binding via __get__ makes `self` inside the step functions refer to the scheduler
+ scheduler.step = (
223
+ ddim_bit_scheduler_step.__get__(scheduler) if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step.__get__(scheduler)
224
+ )
225
+
226
+ self.register_modules(unet=unet, scheduler=scheduler)
227
+
228
+ @torch.no_grad()
229
+ def __call__(
230
+ self,
231
+ height: Optional[int] = 256,
232
+ width: Optional[int] = 256,
233
+ num_inference_steps: Optional[int] = 50,
234
+ generator: Optional[torch.Generator] = None,
235
+ batch_size: Optional[int] = 1,
236
+ output_type: Optional[str] = "pil",
237
+ return_dict: bool = True,
238
+ **kwargs,
239
+ ) -> Union[Tuple, ImagePipelineOutput]:
240
+ latents = torch.randn(
241
+ (batch_size, self.unet.in_channels, height, width),
242
+ generator=generator,
243
+ )
244
+ latents = decimal_to_bits(latents) * self.bit_scale
245
+ latents = latents.to(self.device)
246
+
247
+ self.scheduler.set_timesteps(num_inference_steps)
248
+
249
+ for t in self.progress_bar(self.scheduler.timesteps):
250
+ # predict the noise residual
251
+ noise_pred = self.unet(latents, t).sample
252
+
253
+ # compute the previous noisy sample x_t -> x_t-1
254
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
255
+
256
+ image = bits_to_decimal(latents)
257
+
258
+ if output_type == "pil":
259
+ image = self.numpy_to_pil(image)
260
+
261
+ if not return_dict:
262
+ return (image,)
263
+
264
+ return ImagePipelineOutput(images=image)
huggingface_diffusers/examples/community/checkpoint_merger.py ADDED
@@ -0,0 +1,285 @@
1
+ import glob
2
+ import os
3
+ from typing import Dict, List, Union
4
+
5
+ import torch
6
+
7
+ from diffusers.utils import is_safetensors_available
8
+
9
+
10
+ if is_safetensors_available():
11
+ import safetensors.torch
12
+
13
+ from diffusers import DiffusionPipeline, __version__
14
+ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
15
+ from diffusers.utils import CONFIG_NAME, DIFFUSERS_CACHE, ONNX_WEIGHTS_NAME, WEIGHTS_NAME
16
+ from huggingface_hub import snapshot_download
17
+
18
+
19
+ class CheckpointMergerPipeline(DiffusionPipeline):
20
+ """
21
+ A class that that supports merging diffusion models based on the discussion here:
22
+ https://github.com/huggingface/diffusers/issues/877
23
+
24
+ Example usage:
25
+
26
+ pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")
27
+
28
+ merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)
29
+
30
+ merged_pipe.to('cuda')
31
+
32
+ prompt = "An astronaut riding a unicycle on Mars"
33
+
34
+ results = merged_pipe(prompt)
35
+
36
+ ## For more details, see the docstring for the merge method.
37
+
38
+ """
39
+
40
+ def __init__(self):
41
+ self.register_to_config()
42
+ super().__init__()
43
+
44
+ def _compare_model_configs(self, dict0, dict1):
45
+ if dict0 == dict1:
46
+ return True
47
+ else:
48
+ config0, meta_keys0 = self._remove_meta_keys(dict0)
49
+ config1, meta_keys1 = self._remove_meta_keys(dict1)
50
+ if config0 == config1:
51
+ print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
52
+ return True
53
+ return False
54
+
55
+ def _remove_meta_keys(self, config_dict: Dict):
56
+ meta_keys = []
57
+ temp_dict = config_dict.copy()
58
+ for key in config_dict.keys():
59
+ if key.startswith("_"):
60
+ temp_dict.pop(key)
61
+ meta_keys.append(key)
62
+ return (temp_dict, meta_keys)
63
+
64
+ @torch.no_grad()
65
+ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
66
+ """
67
+ Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
68
+ in the argument 'pretrained_model_name_or_path_list' as a list.
69
+
70
+ Parameters:
71
+ -----------
72
+ pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.
73
+
74
+ **kwargs:
75
+ Supports all the default DiffusionPipeline.get_config_dict kwargs viz..
76
+
77
+ cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map.
78
+
79
+ alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
80
+ means that the first model checkpoint affects the final result far less than an alpha of 0.2 would.
81
+
82
+ interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None.
83
+ Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported.
84
+
85
+ force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
86
+
87
+ """
88
+ # Default kwargs from DiffusionPipeline
89
+ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
90
+ resume_download = kwargs.pop("resume_download", False)
91
+ force_download = kwargs.pop("force_download", False)
92
+ proxies = kwargs.pop("proxies", None)
93
+ local_files_only = kwargs.pop("local_files_only", False)
94
+ use_auth_token = kwargs.pop("use_auth_token", None)
95
+ revision = kwargs.pop("revision", None)
96
+ torch_dtype = kwargs.pop("torch_dtype", None)
97
+ device_map = kwargs.pop("device_map", None)
98
+
99
+ alpha = kwargs.pop("alpha", 0.5)
100
+ interp = kwargs.pop("interp", None)
101
+
102
+ print("Received list", pretrained_model_name_or_path_list)
103
+ print(f"Combining with alpha={alpha}, interpolation mode={interp}")
104
+
105
+ checkpoint_count = len(pretrained_model_name_or_path_list)
106
+ # Ignore result from model_index.json comparison of the two checkpoints
107
+ force = kwargs.pop("force", False)
108
+
109
+ # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
110
+ if checkpoint_count > 3 or checkpoint_count < 2:
111
+ raise ValueError(
112
+ "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
113
+ " passed."
114
+ )
115
+
116
+ print("Received the right number of checkpoints")
117
+ # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
118
+ # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None
119
+
120
+ # Validate that the checkpoints can be merged
121
+ # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
122
+ config_dicts = []
123
+ for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
124
+ config_dict = DiffusionPipeline.load_config(
125
+ pretrained_model_name_or_path,
126
+ cache_dir=cache_dir,
127
+ resume_download=resume_download,
128
+ force_download=force_download,
129
+ proxies=proxies,
130
+ local_files_only=local_files_only,
131
+ use_auth_token=use_auth_token,
132
+ revision=revision,
133
+ )
134
+ config_dicts.append(config_dict)
135
+
136
+ comparison_result = True
137
+ for idx in range(1, len(config_dicts)):
138
+ comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
139
+ if not force and comparison_result is False:
140
+ raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
141
+ print(config_dicts[0], config_dicts[1])
142
+ print("Compatible model_index.json files found")
143
+ # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
144
+ cached_folders = []
145
+ for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts):
146
+ folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
147
+ allow_patterns = [os.path.join(k, "*") for k in folder_names]
148
+ allow_patterns += [
149
+ WEIGHTS_NAME,
150
+ SCHEDULER_CONFIG_NAME,
151
+ CONFIG_NAME,
152
+ ONNX_WEIGHTS_NAME,
153
+ DiffusionPipeline.config_name,
154
+ ]
155
+ requested_pipeline_class = config_dict.get("_class_name")
156
+ user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}
157
+
158
+ cached_folder = (
159
+ pretrained_model_name_or_path
160
+ if os.path.isdir(pretrained_model_name_or_path)
161
+ else snapshot_download(
162
+ pretrained_model_name_or_path,
163
+ cache_dir=cache_dir,
164
+ resume_download=resume_download,
165
+ proxies=proxies,
166
+ local_files_only=local_files_only,
167
+ use_auth_token=use_auth_token,
168
+ revision=revision,
169
+ allow_patterns=allow_patterns,
170
+ user_agent=user_agent,
171
+ )
172
+ )
173
+ print("Cached Folder", cached_folder)
174
+ cached_folders.append(cached_folder)
175
+
176
+ # Step 3:-
177
+ # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
178
+ final_pipe = DiffusionPipeline.from_pretrained(
179
+ cached_folders[0], torch_dtype=torch_dtype, device_map=device_map
180
+ )
181
+ final_pipe.to(self.device)
182
+
183
+ checkpoint_path_2 = None
184
+ if len(cached_folders) > 2:
185
+ checkpoint_path_2 = os.path.join(cached_folders[2])
186
+
187
+ if interp == "sigmoid":
188
+ theta_func = CheckpointMergerPipeline.sigmoid
189
+ elif interp == "inv_sigmoid":
190
+ theta_func = CheckpointMergerPipeline.inv_sigmoid
191
+ elif interp == "add_diff":
192
+ theta_func = CheckpointMergerPipeline.add_difference
193
+ else:
194
+ theta_func = CheckpointMergerPipeline.weighted_sum
195
+
196
+ # Find each module's state dict.
197
+ for attr in final_pipe.config.keys():
198
+ if not attr.startswith("_"):
199
+ checkpoint_path_1 = os.path.join(cached_folders[1], attr)
200
+ if os.path.exists(checkpoint_path_1):
201
+ files = list(
202
+ (
203
+ *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
204
+ *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
205
+ )
206
+ )
207
+ checkpoint_path_1 = files[0] if len(files) > 0 else None
208
+ if checkpoint_path_2 is not None and os.path.exists(checkpoint_path_2):
209
+ files = list(
210
+ (
211
+ *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
212
+ *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
213
+ )
214
+ )
215
+ checkpoint_path_2 = files[0] if len(files) > 0 else None
216
+ # For an attr if both checkpoint_path_1 and 2 are None, ignore.
217
+ # If atleast one is present, deal with it according to interp method, of course only if the state_dict keys match.
218
+ if checkpoint_path_1 is None and checkpoint_path_2 is None:
219
+ print(f"Skipping {attr}: not present in 2nd or 3d model")
220
+ continue
221
+ try:
222
+ module = getattr(final_pipe, attr)
223
+ if isinstance(module, bool): # ignore requires_safety_checker boolean
224
+ continue
225
+ theta_0 = getattr(module, "state_dict")
226
+ theta_0 = theta_0()
227
+
228
+ update_theta_0 = getattr(module, "load_state_dict")
229
+ theta_1 = (
230
+ safetensors.torch.load_file(checkpoint_path_1)
231
+ if (is_safetensors_available() and checkpoint_path_1.endswith(".safetensors"))
232
+ else torch.load(checkpoint_path_1, map_location="cpu")
233
+ )
234
+ theta_2 = None
235
+ if checkpoint_path_2:
236
+ theta_2 = (
237
+ safetensors.torch.load_file(checkpoint_path_2)
238
+ if (is_safetensors_available() and checkpoint_path_2.endswith(".safetensors"))
239
+ else torch.load(checkpoint_path_2, map_location="cpu")
240
+ )
241
+
242
+ if not theta_0.keys() == theta_1.keys():
243
+ print(f"Skipping {attr}: key mismatch")
244
+ continue
245
+ if theta_2 and not theta_1.keys() == theta_2.keys():
246
+ print(f"Skipping {attr}:y mismatch")
247
+ except Exception as e:
248
+ print(f"Skipping {attr} do to an unexpected error: {str(e)}")
249
+ continue
250
+ print(f"MERGING {attr}")
251
+
252
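+ # merge every tensor of the module's state_dict in place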
+ for key in theta_0.keys():
253
+ if theta_2:
254
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
255
+ else:
256
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)
257
+
258
+ del theta_1
259
+ del theta_2
260
+ update_theta_0(theta_0)
261
+
262
+ del theta_0
263
+ return final_pipe
264
+
265
+ @staticmethod
266
+ def weighted_sum(theta0, theta1, theta2, alpha):
267
+ return ((1 - alpha) * theta0) + (alpha * theta1)
268
+
269
+ # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
270
+ @staticmethod
271
+ def sigmoid(theta0, theta1, theta2, alpha):
272
+ alpha = alpha * alpha * (3 - (2 * alpha))
273
+ return theta0 + ((theta1 - theta0) * alpha)
274
+
275
+ # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
276
+ @staticmethod
277
+ def inv_sigmoid(theta0, theta1, theta2, alpha):
278
+ import math
279
+
280
+ alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
281
+ return theta0 + ((theta1 - theta0) * alpha)
282
+
283
+ @staticmethod
284
+ def add_difference(theta0, theta1, theta2, alpha):
285
+ return theta0 + (theta1 - theta2) * (1.0 - alpha)
huggingface_diffusers/examples/community/clip_guided_stable_diffusion.py ADDED
@@ -0,0 +1,351 @@
1
+ import inspect
2
+ from typing import List, Optional, Union
3
+
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import functional as F
7
+
8
+ from diffusers import (
9
+ AutoencoderKL,
10
+ DDIMScheduler,
11
+ DiffusionPipeline,
12
+ LMSDiscreteScheduler,
13
+ PNDMScheduler,
14
+ UNet2DConditionModel,
15
+ )
16
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
17
+ from torchvision import transforms
18
+ from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
19
+
20
+
21
+ class MakeCutouts(nn.Module):
22
+ def __init__(self, cut_size, cut_power=1.0):
23
+ super().__init__()
24
+
25
+ self.cut_size = cut_size
26
+ self.cut_power = cut_power
27
+
28
+ def forward(self, pixel_values, num_cutouts):
29
+ sideY, sideX = pixel_values.shape[2:4]
30
+ max_size = min(sideX, sideY)
31
+ min_size = min(sideX, sideY, self.cut_size)
32
+ cutouts = []
33
+ for _ in range(num_cutouts):
34
+ size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
35
+ offsetx = torch.randint(0, sideX - size + 1, ())
36
+ offsety = torch.randint(0, sideY - size + 1, ())
37
+ cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
38
+ cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
39
+ return torch.cat(cutouts)
40
+
41
+
42
+ def spherical_dist_loss(x, y):
43
+ x = F.normalize(x, dim=-1)
44
+ y = F.normalize(y, dim=-1)
45
+ return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
46
+
47
+
48
+ def set_requires_grad(model, value):
49
+ for param in model.parameters():
50
+ param.requires_grad = value
51
+
52
+
53
+ class CLIPGuidedStableDiffusion(DiffusionPipeline):
54
+ """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
55
+ - https://github.com/Jack000/glid-3-xl
56
+ - https://github.dev/crowsonkb/k-diffusion
57
+ """
58
+
59
+ def __init__(
60
+ self,
61
+ vae: AutoencoderKL,
62
+ text_encoder: CLIPTextModel,
63
+ clip_model: CLIPModel,
64
+ tokenizer: CLIPTokenizer,
65
+ unet: UNet2DConditionModel,
66
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
67
+ feature_extractor: CLIPFeatureExtractor,
68
+ ):
69
+ super().__init__()
70
+ self.register_modules(
71
+ vae=vae,
72
+ text_encoder=text_encoder,
73
+ clip_model=clip_model,
74
+ tokenizer=tokenizer,
75
+ unet=unet,
76
+ scheduler=scheduler,
77
+ feature_extractor=feature_extractor,
78
+ )
79
+
80
+ self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
81
+ self.cut_out_size = (
82
+ feature_extractor.size
83
+ if isinstance(feature_extractor.size, int)
84
+ else feature_extractor.size["shortest_edge"]
85
+ )
86
+ self.make_cutouts = MakeCutouts(self.cut_out_size)
87
+
88
+ set_requires_grad(self.text_encoder, False)
89
+ set_requires_grad(self.clip_model, False)
90
+
91
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
92
+ if slice_size == "auto":
93
+ # half the attention head size is usually a good trade-off between
94
+ # speed and memory
95
+ slice_size = self.unet.config.attention_head_dim // 2
96
+ self.unet.set_attention_slice(slice_size)
97
+
98
+ def disable_attention_slicing(self):
99
+ self.enable_attention_slicing(None)
100
+
101
+ def freeze_vae(self):
102
+ set_requires_grad(self.vae, False)
103
+
104
+ def unfreeze_vae(self):
105
+ set_requires_grad(self.vae, True)
106
+
107
+ def freeze_unet(self):
108
+ set_requires_grad(self.unet, False)
109
+
110
+ def unfreeze_unet(self):
111
+ set_requires_grad(self.unet, True)
112
+
113
+ @torch.enable_grad()
114
+ def cond_fn(
115
+ self,
116
+ latents,
117
+ timestep,
118
+ index,
119
+ text_embeddings,
120
+ noise_pred_original,
121
+ text_embeddings_clip,
122
+ clip_guidance_scale,
123
+ num_cutouts,
124
+ use_cutouts=True,
125
+ ):
126
+ latents = latents.detach().requires_grad_()
127
+
128
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
129
+ sigma = self.scheduler.sigmas[index]
130
+ # the model input needs to be scaled to match the continuous ODE formulation in K-LMS
131
+ latent_model_input = latents / ((sigma**2 + 1) ** 0.5)
132
+ else:
133
+ latent_model_input = latents
134
+
135
+ # predict the noise residual
136
+ noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
137
+
138
+ if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler)):
139
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
140
+ beta_prod_t = 1 - alpha_prod_t
141
+ # compute predicted original sample from predicted noise also called
142
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
143
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
144
+
145
+ fac = torch.sqrt(beta_prod_t)
146
+ sample = pred_original_sample * (fac) + latents * (1 - fac)
147
+ elif isinstance(self.scheduler, LMSDiscreteScheduler):
148
+ sigma = self.scheduler.sigmas[index]
149
+ sample = latents - sigma * noise_pred
150
+ else:
151
+ raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
152
+
153
+ sample = 1 / self.vae.config.scaling_factor * sample
154
+ image = self.vae.decode(sample).sample
155
+ image = (image / 2 + 0.5).clamp(0, 1)
156
+
157
+ if use_cutouts:
158
+ image = self.make_cutouts(image, num_cutouts)
159
+ else:
160
+ image = transforms.Resize(self.cut_out_size)(image)
161
+ image = self.normalize(image).to(latents.dtype)
162
+
163
+ image_embeddings_clip = self.clip_model.get_image_features(image)
164
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
165
+
166
+ if use_cutouts:
167
+ dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
168
+ dists = dists.view([num_cutouts, sample.shape[0], -1])
169
+ loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
170
+ else:
171
+ loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
172
+
173
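+ # negative gradient of the CLIP loss w.r.t. the latents (a gradient-descent direction)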
+ grads = -torch.autograd.grad(loss, latents)[0]
174
+
175
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
176
+ latents = latents.detach() + grads * (sigma**2)
177
+ noise_pred = noise_pred_original
178
+ else:
179
+ noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
180
+ return noise_pred, latents
181
+
182
+ @torch.no_grad()
183
+ def __call__(
184
+ self,
185
+ prompt: Union[str, List[str]],
186
+ height: Optional[int] = 512,
187
+ width: Optional[int] = 512,
188
+ num_inference_steps: Optional[int] = 50,
189
+ guidance_scale: Optional[float] = 7.5,
190
+ num_images_per_prompt: Optional[int] = 1,
191
+ eta: float = 0.0,
192
+ clip_guidance_scale: Optional[float] = 100,
193
+ clip_prompt: Optional[Union[str, List[str]]] = None,
194
+ num_cutouts: Optional[int] = 4,
195
+ use_cutouts: Optional[bool] = True,
196
+ generator: Optional[torch.Generator] = None,
197
+ latents: Optional[torch.FloatTensor] = None,
198
+ output_type: Optional[str] = "pil",
199
+ return_dict: bool = True,
200
+ ):
201
+ if isinstance(prompt, str):
202
+ batch_size = 1
203
+ elif isinstance(prompt, list):
204
+ batch_size = len(prompt)
205
+ else:
206
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
207
+
208
+ if height % 8 != 0 or width % 8 != 0:
209
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
210
+
211
+ # get prompt text embeddings
212
+ text_input = self.tokenizer(
213
+ prompt,
214
+ padding="max_length",
215
+ max_length=self.tokenizer.model_max_length,
216
+ truncation=True,
217
+ return_tensors="pt",
218
+ )
219
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
220
+ # duplicate text embeddings for each generation per prompt
221
+ text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
222
+
223
+ if clip_guidance_scale > 0:
224
+ if clip_prompt is not None:
225
+ clip_text_input = self.tokenizer(
226
+ clip_prompt,
227
+ padding="max_length",
228
+ max_length=self.tokenizer.model_max_length,
229
+ truncation=True,
230
+ return_tensors="pt",
231
+ ).input_ids.to(self.device)
232
+ else:
233
+ clip_text_input = text_input.input_ids.to(self.device)
234
+ text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
235
+ text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
236
+ # duplicate text embeddings clip for each generation per prompt
237
+ text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
238
+
239
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
240
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
241
+ # corresponds to doing no classifier free guidance.
242
+ do_classifier_free_guidance = guidance_scale > 1.0
243
+ # get unconditional embeddings for classifier free guidance
244
+ if do_classifier_free_guidance:
245
+ max_length = text_input.input_ids.shape[-1]
246
+ uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
247
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
248
+ # duplicate unconditional embeddings for each generation per prompt
249
+ uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
250
+
251
+ # For classifier free guidance, we need to do two forward passes.
252
+ # Here we concatenate the unconditional and text embeddings into a single batch
253
+ # to avoid doing two forward passes
254
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
255
+
256
+ # get the initial random noise unless the user supplied it
257
+
258
+ # Unlike in other pipelines, latents need to be generated in the target device
259
+ # for 1-to-1 results reproducibility with the CompVis implementation.
260
+ # However this currently doesn't work in `mps`.
261
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)
262
+ latents_dtype = text_embeddings.dtype
263
+ if latents is None:
264
+ if self.device.type == "mps":
265
+ # randn does not work reproducibly on mps
266
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
267
+ self.device
268
+ )
269
+ else:
270
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
271
+ else:
272
+ if latents.shape != latents_shape:
273
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
274
+ latents = latents.to(self.device)
275
+
276
+ # set timesteps
277
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
278
+ extra_set_kwargs = {}
279
+ if accepts_offset:
280
+ extra_set_kwargs["offset"] = 1
281
+
282
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
283
+
284
+ # Some schedulers like PNDM have timesteps as arrays
285
+ # It's more optimized to move all timesteps to correct device beforehand
286
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
287
+
288
+ # scale the initial noise by the standard deviation required by the scheduler
289
+ latents = latents * self.scheduler.init_noise_sigma
290
+
291
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
292
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
293
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
294
+ # and should be between [0, 1]
295
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
296
+ extra_step_kwargs = {}
297
+ if accepts_eta:
298
+ extra_step_kwargs["eta"] = eta
299
+
300
+ # check if the scheduler accepts generator
301
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
302
+ if accepts_generator:
303
+ extra_step_kwargs["generator"] = generator
304
+
305
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
306
+ # expand the latents if we are doing classifier free guidance
307
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
308
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
309
+
310
+ # predict the noise residual
311
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
312
+
313
+ # perform classifier free guidance
314
+ if do_classifier_free_guidance:
315
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
316
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
317
+
318
+ # perform clip guidance
319
+ if clip_guidance_scale > 0:
320
+ text_embeddings_for_guidance = (
321
+ text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
322
+ )
323
+ noise_pred, latents = self.cond_fn(
324
+ latents,
325
+ t,
326
+ i,
327
+ text_embeddings_for_guidance,
328
+ noise_pred,
329
+ text_embeddings_clip,
330
+ clip_guidance_scale,
331
+ num_cutouts,
332
+ use_cutouts,
333
+ )
334
+
335
+ # compute the previous noisy sample x_t -> x_t-1
336
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
337
+
338
+ # scale and decode the image latents with vae
339
+ latents = 1 / self.vae.config.scaling_factor * latents
340
+ image = self.vae.decode(latents).sample
341
+
342
+ image = (image / 2 + 0.5).clamp(0, 1)
343
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
344
+
345
+ if output_type == "pil":
346
+ image = self.numpy_to_pil(image)
347
+
348
+ if not return_dict:
349
+ return (image, None)
350
+
351
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
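For quick orientation, here is a minimal usage sketch for the CLIP-guided pipeline above. It follows the community-pipeline loading convention documented in this folder's README; the model IDs, seed, and output path are illustrative choices, not requirements:

import torch
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel

# any CLIP checkpoint / Stable Diffusion checkpoint pair should work; these are examples
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)
feature_extractor = CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")

guided_pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="clip_guided_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

image = guided_pipeline(
    "fantasy book cover, full moon, fantasy forest landscape",
    num_inference_steps=50,
    guidance_scale=7.5,
    clip_guidance_scale=100,  # weight of the CLIP gradient term; 0 disables it
    num_cutouts=4,  # random crops scored by CLIP at each step
    use_cutouts=False,
    generator=torch.Generator(device="cuda").manual_seed(0),
).images[0]
image.save("clip_guided.png")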
huggingface_diffusers/examples/community/composable_stable_diffusion.py ADDED
@@ -0,0 +1,583 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, List, Optional, Union

import torch

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

# absolute imports instead of the relative ones this file was copied with
# (`from ...utils import ...`), which would break a standalone community
# pipeline; these match the sibling files in this folder
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import deprecate, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class ComposableStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has `clip_sample` set to True."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding.

        When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
        steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
        text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
        `torch.device('meta')`, and loaded to GPU only when their specific submodule has its `forward` method called.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

        if self.safety_checker is not None:
            # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
            # fix by only offloading self.safety_checker for now
            cpu_offload(self.safety_checker.vision_model, device)

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        text_embeddings = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        text_embeddings = text_embeddings[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            uncond_embeddings = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            uncond_embeddings = uncond_embeddings[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        return text_embeddings

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be in [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(self, prompt, height, width, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if latents is None:
            if device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
            else:
                latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        weights: Optional[str] = "",
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation. Concepts can be composed by separating them
                with "|" inside a single string.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            weights (`str`, *optional*, defaults to `""`):
                "|"-separated guidance weights, one per "|"-separated concept in `prompt`. When empty, every concept
                is weighted by `guidance_scale`.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        if "|" in prompt:
            prompt = [x.strip() for x in prompt.split("|")]
            print(f"composing {prompt}...")

            if not weights:
                # specify weights for prompts (excluding the unconditional score)
                print("using equal positive weights (conjunction) for all prompts...")
                weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
            else:
                # set prompt weight for each
                num_prompts = len(prompt) if isinstance(prompt, list) else 1
                weights = [float(w.strip()) for w in weights.split("|")]
                # guidance scale as the default
                if len(weights) < num_prompts:
                    weights.append(guidance_scale)
                else:
                    weights = weights[:num_prompts]
                assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
                weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
        else:
            weights = guidance_scale
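        # Worked example (illustrative values): prompt="mystical trees | magical pond"
        # with weights="7.5 | 7.5" yields prompt == ["mystical trees", "magical pond"]
        # and a (2, 1, 1, 1) weights tensor, so each concept's (cond - uncond) score
        # is scaled by 7.5 before the per-concept scores are summed in the guidance
        # step of the denoising loop below.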

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            device,
            generator,
            latents,
        )

        # composable diffusion
        if isinstance(prompt, list) and batch_size == 1:
            # remove extra unconditional embedding
            # N = one unconditional embed + conditional embeds
            text_embeddings = text_embeddings[len(prompt) - 1 :]

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = []
                for j in range(text_embeddings.shape[0]):
                    noise_pred.append(
                        self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample
                    )
                noise_pred = torch.cat(noise_pred, dim=0)

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:]
                    noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum(
                        dim=0, keepdim=True
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 8. Post-processing
        image = self.decode_latents(latents)

        # 9. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)

        # 10. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
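A minimal usage sketch for composing concepts with this pipeline. The model ID and prompts are illustrative, while the "|" syntax and the `weights` string follow the parsing logic in `__call__` above (a negative weight negates a concept):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="composable_stable_diffusion",
).to("cuda")

prompt = "mystical trees | a magical pond | dark"  # concepts separated by "|"
weights = "7.5 | 7.5 | -7.5"  # one weight per concept; the negative weight negates "dark"
image = pipe(
    prompt,
    weights=weights,
    guidance_scale=7.5,
    num_inference_steps=50,
    generator=torch.Generator(device="cuda").manual_seed(0),
).images[0]
image.save("composed.png")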
huggingface_diffusers/examples/community/imagic_stable_diffusion.py ADDED
@@ -0,0 +1,502 @@
"""
modeled after the textual_inversion.py / train_dreambooth.py and the work
of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
"""

import inspect
import warnings
from typing import List, Optional, Union

import numpy as np
import torch
import torch.nn.functional as F

import PIL
from accelerate import Accelerator
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, logging

# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
# ------------------------------------------------------------------------------

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def preprocess(image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
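
# Worked example of `preprocess` (illustrative sizes): a 500x513 RGB PIL image is
# snapped down to 480x512 (multiples of 32), resized, rescaled from [0, 255] to
# [0, 1], moved to NCHW layout, and finally mapped to [-1, 1], giving a
# (1, 3, 512, 480) float tensor.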


class ImagicStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for imagic image editing.
    See paper here: https://arxiv.org/pdf/2210.09276.pdf

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        # populated by `train()`; initialized here so that `__call__` can check for
        # them without raising an AttributeError when `train()` was never run
        self.text_embeddings = None
        self.text_embeddings_orig = None

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        r"""
        Enable sliced attention computation.
        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.
        Args:
            slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        r"""
        Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
        back to computing attention in one step.
        """
        # set slice_size = `None` to disable `attention slicing`
        self.enable_attention_slicing(None)

    def train(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        generator: Optional[torch.Generator] = None,
        embedding_learning_rate: float = 0.001,
        diffusion_model_learning_rate: float = 2e-6,
        text_embedding_optimization_steps: int = 500,
        model_fine_tuning_optimization_steps: int = 1000,
        **kwargs,
    ):
        r"""
        Function invoked to run the two Imagic optimization stages on a single image.
        Args:
            prompt (`str` or `List[str]`):
                The target text describing the desired edit.
            image (`torch.FloatTensor` or `PIL.Image.Image`):
                The image to be edited.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            embedding_learning_rate (`float`, *optional*, defaults to 0.001):
                Learning rate used while optimizing the text embedding (stage 1).
            diffusion_model_learning_rate (`float`, *optional*, defaults to 2e-6):
                Learning rate used while fine-tuning the unet (stage 2).
            text_embedding_optimization_steps (`int`, *optional*, defaults to 500):
                Number of optimization steps for stage 1.
            model_fine_tuning_optimization_steps (`int`, *optional*, defaults to 1000):
                Number of optimization steps for stage 2.
        """
        message = "Please use `image` instead of `init_image`."
        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
        image = init_image or image

        accelerator = Accelerator(
            gradient_accumulation_steps=1,
            mixed_precision="fp16",
        )

        # `device` has to be defined even when the deprecated `torch_device` kwarg is
        # absent (the original code only assigned it inside the `if` branch, which
        # raised a NameError on the very next check)
        device = kwargs.pop("torch_device", None)
        if device is not None:
            warnings.warn(
                "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
                " Consider using `pipe.to(torch_device)` instead."
            )

        if device is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # Freeze vae and unet
        self.vae.requires_grad_(False)
        self.unet.requires_grad_(False)
        self.text_encoder.requires_grad_(False)
        self.unet.eval()
        self.vae.eval()
        self.text_encoder.eval()

        if accelerator.is_main_process:
            accelerator.init_trackers(
                "imagic",
                config={
                    "embedding_learning_rate": embedding_learning_rate,
                    "text_embedding_optimization_steps": text_embedding_optimization_steps,
                },
            )

        # get text embeddings for prompt
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = torch.nn.Parameter(
            self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
        )
        text_embeddings = text_embeddings.detach()
        text_embeddings.requires_grad_()
        text_embeddings_orig = text_embeddings.clone()

        # Initialize the optimizer
        optimizer = torch.optim.Adam(
            [text_embeddings],  # only optimize the embeddings
            lr=embedding_learning_rate,
        )

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        latents_dtype = text_embeddings.dtype
        image = image.to(device=self.device, dtype=latents_dtype)
        init_latent_image_dist = self.vae.encode(image).latent_dist
        image_latents = init_latent_image_dist.sample(generator=generator)
        image_latents = 0.18215 * image_latents

        progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
        progress_bar.set_description("Steps")

        global_step = 0

        logger.info("First optimizing the text embedding to better reconstruct the init image")
        for _ in range(text_embedding_optimization_steps):
            with accelerator.accumulate(text_embeddings):
                # Sample noise that we'll add to the latents
                noise = torch.randn(image_latents.shape).to(image_latents.device)
                timesteps = torch.randint(1000, (1,), device=image_latents.device)

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)

                # Predict the noise residual
                noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample

                loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
                accelerator.backward(loss)

                optimizer.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

            logs = {"loss": loss.detach().item()}  # , "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

        accelerator.wait_for_everyone()

        text_embeddings.requires_grad_(False)

        # Now we fine tune the unet to better reconstruct the image
        self.unet.requires_grad_(True)
        self.unet.train()
        optimizer = torch.optim.Adam(
            self.unet.parameters(),  # only optimize unet
            lr=diffusion_model_learning_rate,
        )
        progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)

        logger.info("Next fine tuning the entire model to better reconstruct the init image")
        for _ in range(model_fine_tuning_optimization_steps):
            with accelerator.accumulate(self.unet.parameters()):
                # Sample noise that we'll add to the latents
                noise = torch.randn(image_latents.shape).to(image_latents.device)
                timesteps = torch.randint(1000, (1,), device=image_latents.device)

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)

                # Predict the noise residual
                noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample

                loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
                accelerator.backward(loss)

                optimizer.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

            logs = {"loss": loss.detach().item()}  # , "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

        accelerator.wait_for_everyone()
        self.text_embeddings_orig = text_embeddings_orig
        self.text_embeddings = text_embeddings

    @torch.no_grad()
    def __call__(
        self,
        alpha: float = 1.2,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        guidance_scale: float = 7.5,
        eta: float = 0.0,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.
        Args:
            alpha (`float`, *optional*, defaults to 1.2):
                Interpolation factor between the original target-text embedding and the embedding optimized by
                `train()`.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if self.text_embeddings is None:
            raise ValueError("Please run the pipe.train() before trying to generate an image.")
        if self.text_embeddings_orig is None:
            raise ValueError("Please run the pipe.train() before trying to generate an image.")

        text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings
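        # With alpha = 0 this reproduces the optimized, image-reconstructing
        # embedding; alpha = 1 gives back the raw target-text embedding; the default
        # of 1.2 extrapolates slightly past the text embedding to strengthen the edit.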
+
402
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
403
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
404
+ # corresponds to doing no classifier free guidance.
405
+ do_classifier_free_guidance = guidance_scale > 1.0
406
+ # get unconditional embeddings for classifier free guidance
407
+ if do_classifier_free_guidance:
408
+ uncond_tokens = [""]
409
+ max_length = self.tokenizer.model_max_length
410
+ uncond_input = self.tokenizer(
411
+ uncond_tokens,
412
+ padding="max_length",
413
+ max_length=max_length,
414
+ truncation=True,
415
+ return_tensors="pt",
416
+ )
417
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
418
+
419
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
420
+ seq_len = uncond_embeddings.shape[1]
421
+ uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)
422
+
423
+ # For classifier free guidance, we need to do two forward passes.
424
+ # Here we concatenate the unconditional and text embeddings into a single batch
425
+ # to avoid doing two forward passes
426
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
427
+
428
+ # get the initial random noise unless the user supplied it
429
+
430
+ # Unlike in other pipelines, latents need to be generated in the target device
431
+ # for 1-to-1 results reproducibility with the CompVis implementation.
432
+ # However this currently doesn't work in `mps`.
433
+ latents_shape = (1, self.unet.in_channels, height // 8, width // 8)
434
+ latents_dtype = text_embeddings.dtype
435
+ if self.device.type == "mps":
436
+ # randn does not exist on mps
437
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
438
+ self.device
439
+ )
440
+ else:
441
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
442
+
443
+ # set timesteps
444
+ self.scheduler.set_timesteps(num_inference_steps)
445
+
446
+ # Some schedulers like PNDM have timesteps as arrays
447
+ # It's more optimized to move all timesteps to correct device beforehand
448
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
449
+
450
+ # scale the initial noise by the standard deviation required by the scheduler
451
+ latents = latents * self.scheduler.init_noise_sigma
452
+
453
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
454
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
455
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
456
+ # and should be between [0, 1]
457
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
458
+ extra_step_kwargs = {}
459
+ if accepts_eta:
460
+ extra_step_kwargs["eta"] = eta
461
+
462
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
463
+ # expand the latents if we are doing classifier free guidance
464
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
465
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
466
+
467
+ # predict the noise residual
468
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
469
+
470
+ # perform guidance
471
+ if do_classifier_free_guidance:
472
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
473
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
474
+
475
+ # compute the previous noisy sample x_t -> x_t-1
476
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
477
+
478
+ latents = 1 / 0.18215 * latents
479
+ image = self.vae.decode(latents).sample
480
+
481
+ image = (image / 2 + 0.5).clamp(0, 1)
482
+
483
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
484
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
485
+
486
+ if self.safety_checker is not None:
487
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
488
+ self.device
489
+ )
490
+ image, has_nsfw_concept = self.safety_checker(
491
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
492
+ )
493
+ else:
494
+ has_nsfw_concept = None
495
+
496
+ if output_type == "pil":
497
+ image = self.numpy_to_pil(image)
498
+
499
+ if not return_dict:
500
+ return (image, has_nsfw_concept)
501
+
502
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
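A minimal usage sketch for a community pipeline like the one above, assuming the `custom_pipeline` loading mechanism; the pipeline file name "imagic_stable_diffusion" is inferred from the file listing and should be treated as an assumption:

import torch
from diffusers import DiffusionPipeline

# Hypothetical example: community pipelines are loaded by the name of their
# file under examples/community/ (pipeline name assumed here).
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="imagic_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")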
huggingface_diffusers/examples/community/img2img_inpainting.py ADDED
@@ -0,0 +1,463 @@
1
+ import inspect
2
+ from typing import Callable, List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+
7
+ import PIL
8
+ from diffusers import DiffusionPipeline
9
+ from diffusers.configuration_utils import FrozenDict
10
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
11
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
12
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
13
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
14
+ from diffusers.utils import deprecate, logging
15
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
16
+
17
+
18
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
19
+
20
+
21
+ def prepare_mask_and_masked_image(image, mask):
22
+ image = np.array(image.convert("RGB"))
23
+ image = image[None].transpose(0, 3, 1, 2)
24
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
25
+
26
+ mask = np.array(mask.convert("L"))
27
+ mask = mask.astype(np.float32) / 255.0
28
+ mask = mask[None, None]
29
+ mask[mask < 0.5] = 0
30
+ mask[mask >= 0.5] = 1
31
+ mask = torch.from_numpy(mask)
32
+
33
+ masked_image = image * (mask < 0.5)
34
+
35
+ return mask, masked_image
36
+
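+ # Shape/range notes for the helper above (for a 512x512 PIL input):
+ # mask: (1, 1, 512, 512) float32, binarized to {0, 1} (1 = region to repaint)
+ # masked_image: (1, 3, 512, 512) float32 in [-1, 1], with the repaint region zeroed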
37
+
38
+ def check_size(image, height, width):
39
+ if isinstance(image, PIL.Image.Image):
40
+ w, h = image.size
41
+ elif isinstance(image, torch.Tensor):
42
+ *_, h, w = image.shape
43
+
44
+ if h != height or w != width:
45
+ raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
46
+
47
+
48
+ def overlay_inner_image(image, inner_image, paste_offset: Tuple[int] = (0, 0)):
49
+ inner_image = inner_image.convert("RGBA")
50
+ image = image.convert("RGB")
51
+
52
+ image.paste(inner_image, paste_offset, inner_image)
53
+ image = image.convert("RGB")
54
+
55
+ return image
56
+
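+ # Example (hypothetical): paste a 128x128 RGBA "sticker" onto a 512x512 base
+ # image; the sticker's own alpha channel decides which base pixels stay visible.
+ # composed = overlay_inner_image(base_image, sticker, paste_offset=(64, 64))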
57
+
58
+ class ImageToImageInpaintingPipeline(DiffusionPipeline):
59
+ r"""
60
+ Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
61
+
62
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
63
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
64
+
65
+ Args:
66
+ vae ([`AutoencoderKL`]):
67
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
+ text_encoder ([`CLIPTextModel`]):
69
+ Frozen text-encoder. Stable Diffusion uses the text portion of
70
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
+ tokenizer (`CLIPTokenizer`):
73
+ Tokenizer of class
74
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
+ scheduler ([`SchedulerMixin`]):
77
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
78
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
+ safety_checker ([`StableDiffusionSafetyChecker`]):
80
+ Classification module that estimates whether generated images could be considered offensive or harmful.
81
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
+ feature_extractor ([`CLIPFeatureExtractor`]):
83
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ vae: AutoencoderKL,
89
+ text_encoder: CLIPTextModel,
90
+ tokenizer: CLIPTokenizer,
91
+ unet: UNet2DConditionModel,
92
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
93
+ safety_checker: StableDiffusionSafetyChecker,
94
+ feature_extractor: CLIPFeatureExtractor,
95
+ ):
96
+ super().__init__()
97
+
98
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
99
+ deprecation_message = (
100
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
101
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
102
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
103
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
104
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
105
+ " file"
106
+ )
107
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
108
+ new_config = dict(scheduler.config)
109
+ new_config["steps_offset"] = 1
110
+ scheduler._internal_dict = FrozenDict(new_config)
111
+
112
+ if safety_checker is None:
113
+ logger.warning(
114
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
115
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
116
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
117
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
118
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
119
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
120
+ )
121
+
122
+ self.register_modules(
123
+ vae=vae,
124
+ text_encoder=text_encoder,
125
+ tokenizer=tokenizer,
126
+ unet=unet,
127
+ scheduler=scheduler,
128
+ safety_checker=safety_checker,
129
+ feature_extractor=feature_extractor,
130
+ )
131
+
132
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
133
+ r"""
134
+ Enable sliced attention computation.
135
+
136
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
137
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
138
+
139
+ Args:
140
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
141
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
142
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
143
+ `attention_head_dim` must be a multiple of `slice_size`.
144
+ """
145
+ if slice_size == "auto":
146
+ # half the attention head size is usually a good trade-off between
147
+ # speed and memory
148
+ slice_size = self.unet.config.attention_head_dim // 2
149
+ self.unet.set_attention_slice(slice_size)
150
+
151
+ def disable_attention_slicing(self):
152
+ r"""
153
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
154
+ back to computing attention in one step.
155
+ """
156
+ # set slice_size = `None` to disable `attention slicing`
157
+ self.enable_attention_slicing(None)
158
+
159
+ @torch.no_grad()
160
+ def __call__(
161
+ self,
162
+ prompt: Union[str, List[str]],
163
+ image: Union[torch.FloatTensor, PIL.Image.Image],
164
+ inner_image: Union[torch.FloatTensor, PIL.Image.Image],
165
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
166
+ height: int = 512,
167
+ width: int = 512,
168
+ num_inference_steps: int = 50,
169
+ guidance_scale: float = 7.5,
170
+ negative_prompt: Optional[Union[str, List[str]]] = None,
171
+ num_images_per_prompt: Optional[int] = 1,
172
+ eta: float = 0.0,
173
+ generator: Optional[torch.Generator] = None,
174
+ latents: Optional[torch.FloatTensor] = None,
175
+ output_type: Optional[str] = "pil",
176
+ return_dict: bool = True,
177
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
178
+ callback_steps: Optional[int] = 1,
179
+ **kwargs,
180
+ ):
181
+ r"""
182
+ Function invoked when calling the pipeline for generation.
183
+
184
+ Args:
185
+ prompt (`str` or `List[str]`):
186
+ The prompt or prompts to guide the image generation.
187
+ image (`torch.Tensor` or `PIL.Image.Image`):
188
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
189
+ be masked out with `mask_image` and repainted according to `prompt`.
190
+ inner_image (`torch.Tensor` or `PIL.Image.Image`):
191
+ `Image`, or tensor representing an image batch which will be overlaid onto `image`. Non-transparent
192
+ regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
193
+ the last channel representing the alpha channel, which will be used to blend `inner_image` with
194
+ `image`. If the alpha channel is missing, the image will be forcibly cast to RGBA.
195
+ mask_image (`PIL.Image.Image`):
196
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
197
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
198
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
199
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
200
+ height (`int`, *optional*, defaults to 512):
201
+ The height in pixels of the generated image.
202
+ width (`int`, *optional*, defaults to 512):
203
+ The width in pixels of the generated image.
204
+ num_inference_steps (`int`, *optional*, defaults to 50):
205
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
206
+ expense of slower inference.
207
+ guidance_scale (`float`, *optional*, defaults to 7.5):
208
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
209
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
210
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
211
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
212
+ usually at the expense of lower image quality.
213
+ negative_prompt (`str` or `List[str]`, *optional*):
214
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
215
+ if `guidance_scale` is less than `1`).
216
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
217
+ The number of images to generate per prompt.
218
+ eta (`float`, *optional*, defaults to 0.0):
219
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
220
+ [`schedulers.DDIMScheduler`], will be ignored for others.
221
+ generator (`torch.Generator`, *optional*):
222
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
223
+ deterministic.
224
+ latents (`torch.FloatTensor`, *optional*):
225
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
226
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
227
+ tensor will be generated by sampling using the supplied random `generator`.
228
+ output_type (`str`, *optional*, defaults to `"pil"`):
229
+ The output format of the generated image. Choose between
230
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
231
+ return_dict (`bool`, *optional*, defaults to `True`):
232
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
233
+ plain tuple.
234
+ callback (`Callable`, *optional*):
235
+ A function that will be called every `callback_steps` steps during inference. The function will be
236
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
237
+ callback_steps (`int`, *optional*, defaults to 1):
238
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
239
+ called at every step.
240
+
241
+ Returns:
242
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
243
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
244
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
245
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
246
+ (nsfw) content, according to the `safety_checker`.
247
+ """
248
+
249
+ if isinstance(prompt, str):
250
+ batch_size = 1
251
+ elif isinstance(prompt, list):
252
+ batch_size = len(prompt)
253
+ else:
254
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
255
+
256
+ if height % 8 != 0 or width % 8 != 0:
257
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
258
+
259
+ if (callback_steps is None) or (
260
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
261
+ ):
262
+ raise ValueError(
263
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
264
+ f" {type(callback_steps)}."
265
+ )
266
+
267
+ # check if input sizes are correct
268
+ check_size(image, height, width)
269
+ check_size(inner_image, height, width)
270
+ check_size(mask_image, height, width)
271
+
272
+ # get prompt text embeddings
273
+ text_inputs = self.tokenizer(
274
+ prompt,
275
+ padding="max_length",
276
+ max_length=self.tokenizer.model_max_length,
277
+ return_tensors="pt",
278
+ )
279
+ text_input_ids = text_inputs.input_ids
280
+
281
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
282
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
283
+ logger.warning(
284
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
285
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
286
+ )
287
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
288
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
289
+
290
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
291
+ bs_embed, seq_len, _ = text_embeddings.shape
292
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
293
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
294
+
295
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
296
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
297
+ # corresponds to doing no classifier free guidance.
298
+ do_classifier_free_guidance = guidance_scale > 1.0
299
+ # get unconditional embeddings for classifier free guidance
300
+ if do_classifier_free_guidance:
301
+ uncond_tokens: List[str]
302
+ if negative_prompt is None:
303
+ uncond_tokens = [""]
304
+ elif type(prompt) is not type(negative_prompt):
305
+ raise TypeError(
306
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
307
+ f" {type(prompt)}."
308
+ )
309
+ elif isinstance(negative_prompt, str):
310
+ uncond_tokens = [negative_prompt]
311
+ elif batch_size != len(negative_prompt):
312
+ raise ValueError(
313
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
314
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
315
+ " the batch size of `prompt`."
316
+ )
317
+ else:
318
+ uncond_tokens = negative_prompt
319
+
320
+ max_length = text_input_ids.shape[-1]
321
+ uncond_input = self.tokenizer(
322
+ uncond_tokens,
323
+ padding="max_length",
324
+ max_length=max_length,
325
+ truncation=True,
326
+ return_tensors="pt",
327
+ )
328
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
329
+
330
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
331
+ seq_len = uncond_embeddings.shape[1]
332
+ uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
333
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
334
+
335
+ # For classifier free guidance, we need to do two forward passes.
336
+ # Here we concatenate the unconditional and text embeddings into a single batch
337
+ # to avoid doing two forward passes
338
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
339
+
340
+ # get the initial random noise unless the user supplied it
341
+ # Unlike in other pipelines, latents need to be generated in the target device
342
+ # for 1-to-1 results reproducibility with the CompVis implementation.
343
+ # However this currently doesn't work in `mps`.
344
+ num_channels_latents = self.vae.config.latent_channels
345
+ latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
346
+ latents_dtype = text_embeddings.dtype
347
+ if latents is None:
348
+ if self.device.type == "mps":
349
+ # randn does not exist on mps
350
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
351
+ self.device
352
+ )
353
+ else:
354
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
355
+ else:
356
+ if latents.shape != latents_shape:
357
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
358
+ latents = latents.to(self.device)
359
+
360
+ # overlay the inner image
361
+ image = overlay_inner_image(image, inner_image)
362
+
363
+ # prepare mask and masked_image
364
+ mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
365
+ mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
366
+ masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
367
+
368
+ # resize the mask to latents shape as we concatenate the mask to the latents
369
+ mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
370
+
371
+ # encode the mask image into latents space so we can concatenate it to the latents
372
+ masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
373
+ masked_image_latents = 0.18215 * masked_image_latents
374
+
375
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
376
+ mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
377
+ masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
378
+
379
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
380
+ masked_image_latents = (
381
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
382
+ )
383
+
384
+ num_channels_mask = mask.shape[1]
385
+ num_channels_masked_image = masked_image_latents.shape[1]
386
+
387
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
388
+ raise ValueError(
389
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
390
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
391
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
392
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
393
+ " `pipeline.unet` or your `mask_image` or `image` input."
394
+ )
395
+
396
+ # set timesteps
397
+ self.scheduler.set_timesteps(num_inference_steps)
398
+
399
+ # Some schedulers like PNDM have timesteps as arrays
400
+ # It's more optimized to move all timesteps to correct device beforehand
401
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
402
+
403
+ # scale the initial noise by the standard deviation required by the scheduler
404
+ latents = latents * self.scheduler.init_noise_sigma
405
+
406
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
407
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
408
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
409
+ # and should be between [0, 1]
410
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
411
+ extra_step_kwargs = {}
412
+ if accepts_eta:
413
+ extra_step_kwargs["eta"] = eta
414
+
415
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
416
+ # expand the latents if we are doing classifier free guidance
417
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
418
+
419
+ # concat latents, mask, masked_image_latents in the channel dimension
420
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
421
+
422
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
423
+
424
+ # predict the noise residual
425
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
426
+
427
+ # perform guidance
428
+ if do_classifier_free_guidance:
429
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
430
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
431
+
432
+ # compute the previous noisy sample x_t -> x_t-1
433
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
434
+
435
+ # call the callback, if provided
436
+ if callback is not None and i % callback_steps == 0:
437
+ callback(i, t, latents)
438
+
439
+ latents = 1 / 0.18215 * latents
440
+ image = self.vae.decode(latents).sample
441
+
442
+ image = (image / 2 + 0.5).clamp(0, 1)
443
+
444
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
445
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
446
+
447
+ if self.safety_checker is not None:
448
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
449
+ self.device
450
+ )
451
+ image, has_nsfw_concept = self.safety_checker(
452
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
453
+ )
454
+ else:
455
+ has_nsfw_concept = None
456
+
457
+ if output_type == "pil":
458
+ image = self.numpy_to_pil(image)
459
+
460
+ if not return_dict:
461
+ return (image, has_nsfw_concept)
462
+
463
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
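A minimal usage sketch for the pipeline above; it assumes loading via `custom_pipeline` and an inpainting checkpoint (the UNet must expect 9 input channels: 4 latent + 1 mask + 4 masked-image latent). File paths are placeholders:

import torch
import PIL.Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="img2img_inpainting",
    torch_dtype=torch.float16,
).to("cuda")

# All three inputs must match the requested height/width (512x512 by default).
image = PIL.Image.open("scene.png").resize((512, 512))          # placeholder path
inner_image = PIL.Image.open("overlay.png").resize((512, 512))  # RGBA, placeholder
mask_image = PIL.Image.open("mask.png").resize((512, 512))      # white = repaint

result = pipe(
    prompt="a fantasy landscape",
    image=image,
    inner_image=inner_image,
    mask_image=mask_image,
).images[0]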
huggingface_diffusers/examples/community/interpolate_stable_diffusion.py ADDED
@@ -0,0 +1,524 @@
1
+ import inspect
2
+ import time
3
+ from pathlib import Path
4
+ from typing import Callable, List, Optional, Union
5
+
6
+ import numpy as np
7
+ import torch
8
+
9
+ from diffusers import DiffusionPipeline
10
+ from diffusers.configuration_utils import FrozenDict
11
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
15
+ from diffusers.utils import deprecate, logging
16
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
17
+
18
+
19
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
20
+
21
+
22
+ def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
23
+ """helper function to spherically interpolate two arrays v1 v2"""
24
+
25
+ inputs_are_torch = False
+ if not isinstance(v0, np.ndarray):
26
+ inputs_are_torch = True
27
+ input_device = v0.device
28
+ v0 = v0.cpu().numpy()
29
+ v1 = v1.cpu().numpy()
30
+
31
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
32
+ if np.abs(dot) > DOT_THRESHOLD:
33
+ v2 = (1 - t) * v0 + t * v1
34
+ else:
35
+ theta_0 = np.arccos(dot)
36
+ sin_theta_0 = np.sin(theta_0)
37
+ theta_t = theta_0 * t
38
+ sin_theta_t = np.sin(theta_t)
39
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
40
+ s1 = sin_theta_t / sin_theta_0
41
+ v2 = s0 * v0 + s1 * v1
42
+
43
+ if inputs_are_torch:
44
+ v2 = torch.from_numpy(v2).to(input_device)
45
+
46
+ return v2
47
+
48
+
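+ # Illustration (hypothetical tensors): slerp(0.0, a, b) returns a and
+ # slerp(1.0, a, b) returns b; intermediate t follows the great-circle arc,
+ # which keeps the interpolants distributed like Gaussian noise (a plain lerp
+ # would shrink their norm toward the midpoint).
+ # a, b = torch.randn(1, 4, 64, 64), torch.randn(1, 4, 64, 64)
+ # midpoint = slerp(0.5, a, b)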
49
+ class StableDiffusionWalkPipeline(DiffusionPipeline):
50
+ r"""
51
+ Pipeline for text-to-image generation using Stable Diffusion.
52
+
53
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
54
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
55
+
56
+ Args:
57
+ vae ([`AutoencoderKL`]):
58
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
59
+ text_encoder ([`CLIPTextModel`]):
60
+ Frozen text-encoder. Stable Diffusion uses the text portion of
61
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
62
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
63
+ tokenizer (`CLIPTokenizer`):
64
+ Tokenizer of class
65
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
66
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
67
+ scheduler ([`SchedulerMixin`]):
68
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
69
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
70
+ safety_checker ([`StableDiffusionSafetyChecker`]):
71
+ Classification module that estimates whether generated images could be considered offensive or harmful.
72
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
73
+ feature_extractor ([`CLIPFeatureExtractor`]):
74
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
75
+ """
76
+
77
+ def __init__(
78
+ self,
79
+ vae: AutoencoderKL,
80
+ text_encoder: CLIPTextModel,
81
+ tokenizer: CLIPTokenizer,
82
+ unet: UNet2DConditionModel,
83
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
84
+ safety_checker: StableDiffusionSafetyChecker,
85
+ feature_extractor: CLIPFeatureExtractor,
86
+ ):
87
+ super().__init__()
88
+
89
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
90
+ deprecation_message = (
91
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
92
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
93
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
94
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
95
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
96
+ " file"
97
+ )
98
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
99
+ new_config = dict(scheduler.config)
100
+ new_config["steps_offset"] = 1
101
+ scheduler._internal_dict = FrozenDict(new_config)
102
+
103
+ if safety_checker is None:
104
+ logger.warning(
105
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
106
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
107
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
108
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
109
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
110
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
111
+ )
112
+
113
+ self.register_modules(
114
+ vae=vae,
115
+ text_encoder=text_encoder,
116
+ tokenizer=tokenizer,
117
+ unet=unet,
118
+ scheduler=scheduler,
119
+ safety_checker=safety_checker,
120
+ feature_extractor=feature_extractor,
121
+ )
122
+
123
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
124
+ r"""
125
+ Enable sliced attention computation.
126
+
127
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
128
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
129
+
130
+ Args:
131
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
132
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
133
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
134
+ `attention_head_dim` must be a multiple of `slice_size`.
135
+ """
136
+ if slice_size == "auto":
137
+ # half the attention head size is usually a good trade-off between
138
+ # speed and memory
139
+ slice_size = self.unet.config.attention_head_dim // 2
140
+ self.unet.set_attention_slice(slice_size)
141
+
142
+ def disable_attention_slicing(self):
143
+ r"""
144
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
145
+ back to computing attention in one step.
146
+ """
147
+ # set slice_size = `None` to disable `attention slicing`
148
+ self.enable_attention_slicing(None)
149
+
150
+ @torch.no_grad()
151
+ def __call__(
152
+ self,
153
+ prompt: Optional[Union[str, List[str]]] = None,
154
+ height: int = 512,
155
+ width: int = 512,
156
+ num_inference_steps: int = 50,
157
+ guidance_scale: float = 7.5,
158
+ negative_prompt: Optional[Union[str, List[str]]] = None,
159
+ num_images_per_prompt: Optional[int] = 1,
160
+ eta: float = 0.0,
161
+ generator: Optional[torch.Generator] = None,
162
+ latents: Optional[torch.FloatTensor] = None,
163
+ output_type: Optional[str] = "pil",
164
+ return_dict: bool = True,
165
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
166
+ callback_steps: Optional[int] = 1,
167
+ text_embeddings: Optional[torch.FloatTensor] = None,
168
+ **kwargs,
169
+ ):
170
+ r"""
171
+ Function invoked when calling the pipeline for generation.
172
+
173
+ Args:
174
+ prompt (`str` or `List[str]`, *optional*, defaults to `None`):
175
+ The prompt or prompts to guide the image generation. If not provided, `text_embeddings` is required.
176
+ height (`int`, *optional*, defaults to 512):
177
+ The height in pixels of the generated image.
178
+ width (`int`, *optional*, defaults to 512):
179
+ The width in pixels of the generated image.
180
+ num_inference_steps (`int`, *optional*, defaults to 50):
181
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
182
+ expense of slower inference.
183
+ guidance_scale (`float`, *optional*, defaults to 7.5):
184
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
185
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
186
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
187
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
188
+ usually at the expense of lower image quality.
189
+ negative_prompt (`str` or `List[str]`, *optional*):
190
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
191
+ if `guidance_scale` is less than `1`).
192
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
193
+ The number of images to generate per prompt.
194
+ eta (`float`, *optional*, defaults to 0.0):
195
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
196
+ [`schedulers.DDIMScheduler`], will be ignored for others.
197
+ generator (`torch.Generator`, *optional*):
198
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
199
+ deterministic.
200
+ latents (`torch.FloatTensor`, *optional*):
201
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
202
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
203
+ tensor will be generated by sampling using the supplied random `generator`.
204
+ output_type (`str`, *optional*, defaults to `"pil"`):
205
+ The output format of the generated image. Choose between
206
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
207
+ return_dict (`bool`, *optional*, defaults to `True`):
208
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
209
+ plain tuple.
210
+ callback (`Callable`, *optional*):
211
+ A function that will be called every `callback_steps` steps during inference. The function will be
212
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
213
+ callback_steps (`int`, *optional*, defaults to 1):
214
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
215
+ called at every step.
216
+ text_embeddings (`torch.FloatTensor`, *optional*, defaults to `None`):
217
+ Pre-generated text embeddings to be used as inputs for image generation. Can be used in place of
218
+ `prompt` to avoid re-computing the embeddings. If not provided, the embeddings will be generated from
219
+ the supplied `prompt`.
220
+
221
+ Returns:
222
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
223
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
224
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
225
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
226
+ (nsfw) content, according to the `safety_checker`.
227
+ """
228
+
229
+ if height % 8 != 0 or width % 8 != 0:
230
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
231
+
232
+ if (callback_steps is None) or (
233
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
234
+ ):
235
+ raise ValueError(
236
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
237
+ f" {type(callback_steps)}."
238
+ )
239
+
240
+ if text_embeddings is None:
241
+ if isinstance(prompt, str):
242
+ batch_size = 1
243
+ elif isinstance(prompt, list):
244
+ batch_size = len(prompt)
245
+ else:
246
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
247
+
248
+ # get prompt text embeddings
249
+ text_inputs = self.tokenizer(
250
+ prompt,
251
+ padding="max_length",
252
+ max_length=self.tokenizer.model_max_length,
253
+ return_tensors="pt",
254
+ )
255
+ text_input_ids = text_inputs.input_ids
256
+
257
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
258
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
259
+ logger.warning(
260
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
261
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
262
+ )
263
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
264
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
265
+ else:
266
+ batch_size = text_embeddings.shape[0]
267
+
268
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
269
+ bs_embed, seq_len, _ = text_embeddings.shape
270
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
271
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
272
+
273
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
274
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
275
+ # corresponds to doing no classifier free guidance.
276
+ do_classifier_free_guidance = guidance_scale > 1.0
277
+ # get unconditional embeddings for classifier free guidance
278
+ if do_classifier_free_guidance:
279
+ uncond_tokens: List[str]
280
+ if negative_prompt is None:
281
+ uncond_tokens = [""] * batch_size
282
+ elif type(prompt) is not type(negative_prompt):
283
+ raise TypeError(
284
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
285
+ f" {type(prompt)}."
286
+ )
287
+ elif isinstance(negative_prompt, str):
288
+ uncond_tokens = [negative_prompt]
289
+ elif batch_size != len(negative_prompt):
290
+ raise ValueError(
291
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
292
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
293
+ " the batch size of `prompt`."
294
+ )
295
+ else:
296
+ uncond_tokens = negative_prompt
297
+
298
+ max_length = self.tokenizer.model_max_length
299
+ uncond_input = self.tokenizer(
300
+ uncond_tokens,
301
+ padding="max_length",
302
+ max_length=max_length,
303
+ truncation=True,
304
+ return_tensors="pt",
305
+ )
306
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
307
+
308
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
309
+ seq_len = uncond_embeddings.shape[1]
310
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
311
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
312
+
313
+ # For classifier free guidance, we need to do two forward passes.
314
+ # Here we concatenate the unconditional and text embeddings into a single batch
315
+ # to avoid doing two forward passes
316
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
317
+
318
+ # get the initial random noise unless the user supplied it
319
+
320
+ # Unlike in other pipelines, latents need to be generated in the target device
321
+ # for 1-to-1 results reproducibility with the CompVis implementation.
322
+ # However this currently doesn't work in `mps`.
323
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)
324
+ latents_dtype = text_embeddings.dtype
325
+ if latents is None:
326
+ if self.device.type == "mps":
327
+ # randn does not work reproducibly on mps
328
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
329
+ self.device
330
+ )
331
+ else:
332
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
333
+ else:
334
+ if latents.shape != latents_shape:
335
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
336
+ latents = latents.to(self.device)
337
+
338
+ # set timesteps
339
+ self.scheduler.set_timesteps(num_inference_steps)
340
+
341
+ # Some schedulers like PNDM have timesteps as arrays
342
+ # It's more optimized to move all timesteps to correct device beforehand
343
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
344
+
345
+ # scale the initial noise by the standard deviation required by the scheduler
346
+ latents = latents * self.scheduler.init_noise_sigma
347
+
348
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
349
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
350
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
351
+ # and should be between [0, 1]
352
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
353
+ extra_step_kwargs = {}
354
+ if accepts_eta:
355
+ extra_step_kwargs["eta"] = eta
356
+
357
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
358
+ # expand the latents if we are doing classifier free guidance
359
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
360
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
361
+
362
+ # predict the noise residual
363
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
364
+
365
+ # perform guidance
366
+ if do_classifier_free_guidance:
367
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
368
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
369
+
370
+ # compute the previous noisy sample x_t -> x_t-1
371
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
372
+
373
+ # call the callback, if provided
374
+ if callback is not None and i % callback_steps == 0:
375
+ callback(i, t, latents)
376
+
377
+ latents = 1 / 0.18215 * latents
378
+ image = self.vae.decode(latents).sample
379
+
380
+ image = (image / 2 + 0.5).clamp(0, 1)
381
+
382
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
383
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
384
+
385
+ if self.safety_checker is not None:
386
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
387
+ self.device
388
+ )
389
+ image, has_nsfw_concept = self.safety_checker(
390
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
391
+ )
392
+ else:
393
+ has_nsfw_concept = None
394
+
395
+ if output_type == "pil":
396
+ image = self.numpy_to_pil(image)
397
+
398
+ if not return_dict:
399
+ return (image, has_nsfw_concept)
400
+
401
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
402
+
403
+ def embed_text(self, text):
404
+ """takes in text and turns it into text embeddings"""
405
+ text_input = self.tokenizer(
406
+ text,
407
+ padding="max_length",
408
+ max_length=self.tokenizer.model_max_length,
409
+ truncation=True,
410
+ return_tensors="pt",
411
+ )
412
+ with torch.no_grad():
413
+ embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
414
+ return embed
415
+
416
+ def get_noise(self, seed, dtype=torch.float32, height=512, width=512):
417
+ """Takes in random seed and returns corresponding noise vector"""
418
+ return torch.randn(
419
+ (1, self.unet.in_channels, height // 8, width // 8),
420
+ generator=torch.Generator(device=self.device).manual_seed(seed),
421
+ device=self.device,
422
+ dtype=dtype,
423
+ )
424
+
425
+ def walk(
426
+ self,
427
+ prompts: List[str],
428
+ seeds: List[int],
429
+ num_interpolation_steps: Optional[int] = 6,
430
+ output_dir: Optional[str] = "./dreams",
431
+ name: Optional[str] = None,
432
+ batch_size: Optional[int] = 1,
433
+ height: Optional[int] = 512,
434
+ width: Optional[int] = 512,
435
+ guidance_scale: Optional[float] = 7.5,
436
+ num_inference_steps: Optional[int] = 50,
437
+ eta: Optional[float] = 0.0,
438
+ ) -> List[str]:
439
+ """
440
+ Walks through a series of prompts and seeds, interpolating between them and saving the results to disk.
441
+
442
+ Args:
443
+ prompts (`List[str]`):
444
+ List of prompts to generate images for.
445
+ seeds (`List[int]`):
446
+ List of seeds corresponding to provided prompts. Must be the same length as prompts.
447
+ num_interpolation_steps (`int`, *optional*, defaults to 6):
448
+ Number of interpolation steps to take between prompts.
449
+ output_dir (`str`, *optional*, defaults to `./dreams`):
450
+ Directory to save the generated images to.
451
+ name (`str`, *optional*, defaults to `None`):
452
+ Subdirectory of `output_dir` to save the generated images to. If `None`, the name will
453
+ be the current time.
454
+ batch_size (`int`, *optional*, defaults to 1):
455
+ Number of images to generate at once.
456
+ height (`int`, *optional*, defaults to 512):
457
+ Height of the generated images.
458
+ width (`int`, *optional*, defaults to 512):
459
+ Width of the generated images.
460
+ guidance_scale (`float`, *optional*, defaults to 7.5):
461
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
462
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
463
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
464
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
465
+ usually at the expense of lower image quality.
466
+ num_inference_steps (`int`, *optional*, defaults to 50):
467
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
468
+ expense of slower inference.
469
+ eta (`float`, *optional*, defaults to 0.0):
470
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
471
+ [`schedulers.DDIMScheduler`], will be ignored for others.
472
+
473
+ Returns:
474
+ `List[str]`: List of paths to the generated images.
475
+ """
476
+ if not len(prompts) == len(seeds):
477
+ raise ValueError(
478
+ f"Number of prompts and seeds must be equalGot {len(prompts)} prompts and {len(seeds)} seeds"
479
+ )
480
+
481
+ name = name or time.strftime("%Y%m%d-%H%M%S")
482
+ save_path = Path(output_dir) / name
483
+ save_path.mkdir(exist_ok=True, parents=True)
484
+
485
+ frame_idx = 0
486
+ frame_filepaths = []
487
+ for prompt_a, prompt_b, seed_a, seed_b in zip(prompts, prompts[1:], seeds, seeds[1:]):
488
+ # Embed Text
489
+ embed_a = self.embed_text(prompt_a)
490
+ embed_b = self.embed_text(prompt_b)
491
+
492
+ # Get Noise
493
+ noise_dtype = embed_a.dtype
494
+ noise_a = self.get_noise(seed_a, noise_dtype, height, width)
495
+ noise_b = self.get_noise(seed_b, noise_dtype, height, width)
496
+
497
+ noise_batch, embeds_batch = None, None
498
+ T = np.linspace(0.0, 1.0, num_interpolation_steps)
499
+ for i, t in enumerate(T):
500
+ noise = slerp(float(t), noise_a, noise_b)
501
+ embed = torch.lerp(embed_a, embed_b, t)
502
+
503
+ noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise], dim=0)
504
+ embeds_batch = embed if embeds_batch is None else torch.cat([embeds_batch, embed], dim=0)
505
+
506
+ batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == T.shape[0]
507
+ if batch_is_ready:
508
+ outputs = self(
509
+ latents=noise_batch,
510
+ text_embeddings=embeds_batch,
511
+ height=height,
512
+ width=width,
513
+ guidance_scale=guidance_scale,
514
+ eta=eta,
515
+ num_inference_steps=num_inference_steps,
516
+ )
517
+ noise_batch, embeds_batch = None, None
518
+
519
+ for image in outputs["images"]:
520
+ frame_filepath = str(save_path / f"frame_{frame_idx:06d}.png")
521
+ image.save(frame_filepath)
522
+ frame_filepaths.append(frame_filepath)
523
+ frame_idx += 1
524
+ return frame_filepaths
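A minimal sketch of driving `walk`, assuming the pipeline is loaded via `custom_pipeline="interpolate_stable_diffusion"`; the prompts and seeds are placeholders:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="interpolate_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Interpolates latents (slerp) and text embeddings (lerp) between consecutive
# prompt/seed pairs, writing frame_000000.png, ... under ./dreams/<name>/.
frame_paths = pipe.walk(
    prompts=["a photo of a forest", "a photo of a desert"],
    seeds=[42, 1337],
    num_interpolation_steps=8,
)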
huggingface_diffusers/examples/community/lpw_stable_diffusion.py ADDED
@@ -0,0 +1,1162 @@
1
+ import inspect
2
+ import re
3
+ from typing import Callable, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+
8
+ import diffusers
9
+ import PIL
10
+ from diffusers import SchedulerMixin, StableDiffusionPipeline
11
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
13
+ from diffusers.utils import deprecate, logging
14
+ from packaging import version
15
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
16
+
17
+
18
+ try:
19
+ from diffusers.utils import PIL_INTERPOLATION
20
+ except ImportError:
21
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
22
+ PIL_INTERPOLATION = {
23
+ "linear": PIL.Image.Resampling.BILINEAR,
24
+ "bilinear": PIL.Image.Resampling.BILINEAR,
25
+ "bicubic": PIL.Image.Resampling.BICUBIC,
26
+ "lanczos": PIL.Image.Resampling.LANCZOS,
27
+ "nearest": PIL.Image.Resampling.NEAREST,
28
+ }
29
+ else:
30
+ PIL_INTERPOLATION = {
31
+ "linear": PIL.Image.LINEAR,
32
+ "bilinear": PIL.Image.BILINEAR,
33
+ "bicubic": PIL.Image.BICUBIC,
34
+ "lanczos": PIL.Image.LANCZOS,
35
+ "nearest": PIL.Image.NEAREST,
36
+ }
37
+ # ------------------------------------------------------------------------------
38
+
39
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
40
+
41
+ re_attention = re.compile(
42
+ r"""
43
+ \\\(|
44
+ \\\)|
45
+ \\\[|
46
+ \\]|
47
+ \\\\|
48
+ \\|
49
+ \(|
50
+ \[|
51
+ :([+-]?[.\d]+)\)|
52
+ \)|
53
+ ]|
54
+ [^\\()\[\]:]+|
55
+ :
56
+ """,
57
+ re.X,
58
+ )
59
+
60
+
61
+ def parse_prompt_attention(text):
62
+ """
63
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
64
+ Accepted tokens are:
65
+ (abc) - increases attention to abc by a multiplier of 1.1
66
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
67
+ [abc] - decreases attention to abc by a multiplier of 1.1
68
+ \( - literal character '('
69
+ \[ - literal character '['
70
+ \) - literal character ')'
71
+ \] - literal character ']'
72
+ \\ - literal character '\'
73
+ anything else - just text
74
+ >>> parse_prompt_attention('normal text')
75
+ [['normal text', 1.0]]
76
+ >>> parse_prompt_attention('an (important) word')
77
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
78
+ >>> parse_prompt_attention('(unbalanced')
79
+ [['unbalanced', 1.1]]
80
+ >>> parse_prompt_attention('\(literal\]')
81
+ [['(literal]', 1.0]]
82
+ >>> parse_prompt_attention('(unnecessary)(parens)')
83
+ [['unnecessaryparens', 1.1]]
84
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
85
+ [['a ', 1.0],
86
+ ['house', 1.5730000000000004],
87
+ [' ', 1.1],
88
+ ['on', 1.0],
89
+ [' a ', 1.1],
90
+ ['hill', 0.55],
91
+ [', sun, ', 1.1],
92
+ ['sky', 1.4641000000000006],
93
+ ['.', 1.1]]
94
+ """
95
+
96
+ res = []
97
+ round_brackets = []
98
+ square_brackets = []
99
+
100
+ round_bracket_multiplier = 1.1
101
+ square_bracket_multiplier = 1 / 1.1
102
+
103
+ def multiply_range(start_position, multiplier):
104
+ for p in range(start_position, len(res)):
105
+ res[p][1] *= multiplier
106
+
107
+ for m in re_attention.finditer(text):
108
+ text = m.group(0)
109
+ weight = m.group(1)
110
+
111
+ if text.startswith("\\"):
112
+ res.append([text[1:], 1.0])
113
+ elif text == "(":
114
+ round_brackets.append(len(res))
115
+ elif text == "[":
116
+ square_brackets.append(len(res))
117
+ elif weight is not None and len(round_brackets) > 0:
118
+ multiply_range(round_brackets.pop(), float(weight))
119
+ elif text == ")" and len(round_brackets) > 0:
120
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
121
+ elif text == "]" and len(square_brackets) > 0:
122
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
123
+ else:
124
+ res.append([text, 1.0])
125
+
126
+ for pos in round_brackets:
127
+ multiply_range(pos, round_bracket_multiplier)
128
+
129
+ for pos in square_brackets:
130
+ multiply_range(pos, square_bracket_multiplier)
131
+
132
+ if len(res) == 0:
133
+ res = [["", 1.0]]
134
+
135
+ # merge runs of identical weights
136
+ i = 0
137
+ while i + 1 < len(res):
138
+ if res[i][1] == res[i + 1][1]:
139
+ res[i][0] += res[i + 1][0]
140
+ res.pop(i + 1)
141
+ else:
142
+ i += 1
143
+
144
+ return res
145
+
146
+
147
+ def get_prompts_with_weights(pipe: StableDiffusionPipeline, prompt: List[str], max_length: int):
148
+ r"""
149
+ Tokenize a list of prompts and return its tokens with weights of each token.
150
+
151
+ No padding, starting or ending token is included.
152
+ """
153
+ tokens = []
154
+ weights = []
155
+ truncated = False
156
+ for text in prompt:
157
+ texts_and_weights = parse_prompt_attention(text)
158
+ text_token = []
159
+ text_weight = []
160
+ for word, weight in texts_and_weights:
161
+ # tokenize and discard the starting and the ending token
162
+ token = pipe.tokenizer(word).input_ids[1:-1]
163
+ text_token += token
164
+ # copy the weight by length of token
165
+ text_weight += [weight] * len(token)
166
+ # stop if the text is too long (longer than truncation limit)
167
+ if len(text_token) > max_length:
168
+ truncated = True
169
+ break
170
+ # truncate
171
+ if len(text_token) > max_length:
172
+ truncated = True
173
+ text_token = text_token[:max_length]
174
+ text_weight = text_weight[:max_length]
175
+ tokens.append(text_token)
176
+ weights.append(text_weight)
177
+ if truncated:
178
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
179
+ return tokens, weights
180
+
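For intuition, a minimal sketch of what this helper returns (the token ids are illustrative, not real CLIP ids; `pipe` stands for any loaded pipeline exposing a CLIP tokenizer):

tokens, weights = get_prompts_with_weights(pipe, ["an (important) word"], max_length=75)
# tokens[0]  -> e.g. [550, 2520, 1162]  (one id per word here; real words may split into several)
# weights[0] -> [1.0, 1.1, 1.0]         (the parsed 1.1 is repeated across the tokens of "important")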
181
+
182
+ def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, no_boseos_middle=True, chunk_length=77):
183
+ r"""
184
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
185
+ """
186
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
187
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
188
+ for i in range(len(tokens)):
189
+ tokens[i] = [bos] + tokens[i] + [eos] * (max_length - 1 - len(tokens[i]))
190
+ if no_boseos_middle:
191
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
192
+ else:
193
+ w = []
194
+ if len(weights[i]) == 0:
195
+ w = [1.0] * weights_length
196
+ else:
197
+ for j in range(max_embeddings_multiples):
198
+ w.append(1.0) # weight for starting token in this chunk
199
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
200
+ w.append(1.0) # weight for ending token in this chunk
201
+ w += [1.0] * (weights_length - len(w))
202
+ weights[i] = w[:]
203
+
204
+ return tokens, weights
205
+
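A quick worked example of the chunk arithmetic above, using CLIP's chunk length of 77 (the numbers follow directly from the code):

chunk_length = 77                                                  # CLIP's model_max_length
max_length = 227                                                   # (77 - 2) * 3 + 2
max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)  # -> 3
# no_boseos_middle=True : weights padded to max_length = 227 (one BOS/EOS pair overall)
# no_boseos_middle=False: weights padded to 3 * 77 = 231 (a BOS/EOS weight per chunk)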
206
+
207
+ def get_unweighted_text_embeddings(
208
+ pipe: StableDiffusionPipeline,
209
+ text_input: torch.Tensor,
210
+ chunk_length: int,
211
+ no_boseos_middle: Optional[bool] = True,
212
+ ):
213
+ """
214
+ When the length of tokens exceeds the capacity of the text encoder,
215
+ it should be split into chunks and sent to the text encoder individually.
216
+ """
217
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
218
+ if max_embeddings_multiples > 1:
219
+ text_embeddings = []
220
+ for i in range(max_embeddings_multiples):
221
+ # extract the i-th chunk
222
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
223
+
224
+ # cover the head and the tail by the starting and the ending tokens
225
+ text_input_chunk[:, 0] = text_input[0, 0]
226
+ text_input_chunk[:, -1] = text_input[0, -1]
227
+ text_embedding = pipe.text_encoder(text_input_chunk)[0]
228
+
229
+ if no_boseos_middle:
230
+ if i == 0:
231
+ # discard the ending token
232
+ text_embedding = text_embedding[:, :-1]
233
+ elif i == max_embeddings_multiples - 1:
234
+ # discard the starting token
235
+ text_embedding = text_embedding[:, 1:]
236
+ else:
237
+ # discard both starting and ending tokens
238
+ text_embedding = text_embedding[:, 1:-1]
239
+
240
+ text_embeddings.append(text_embedding)
241
+ text_embeddings = torch.concat(text_embeddings, axis=1)
242
+ else:
243
+ text_embeddings = pipe.text_encoder(text_input)[0]
244
+ return text_embeddings
245
+
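Concretely, chunk i spans tokens [i * 75, i * 75 + 77), so adjacent chunks overlap by two positions, and those positions are overwritten with the prompt's BOS/EOS ids so every chunk looks like a complete 77-token CLIP input:

# With chunk_length=77 and a 227-token input (3 chunks):
#   chunk 0: text_input[:,   0: 77]
#   chunk 1: text_input[:,  75:152]
#   chunk 2: text_input[:, 150:227]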
246
+
247
+ def get_weighted_text_embeddings(
248
+ pipe: StableDiffusionPipeline,
249
+ prompt: Union[str, List[str]],
250
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
251
+ max_embeddings_multiples: Optional[int] = 3,
252
+ no_boseos_middle: Optional[bool] = False,
253
+ skip_parsing: Optional[bool] = False,
254
+ skip_weighting: Optional[bool] = False,
255
+ **kwargs,
256
+ ):
257
+ r"""
258
+ Prompts can be assigned local weights using brackets. For example,
259
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
260
+ and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
261
+
262
+ Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
263
+
264
+ Args:
265
+ pipe (`StableDiffusionPipeline`):
266
+ Pipe to provide access to the tokenizer and the text encoder.
267
+ prompt (`str` or `List[str]`):
268
+ The prompt or prompts to guide the image generation.
269
+ uncond_prompt (`str` or `List[str]`):
270
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
271
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
272
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
273
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
274
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
275
+ If the length of the text tokens is a multiple of the capacity of the text encoder, whether to keep the starting and
276
+ ending tokens in each chunk in the middle.
277
+ skip_parsing (`bool`, *optional*, defaults to `False`):
278
+ Skip the parsing of brackets.
279
+ skip_weighting (`bool`, *optional*, defaults to `False`):
280
+ Skip the weighting. When parsing is skipped, this is forced to `True`.
281
+ """
282
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
283
+ if isinstance(prompt, str):
284
+ prompt = [prompt]
285
+
286
+ if not skip_parsing:
287
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
288
+ if uncond_prompt is not None:
289
+ if isinstance(uncond_prompt, str):
290
+ uncond_prompt = [uncond_prompt]
291
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
292
+ else:
293
+ prompt_tokens = [
294
+ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
295
+ ]
296
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
297
+ if uncond_prompt is not None:
298
+ if isinstance(uncond_prompt, str):
299
+ uncond_prompt = [uncond_prompt]
300
+ uncond_tokens = [
301
+ token[1:-1]
302
+ for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
303
+ ]
304
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
305
+
306
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
307
+ max_length = max([len(token) for token in prompt_tokens])
308
+ if uncond_prompt is not None:
309
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
310
+
311
+ max_embeddings_multiples = min(
312
+ max_embeddings_multiples,
313
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
314
+ )
315
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
316
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
317
+
318
+ # pad the length of tokens and weights
319
+ bos = pipe.tokenizer.bos_token_id
320
+ eos = pipe.tokenizer.eos_token_id
321
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
322
+ prompt_tokens,
323
+ prompt_weights,
324
+ max_length,
325
+ bos,
326
+ eos,
327
+ no_boseos_middle=no_boseos_middle,
328
+ chunk_length=pipe.tokenizer.model_max_length,
329
+ )
330
+ prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
331
+ if uncond_prompt is not None:
332
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
333
+ uncond_tokens,
334
+ uncond_weights,
335
+ max_length,
336
+ bos,
337
+ eos,
338
+ no_boseos_middle=no_boseos_middle,
339
+ chunk_length=pipe.tokenizer.model_max_length,
340
+ )
341
+ uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
342
+
343
+ # get the embeddings
344
+ text_embeddings = get_unweighted_text_embeddings(
345
+ pipe,
346
+ prompt_tokens,
347
+ pipe.tokenizer.model_max_length,
348
+ no_boseos_middle=no_boseos_middle,
349
+ )
350
+ prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=pipe.device)
351
+ if uncond_prompt is not None:
352
+ uncond_embeddings = get_unweighted_text_embeddings(
353
+ pipe,
354
+ uncond_tokens,
355
+ pipe.tokenizer.model_max_length,
356
+ no_boseos_middle=no_boseos_middle,
357
+ )
358
+ uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=pipe.device)
359
+
360
+ # assign weights to the prompts and normalize in the sense of mean
361
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
362
+ if (not skip_parsing) and (not skip_weighting):
363
+ previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
364
+ text_embeddings *= prompt_weights.unsqueeze(-1)
365
+ current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
366
+ text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
367
+ if uncond_prompt is not None:
368
+ previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
369
+ uncond_embeddings *= uncond_weights.unsqueeze(-1)
370
+ current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
371
+ uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
372
+
373
+ if uncond_prompt is not None:
374
+ return text_embeddings, uncond_embeddings
375
+ return text_embeddings, None
376
+
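A hedged usage sketch (assuming `pipe` is a loaded `StableDiffusionPipeline`); with the default `no_boseos_middle=False`, the returned sequence length is 77 per chunk actually used:

text_emb, uncond_emb = get_weighted_text_embeddings(
    pipe,
    prompt="a (beautiful:1.2) landscape, ((highly detailed))",
    uncond_prompt="blurry, low quality",
    max_embeddings_multiples=3,
)
# text_emb.shape -> (1, 77 * k, hidden_size) for the k <= 3 chunks the prompt needed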
377
+
378
+ def preprocess_image(image):
379
+ w, h = image.size
380
+ w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
381
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
382
+ image = np.array(image).astype(np.float32) / 255.0
383
+ image = image[None].transpose(0, 3, 1, 2)
384
+ image = torch.from_numpy(image)
385
+ return 2.0 * image - 1.0
386
+
387
+
388
+ def preprocess_mask(mask, scale_factor=8):
389
+ mask = mask.convert("L")
390
+ w, h = mask.size
391
+ w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
392
+ mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
393
+ mask = np.array(mask).astype(np.float32) / 255.0
394
+ mask = np.tile(mask, (4, 1, 1))
395
+ mask = mask[None].transpose(0, 1, 2, 3)  # add a batch dimension (the transpose is a no-op)
396
+ mask = 1 - mask # repaint white, keep black
397
+ mask = torch.from_numpy(mask)
398
+ return mask
399
+
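Shape walk-through for the two preprocessors, assuming a 512x512 input and the default `scale_factor=8`:

# preprocess_image: 512x512 RGB PIL -> torch.float32 in [-1, 1], shape (1, 3, 512, 512)
# preprocess_mask : 512x512 PIL     -> inverted mask in [0, 1],  shape (1, 4, 64, 64)
#                   (downscaled to latent resolution and tiled across the 4 latent channels)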
400
+
401
+ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
402
+ r"""
403
+ Pipeline for text-to-image generation using Stable Diffusion without a token-length limit, with support for parsing
404
+ weighting in the prompt.
405
+
406
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
407
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
408
+
409
+ Args:
410
+ vae ([`AutoencoderKL`]):
411
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
412
+ text_encoder ([`CLIPTextModel`]):
413
+ Frozen text-encoder. Stable Diffusion uses the text portion of
414
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
415
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
416
+ tokenizer (`CLIPTokenizer`):
417
+ Tokenizer of class
418
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
419
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
420
+ scheduler ([`SchedulerMixin`]):
421
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
422
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
423
+ safety_checker ([`StableDiffusionSafetyChecker`]):
424
+ Classification module that estimates whether generated images could be considered offensive or harmful.
425
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
426
+ feature_extractor ([`CLIPFeatureExtractor`]):
427
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
428
+ """
429
+
430
+ if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):
431
+
432
+ def __init__(
433
+ self,
434
+ vae: AutoencoderKL,
435
+ text_encoder: CLIPTextModel,
436
+ tokenizer: CLIPTokenizer,
437
+ unet: UNet2DConditionModel,
438
+ scheduler: SchedulerMixin,
439
+ safety_checker: StableDiffusionSafetyChecker,
440
+ feature_extractor: CLIPFeatureExtractor,
441
+ requires_safety_checker: bool = True,
442
+ ):
443
+ super().__init__(
444
+ vae=vae,
445
+ text_encoder=text_encoder,
446
+ tokenizer=tokenizer,
447
+ unet=unet,
448
+ scheduler=scheduler,
449
+ safety_checker=safety_checker,
450
+ feature_extractor=feature_extractor,
451
+ requires_safety_checker=requires_safety_checker,
452
+ )
453
+ self.__init__additional__()
454
+
455
+ else:
456
+
457
+ def __init__(
458
+ self,
459
+ vae: AutoencoderKL,
460
+ text_encoder: CLIPTextModel,
461
+ tokenizer: CLIPTokenizer,
462
+ unet: UNet2DConditionModel,
463
+ scheduler: SchedulerMixin,
464
+ safety_checker: StableDiffusionSafetyChecker,
465
+ feature_extractor: CLIPFeatureExtractor,
466
+ ):
467
+ super().__init__(
468
+ vae=vae,
469
+ text_encoder=text_encoder,
470
+ tokenizer=tokenizer,
471
+ unet=unet,
472
+ scheduler=scheduler,
473
+ safety_checker=safety_checker,
474
+ feature_extractor=feature_extractor,
475
+ )
476
+ self.__init__additional__()
477
+
478
+ def __init__additional__(self):
479
+ if not hasattr(self, "vae_scale_factor"):
480
+ setattr(self, "vae_scale_factor", 2 ** (len(self.vae.config.block_out_channels) - 1))
481
+
482
+ @property
483
+ def _execution_device(self):
484
+ r"""
485
+ Returns the device on which the pipeline's models will be executed. After calling
486
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
487
+ hooks.
488
+ """
489
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
490
+ return self.device
491
+ for module in self.unet.modules():
492
+ if (
493
+ hasattr(module, "_hf_hook")
494
+ and hasattr(module._hf_hook, "execution_device")
495
+ and module._hf_hook.execution_device is not None
496
+ ):
497
+ return torch.device(module._hf_hook.execution_device)
498
+ return self.device
499
+
500
+ def _encode_prompt(
501
+ self,
502
+ prompt,
503
+ device,
504
+ num_images_per_prompt,
505
+ do_classifier_free_guidance,
506
+ negative_prompt,
507
+ max_embeddings_multiples,
508
+ ):
509
+ r"""
510
+ Encodes the prompt into text encoder hidden states.
511
+
512
+ Args:
513
+ prompt (`str` or `list(int)`):
514
+ prompt to be encoded
515
+ device: (`torch.device`):
516
+ torch device
517
+ num_images_per_prompt (`int`):
518
+ number of images that should be generated per prompt
519
+ do_classifier_free_guidance (`bool`):
520
+ whether to use classifier free guidance or not
521
+ negative_prompt (`str` or `List[str]`):
522
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
523
+ if `guidance_scale` is less than `1`).
524
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
525
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
526
+ """
527
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
528
+
529
+ if negative_prompt is None:
530
+ negative_prompt = [""] * batch_size
531
+ elif isinstance(negative_prompt, str):
532
+ negative_prompt = [negative_prompt] * batch_size
533
+ if batch_size != len(negative_prompt):
534
+ raise ValueError(
535
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
536
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
537
+ " the batch size of `prompt`."
538
+ )
539
+
540
+ text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
541
+ pipe=self,
542
+ prompt=prompt,
543
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
544
+ max_embeddings_multiples=max_embeddings_multiples,
545
+ )
546
+ bs_embed, seq_len, _ = text_embeddings.shape
547
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
548
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
549
+
550
+ if do_classifier_free_guidance:
551
+ bs_embed, seq_len, _ = uncond_embeddings.shape
552
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
553
+ uncond_embeddings = uncond_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
554
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
555
+
556
+ return text_embeddings
557
+
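# Shape sketch for _encode_prompt (one prompt, num_images_per_prompt=2, guidance on):
#   get_weighted_text_embeddings -> (1, seq, hidden) each for cond and uncond
#   repeat + view                -> (2, seq, hidden) each
#   torch.cat([uncond, cond])    -> (4, seq, hidden), matching the doubled latents in the loop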
558
+ def check_inputs(self, prompt, height, width, strength, callback_steps):
559
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
560
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
561
+
562
+ if strength < 0 or strength > 1:
563
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
564
+
565
+ if height % 8 != 0 or width % 8 != 0:
566
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
567
+
568
+ if (callback_steps is None) or (
569
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
570
+ ):
571
+ raise ValueError(
572
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
573
+ f" {type(callback_steps)}."
574
+ )
575
+
576
+ def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
577
+ if is_text2img:
578
+ return self.scheduler.timesteps.to(device), num_inference_steps
579
+ else:
580
+ # get the original timestep using init_timestep
581
+ offset = self.scheduler.config.get("steps_offset", 0)
582
+ init_timestep = int(num_inference_steps * strength) + offset
583
+ init_timestep = min(init_timestep, num_inference_steps)
584
+
585
+ t_start = max(num_inference_steps - init_timestep + offset, 0)
586
+ timesteps = self.scheduler.timesteps[t_start:].to(device)
587
+ return timesteps, num_inference_steps - t_start
588
+
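# Worked example for get_timesteps (ignoring steps_offset): num_inference_steps=50, strength=0.8
#   init_timestep = int(50 * 0.8) = 40, t_start = 50 - 40 = 10
#   -> the last 40 of the 50 scheduler timesteps are run, i.e. img2img denoises for
#      roughly strength * num_inference_steps steps, starting from a partially noised image.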
589
+ def run_safety_checker(self, image, device, dtype):
590
+ if self.safety_checker is not None:
591
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
592
+ image, has_nsfw_concept = self.safety_checker(
593
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
594
+ )
595
+ else:
596
+ has_nsfw_concept = None
597
+ return image, has_nsfw_concept
598
+
599
+ def decode_latents(self, latents):
600
+ latents = 1 / 0.18215 * latents
601
+ image = self.vae.decode(latents).sample
602
+ image = (image / 2 + 0.5).clamp(0, 1)
603
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
604
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
605
+ return image
606
+
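# Note: 0.18215 is the scaling factor applied when Stable Diffusion latents are produced,
# so dividing by it restores the VAE's native scale before decoding. Shape sketch for a
# 512x512 generation: (1, 4, 64, 64) latents -> (1, 512, 512, 3) float32 image in [0, 1].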
607
+ def prepare_extra_step_kwargs(self, generator, eta):
608
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
609
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
610
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
611
+ # and should be between [0, 1]
612
+
613
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
614
+ extra_step_kwargs = {}
615
+ if accepts_eta:
616
+ extra_step_kwargs["eta"] = eta
617
+
618
+ # check if the scheduler accepts generator
619
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
620
+ if accepts_generator:
621
+ extra_step_kwargs["generator"] = generator
622
+ return extra_step_kwargs
623
+
624
+ def prepare_latents(self, image, timestep, batch_size, height, width, dtype, device, generator, latents=None):
625
+ if image is None:
626
+ shape = (
627
+ batch_size,
628
+ self.unet.in_channels,
629
+ height // self.vae_scale_factor,
630
+ width // self.vae_scale_factor,
631
+ )
632
+
633
+ if latents is None:
634
+ if device.type == "mps":
635
+ # randn does not work reproducibly on mps
636
+ latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
637
+ else:
638
+ latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
639
+ else:
640
+ if latents.shape != shape:
641
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
642
+ latents = latents.to(device)
643
+
644
+ # scale the initial noise by the standard deviation required by the scheduler
645
+ latents = latents * self.scheduler.init_noise_sigma
646
+ return latents, None, None
647
+ else:
648
+ init_latent_dist = self.vae.encode(image).latent_dist
649
+ init_latents = init_latent_dist.sample(generator=generator)
650
+ init_latents = 0.18215 * init_latents
651
+ init_latents = torch.cat([init_latents] * batch_size, dim=0)
652
+ init_latents_orig = init_latents
653
+ shape = init_latents.shape
654
+
655
+ # add noise to latents using the timesteps
656
+ if device.type == "mps":
657
+ noise = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
658
+ else:
659
+ noise = torch.randn(shape, generator=generator, device=device, dtype=dtype)
660
+ latents = self.scheduler.add_noise(init_latents, noise, timestep)
661
+ return latents, init_latents_orig, noise
662
+
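# The two branches above in short: text2img (image is None) starts from pure Gaussian noise
# scaled by the scheduler's init_noise_sigma; img2img/inpaint encode `image` with the VAE,
# keep a copy (init_latents_orig, reused for masking in the denoising loop), and noise it
# to the starting timestep with scheduler.add_noise.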
663
+ @torch.no_grad()
664
+ def __call__(
665
+ self,
666
+ prompt: Union[str, List[str]],
667
+ negative_prompt: Optional[Union[str, List[str]]] = None,
668
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
669
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
670
+ height: int = 512,
671
+ width: int = 512,
672
+ num_inference_steps: int = 50,
673
+ guidance_scale: float = 7.5,
674
+ strength: float = 0.8,
675
+ num_images_per_prompt: Optional[int] = 1,
676
+ eta: float = 0.0,
677
+ generator: Optional[torch.Generator] = None,
678
+ latents: Optional[torch.FloatTensor] = None,
679
+ max_embeddings_multiples: Optional[int] = 3,
680
+ output_type: Optional[str] = "pil",
681
+ return_dict: bool = True,
682
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
683
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
684
+ callback_steps: Optional[int] = 1,
685
+ **kwargs,
686
+ ):
687
+ r"""
688
+ Function invoked when calling the pipeline for generation.
689
+
690
+ Args:
691
+ prompt (`str` or `List[str]`):
692
+ The prompt or prompts to guide the image generation.
693
+ negative_prompt (`str` or `List[str]`, *optional*):
694
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
695
+ if `guidance_scale` is less than `1`).
696
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
697
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
698
+ process.
699
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
700
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
701
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
702
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
703
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
704
+ height (`int`, *optional*, defaults to 512):
705
+ The height in pixels of the generated image.
706
+ width (`int`, *optional*, defaults to 512):
707
+ The width in pixels of the generated image.
708
+ num_inference_steps (`int`, *optional*, defaults to 50):
709
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
710
+ expense of slower inference.
711
+ guidance_scale (`float`, *optional*, defaults to 7.5):
712
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
713
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
714
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
715
+ 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
716
+ usually at the expense of lower image quality.
717
+ strength (`float`, *optional*, defaults to 0.8):
718
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
719
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
720
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
721
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
722
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
723
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
724
+ The number of images to generate per prompt.
725
+ eta (`float`, *optional*, defaults to 0.0):
726
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
727
+ [`schedulers.DDIMScheduler`], will be ignored for others.
728
+ generator (`torch.Generator`, *optional*):
729
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
730
+ deterministic.
731
+ latents (`torch.FloatTensor`, *optional*):
732
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
733
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
734
+ tensor will be generated by sampling using the supplied random `generator`.
735
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
736
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
737
+ output_type (`str`, *optional*, defaults to `"pil"`):
738
+ The output format of the generated image. Choose between
739
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
740
+ return_dict (`bool`, *optional*, defaults to `True`):
741
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
742
+ plain tuple.
743
+ callback (`Callable`, *optional*):
744
+ A function that will be called every `callback_steps` steps during inference. The function will be
745
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
746
+ is_cancelled_callback (`Callable`, *optional*):
747
+ A function that will be called every `callback_steps` steps during inference. If the function returns
748
+ `True`, the inference will be cancelled.
749
+ callback_steps (`int`, *optional*, defaults to 1):
750
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
751
+ called at every step.
752
+
753
+ Returns:
754
+ `None` if cancelled by `is_cancelled_callback`,
755
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
756
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
757
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
758
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
759
+ (nsfw) content, according to the `safety_checker`.
760
+ """
761
+ message = "Please use `image` instead of `init_image`."
762
+ init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
763
+ image = init_image or image
764
+
765
+ # 0. Default height and width to unet
766
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
767
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
768
+
769
+ # 1. Check inputs. Raise error if not correct
770
+ self.check_inputs(prompt, height, width, strength, callback_steps)
771
+
772
+ # 2. Define call parameters
773
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
774
+ device = self._execution_device
775
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
776
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
777
+ # corresponds to doing no classifier free guidance.
778
+ do_classifier_free_guidance = guidance_scale > 1.0
779
+
780
+ # 3. Encode input prompt
781
+ text_embeddings = self._encode_prompt(
782
+ prompt,
783
+ device,
784
+ num_images_per_prompt,
785
+ do_classifier_free_guidance,
786
+ negative_prompt,
787
+ max_embeddings_multiples,
788
+ )
789
+ dtype = text_embeddings.dtype
790
+
791
+ # 4. Preprocess image and mask
792
+ if isinstance(image, PIL.Image.Image):
793
+ image = preprocess_image(image)
794
+ if image is not None:
795
+ image = image.to(device=self.device, dtype=dtype)
796
+ if isinstance(mask_image, PIL.Image.Image):
797
+ mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
798
+ if mask_image is not None:
799
+ mask = mask_image.to(device=self.device, dtype=dtype)
800
+ mask = torch.cat([mask] * batch_size * num_images_per_prompt)
801
+ else:
802
+ mask = None
803
+
804
+ # 5. set timesteps
805
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
806
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
807
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
808
+
809
+ # 6. Prepare latent variables
810
+ latents, init_latents_orig, noise = self.prepare_latents(
811
+ image,
812
+ latent_timestep,
813
+ batch_size * num_images_per_prompt,
814
+ height,
815
+ width,
816
+ dtype,
817
+ device,
818
+ generator,
819
+ latents,
820
+ )
821
+
822
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
823
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
824
+
825
+ # 8. Denoising loop
826
+ for i, t in enumerate(self.progress_bar(timesteps)):
827
+ # expand the latents if we are doing classifier free guidance
828
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
829
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
830
+
831
+ # predict the noise residual
832
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
833
+
834
+ # perform guidance
835
+ if do_classifier_free_guidance:
836
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
837
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
838
+
839
+ # compute the previous noisy sample x_t -> x_t-1
840
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
841
+
842
+ if mask is not None:
843
+ # masking
844
+ init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
845
+ latents = (init_latents_proper * mask) + (latents * (1 - mask))
846
+
847
+ # call the callback, if provided
848
+ if i % callback_steps == 0:
849
+ if callback is not None:
850
+ callback(i, t, latents)
851
+ if is_cancelled_callback is not None and is_cancelled_callback():
852
+ return None
853
+
854
+ # 9. Post-processing
855
+ image = self.decode_latents(latents)
856
+
857
+ # 10. Run safety checker
858
+ image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
859
+
860
+ # 11. Convert to PIL
861
+ if output_type == "pil":
862
+ image = self.numpy_to_pil(image)
863
+
864
+ if not return_dict:
865
+ return image, has_nsfw_concept
866
+
867
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
868
+
869
+ def text2img(
870
+ self,
871
+ prompt: Union[str, List[str]],
872
+ negative_prompt: Optional[Union[str, List[str]]] = None,
873
+ height: int = 512,
874
+ width: int = 512,
875
+ num_inference_steps: int = 50,
876
+ guidance_scale: float = 7.5,
877
+ num_images_per_prompt: Optional[int] = 1,
878
+ eta: float = 0.0,
879
+ generator: Optional[torch.Generator] = None,
880
+ latents: Optional[torch.FloatTensor] = None,
881
+ max_embeddings_multiples: Optional[int] = 3,
882
+ output_type: Optional[str] = "pil",
883
+ return_dict: bool = True,
884
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
885
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
886
+ callback_steps: Optional[int] = 1,
887
+ **kwargs,
888
+ ):
889
+ r"""
890
+ Function for text-to-image generation.
891
+ Args:
892
+ prompt (`str` or `List[str]`):
893
+ The prompt or prompts to guide the image generation.
894
+ negative_prompt (`str` or `List[str]`, *optional*):
895
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
896
+ if `guidance_scale` is less than `1`).
897
+ height (`int`, *optional*, defaults to 512):
898
+ The height in pixels of the generated image.
899
+ width (`int`, *optional*, defaults to 512):
900
+ The width in pixels of the generated image.
901
+ num_inference_steps (`int`, *optional*, defaults to 50):
902
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
903
+ expense of slower inference.
904
+ guidance_scale (`float`, *optional*, defaults to 7.5):
905
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
906
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
907
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
908
+ 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
909
+ usually at the expense of lower image quality.
910
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
911
+ The number of images to generate per prompt.
912
+ eta (`float`, *optional*, defaults to 0.0):
913
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
914
+ [`schedulers.DDIMScheduler`], will be ignored for others.
915
+ generator (`torch.Generator`, *optional*):
916
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
917
+ deterministic.
918
+ latents (`torch.FloatTensor`, *optional*):
919
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
920
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
921
+ tensor will be generated by sampling using the supplied random `generator`.
922
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
923
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
924
+ output_type (`str`, *optional*, defaults to `"pil"`):
925
+ The output format of the generated image. Choose between
926
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
927
+ return_dict (`bool`, *optional*, defaults to `True`):
928
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
929
+ plain tuple.
930
+ callback (`Callable`, *optional*):
931
+ A function that will be called every `callback_steps` steps during inference. The function will be
932
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
933
+ is_cancelled_callback (`Callable`, *optional*):
934
+ A function that will be called every `callback_steps` steps during inference. If the function returns
935
+ `True`, the inference will be cancelled.
936
+ callback_steps (`int`, *optional*, defaults to 1):
937
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
938
+ called at every step.
939
+ Returns:
940
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
941
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
942
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
943
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
944
+ (nsfw) content, according to the `safety_checker`.
945
+ """
946
+ return self.__call__(
947
+ prompt=prompt,
948
+ negative_prompt=negative_prompt,
949
+ height=height,
950
+ width=width,
951
+ num_inference_steps=num_inference_steps,
952
+ guidance_scale=guidance_scale,
953
+ num_images_per_prompt=num_images_per_prompt,
954
+ eta=eta,
955
+ generator=generator,
956
+ latents=latents,
957
+ max_embeddings_multiples=max_embeddings_multiples,
958
+ output_type=output_type,
959
+ return_dict=return_dict,
960
+ callback=callback,
961
+ is_cancelled_callback=is_cancelled_callback,
962
+ callback_steps=callback_steps,
963
+ **kwargs,
964
+ )
965
+
966
+ def img2img(
967
+ self,
968
+ image: Union[torch.FloatTensor, PIL.Image.Image],
969
+ prompt: Union[str, List[str]],
970
+ negative_prompt: Optional[Union[str, List[str]]] = None,
971
+ strength: float = 0.8,
972
+ num_inference_steps: Optional[int] = 50,
973
+ guidance_scale: Optional[float] = 7.5,
974
+ num_images_per_prompt: Optional[int] = 1,
975
+ eta: Optional[float] = 0.0,
976
+ generator: Optional[torch.Generator] = None,
977
+ max_embeddings_multiples: Optional[int] = 3,
978
+ output_type: Optional[str] = "pil",
979
+ return_dict: bool = True,
980
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
981
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
982
+ callback_steps: Optional[int] = 1,
983
+ **kwargs,
984
+ ):
985
+ r"""
986
+ Function for image-to-image generation.
987
+ Args:
988
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
989
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
990
+ process.
991
+ prompt (`str` or `List[str]`):
992
+ The prompt or prompts to guide the image generation.
993
+ negative_prompt (`str` or `List[str]`, *optional*):
994
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
995
+ if `guidance_scale` is less than `1`).
996
+ strength (`float`, *optional*, defaults to 0.8):
997
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
998
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
999
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
1000
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
1001
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
1002
+ num_inference_steps (`int`, *optional*, defaults to 50):
1003
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1004
+ expense of slower inference. This parameter will be modulated by `strength`.
1005
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1006
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1007
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1008
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1009
+ 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
1010
+ usually at the expense of lower image quality.
1011
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1012
+ The number of images to generate per prompt.
1013
+ eta (`float`, *optional*, defaults to 0.0):
1014
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1015
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1016
+ generator (`torch.Generator`, *optional*):
1017
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1018
+ deterministic.
1019
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1020
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1021
+ output_type (`str`, *optional*, defaults to `"pil"`):
1022
+ The output format of the generate image. Choose between
1023
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1024
+ return_dict (`bool`, *optional*, defaults to `True`):
1025
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1026
+ plain tuple.
1027
+ callback (`Callable`, *optional*):
1028
+ A function that will be called every `callback_steps` steps during inference. The function will be
1029
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1030
+ is_cancelled_callback (`Callable`, *optional*):
1031
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1032
+ `True`, the inference will be cancelled.
1033
+ callback_steps (`int`, *optional*, defaults to 1):
1034
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1035
+ called at every step.
1036
+ Returns:
1037
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1038
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1039
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1040
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1041
+ (nsfw) content, according to the `safety_checker`.
1042
+ """
1043
+ return self.__call__(
1044
+ prompt=prompt,
1045
+ negative_prompt=negative_prompt,
1046
+ image=image,
1047
+ num_inference_steps=num_inference_steps,
1048
+ guidance_scale=guidance_scale,
1049
+ strength=strength,
1050
+ num_images_per_prompt=num_images_per_prompt,
1051
+ eta=eta,
1052
+ generator=generator,
1053
+ max_embeddings_multiples=max_embeddings_multiples,
1054
+ output_type=output_type,
1055
+ return_dict=return_dict,
1056
+ callback=callback,
1057
+ is_cancelled_callback=is_cancelled_callback,
1058
+ callback_steps=callback_steps,
1059
+ **kwargs,
1060
+ )
1061
+
1062
+ def inpaint(
1063
+ self,
1064
+ image: Union[torch.FloatTensor, PIL.Image.Image],
1065
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
1066
+ prompt: Union[str, List[str]],
1067
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1068
+ strength: float = 0.8,
1069
+ num_inference_steps: Optional[int] = 50,
1070
+ guidance_scale: Optional[float] = 7.5,
1071
+ num_images_per_prompt: Optional[int] = 1,
1072
+ eta: Optional[float] = 0.0,
1073
+ generator: Optional[torch.Generator] = None,
1074
+ max_embeddings_multiples: Optional[int] = 3,
1075
+ output_type: Optional[str] = "pil",
1076
+ return_dict: bool = True,
1077
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1078
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
1079
+ callback_steps: Optional[int] = 1,
1080
+ **kwargs,
1081
+ ):
1082
+ r"""
1083
+ Function for inpainting.
1084
+ Args:
1085
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
1086
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1087
+ process. This is the image whose masked region will be inpainted.
1088
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
1089
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1090
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1091
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1092
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1093
+ prompt (`str` or `List[str]`):
1094
+ The prompt or prompts to guide the image generation.
1095
+ negative_prompt (`str` or `List[str]`, *optional*):
1096
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1097
+ if `guidance_scale` is less than `1`).
1098
+ strength (`float`, *optional*, defaults to 0.8):
1099
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1100
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
1101
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1102
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1103
+ num_inference_steps (`int`, *optional*, defaults to 50):
1104
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1105
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1106
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1107
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1108
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1109
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1110
+ 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
1111
+ usually at the expense of lower image quality.
1112
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1113
+ The number of images to generate per prompt.
1114
+ eta (`float`, *optional*, defaults to 0.0):
1115
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1116
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1117
+ generator (`torch.Generator`, *optional*):
1118
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1119
+ deterministic.
1120
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1121
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1122
+ output_type (`str`, *optional*, defaults to `"pil"`):
1123
+ The output format of the generated image. Choose between
1124
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1125
+ return_dict (`bool`, *optional*, defaults to `True`):
1126
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1127
+ plain tuple.
1128
+ callback (`Callable`, *optional*):
1129
+ A function that will be called every `callback_steps` steps during inference. The function will be
1130
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1131
+ is_cancelled_callback (`Callable`, *optional*):
1132
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1133
+ `True`, the inference will be cancelled.
1134
+ callback_steps (`int`, *optional*, defaults to 1):
1135
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1136
+ called at every step.
1137
+ Returns:
1138
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1139
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1140
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1141
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1142
+ (nsfw) content, according to the `safety_checker`.
1143
+ """
1144
+ return self.__call__(
1145
+ prompt=prompt,
1146
+ negative_prompt=negative_prompt,
1147
+ image=image,
1148
+ mask_image=mask_image,
1149
+ num_inference_steps=num_inference_steps,
1150
+ guidance_scale=guidance_scale,
1151
+ strength=strength,
1152
+ num_images_per_prompt=num_images_per_prompt,
1153
+ eta=eta,
1154
+ generator=generator,
1155
+ max_embeddings_multiples=max_embeddings_multiples,
1156
+ output_type=output_type,
1157
+ return_dict=return_dict,
1158
+ callback=callback,
1159
+ is_cancelled_callback=is_cancelled_callback,
1160
+ callback_steps=callback_steps,
1161
+ **kwargs,
1162
+ )
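A hedged end-to-end sketch of loading this community pipeline (the checkpoint id is illustrative; any Stable Diffusion v1-style checkpoint should work, and `custom_pipeline="lpw_stable_diffusion"` resolves to this file):

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Weighted syntax and longer-than-77-token prompts are handled by the parser defined above.
image = pipe.text2img(
    "a (masterpiece:1.2), ((ultra detailed)) portrait of an astronaut, best quality",
    negative_prompt="(low quality:1.4), blurry",
    width=512,
    height=512,
    max_embeddings_multiples=3,
).images[0]
image.save("astronaut.png")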
huggingface_diffusers/examples/community/lpw_stable_diffusion_onnx.py ADDED
@@ -0,0 +1,1148 @@
1
+ import inspect
2
+ import re
3
+ from typing import Callable, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+
8
+ import diffusers
9
+ import PIL
10
+ from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
11
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
12
+ from diffusers.utils import deprecate, logging
13
+ from packaging import version
14
+ from transformers import CLIPFeatureExtractor, CLIPTokenizer
15
+
16
+
17
+ try:
18
+ from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE
19
+ except ImportError:
20
+ ORT_TO_NP_TYPE = {
21
+ "tensor(bool)": np.bool_,
22
+ "tensor(int8)": np.int8,
23
+ "tensor(uint8)": np.uint8,
24
+ "tensor(int16)": np.int16,
25
+ "tensor(uint16)": np.uint16,
26
+ "tensor(int32)": np.int32,
27
+ "tensor(uint32)": np.uint32,
28
+ "tensor(int64)": np.int64,
29
+ "tensor(uint64)": np.uint64,
30
+ "tensor(float16)": np.float16,
31
+ "tensor(float)": np.float32,
32
+ "tensor(double)": np.float64,
33
+ }
34
+
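# Hedged usage sketch: this fallback table maps ONNX Runtime element-type strings to NumPy
# dtypes, e.g. to cast latents to whatever dtype the exported UNet expects:
#   unet_input_type = pipe.unet.model.get_inputs()[0].type   # e.g. "tensor(float16)"
#   latents = latents.astype(ORT_TO_NP_TYPE[unet_input_type])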
35
+ try:
36
+ from diffusers.utils import PIL_INTERPOLATION
37
+ except ImportError:
38
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
39
+ PIL_INTERPOLATION = {
40
+ "linear": PIL.Image.Resampling.BILINEAR,
41
+ "bilinear": PIL.Image.Resampling.BILINEAR,
42
+ "bicubic": PIL.Image.Resampling.BICUBIC,
43
+ "lanczos": PIL.Image.Resampling.LANCZOS,
44
+ "nearest": PIL.Image.Resampling.NEAREST,
45
+ }
46
+ else:
47
+ PIL_INTERPOLATION = {
48
+ "linear": PIL.Image.LINEAR,
49
+ "bilinear": PIL.Image.BILINEAR,
50
+ "bicubic": PIL.Image.BICUBIC,
51
+ "lanczos": PIL.Image.LANCZOS,
52
+ "nearest": PIL.Image.NEAREST,
53
+ }
54
+ # ------------------------------------------------------------------------------
55
+
56
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
57
+
58
+ re_attention = re.compile(
59
+ r"""
60
+ \\\(|
61
+ \\\)|
62
+ \\\[|
63
+ \\]|
64
+ \\\\|
65
+ \\|
66
+ \(|
67
+ \[|
68
+ :([+-]?[.\d]+)\)|
69
+ \)|
70
+ ]|
71
+ [^\\()\[\]:]+|
72
+ :
73
+ """,
74
+ re.X,
75
+ )
76
+
77
+
78
+ def parse_prompt_attention(text):
79
+ """
80
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
81
+ Accepted tokens are:
82
+ (abc) - increases attention to abc by a multiplier of 1.1
83
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
84
+ [abc] - decreases attention to abc by a multiplier of 1.1
85
+ \( - literal character '('
86
+ \[ - literal character '['
87
+ \) - literal character ')'
88
+ \] - literal character ']'
89
+ \\ - literal character '\'
90
+ anything else - just text
91
+ >>> parse_prompt_attention('normal text')
92
+ [['normal text', 1.0]]
93
+ >>> parse_prompt_attention('an (important) word')
94
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
95
+ >>> parse_prompt_attention('(unbalanced')
96
+ [['unbalanced', 1.1]]
97
+ >>> parse_prompt_attention('\(literal\]')
98
+ [['(literal]', 1.0]]
99
+ >>> parse_prompt_attention('(unnecessary)(parens)')
100
+ [['unnecessaryparens', 1.1]]
101
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
102
+ [['a ', 1.0],
103
+ ['house', 1.5730000000000004],
104
+ [' ', 1.1],
105
+ ['on', 1.0],
106
+ [' a ', 1.1],
107
+ ['hill', 0.55],
108
+ [', sun, ', 1.1],
109
+ ['sky', 1.4641000000000006],
110
+ ['.', 1.1]]
111
+ """
112
+
113
+ res = []
114
+ round_brackets = []
115
+ square_brackets = []
116
+
117
+ round_bracket_multiplier = 1.1
118
+ square_bracket_multiplier = 1 / 1.1
119
+
120
+ def multiply_range(start_position, multiplier):
121
+ for p in range(start_position, len(res)):
122
+ res[p][1] *= multiplier
123
+
124
+ for m in re_attention.finditer(text):
125
+ text = m.group(0)
126
+ weight = m.group(1)
127
+
128
+ if text.startswith("\\"):
129
+ res.append([text[1:], 1.0])
130
+ elif text == "(":
131
+ round_brackets.append(len(res))
132
+ elif text == "[":
133
+ square_brackets.append(len(res))
134
+ elif weight is not None and len(round_brackets) > 0:
135
+ multiply_range(round_brackets.pop(), float(weight))
136
+ elif text == ")" and len(round_brackets) > 0:
137
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
138
+ elif text == "]" and len(square_brackets) > 0:
139
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
140
+ else:
141
+ res.append([text, 1.0])
142
+
143
+ for pos in round_brackets:
144
+ multiply_range(pos, round_bracket_multiplier)
145
+
146
+ for pos in square_brackets:
147
+ multiply_range(pos, square_bracket_multiplier)
148
+
149
+ if len(res) == 0:
150
+ res = [["", 1.0]]
151
+
152
+ # merge runs of identical weights
153
+ i = 0
154
+ while i + 1 < len(res):
155
+ if res[i][1] == res[i + 1][1]:
156
+ res[i][0] += res[i + 1][0]
157
+ res.pop(i + 1)
158
+ else:
159
+ i += 1
160
+
161
+ return res
162
+
163
+
164
+ def get_prompts_with_weights(pipe, prompt: List[str], max_length: int):
165
+ r"""
166
+ Tokenize a list of prompts and return their tokens together with the weight of each token.
167
+
168
+ No padding, starting or ending token is included.
169
+ """
170
+ tokens = []
171
+ weights = []
172
+ truncated = False
173
+ for text in prompt:
174
+ texts_and_weights = parse_prompt_attention(text)
175
+ text_token = []
176
+ text_weight = []
177
+ for word, weight in texts_and_weights:
178
+ # tokenize and discard the starting and the ending token
179
+ token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1]
180
+ text_token += list(token)
181
+ # copy the weight by length of token
182
+ text_weight += [weight] * len(token)
183
+ # stop if the text is too long (longer than truncation limit)
184
+ if len(text_token) > max_length:
185
+ truncated = True
186
+ break
187
+ # truncate
188
+ if len(text_token) > max_length:
189
+ truncated = True
190
+ text_token = text_token[:max_length]
191
+ text_weight = text_weight[:max_length]
192
+ tokens.append(text_token)
193
+ weights.append(text_weight)
194
+ if truncated:
195
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
196
+ return tokens, weights
197
+
198
+
199
+ def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, no_boseos_middle=True, chunk_length=77):
200
+ r"""
201
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
202
+ """
203
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
204
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
205
+ for i in range(len(tokens)):
206
+ tokens[i] = [bos] + tokens[i] + [eos] * (max_length - 1 - len(tokens[i]))
207
+ if no_boseos_middle:
208
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
209
+ else:
210
+ w = []
211
+ if len(weights[i]) == 0:
212
+ w = [1.0] * weights_length
213
+ else:
214
+ for j in range(max_embeddings_multiples):
215
+ w.append(1.0) # weight for starting token in this chunk
216
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
217
+ w.append(1.0) # weight for ending token in this chunk
218
+ w += [1.0] * (weights_length - len(w))
219
+ weights[i] = w[:]
220
+
221
+ return tokens, weights
222
+
223
+
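The `(max_length - 2) // (chunk_length - 2)` arithmetic accounts for the BOS/EOS pair that wraps every chunk: only `chunk_length - 2` positions per chunk carry real tokens. A worked example with CLIP's usual 77-token window:

```python
# With model_max_length = 77, three chunks of real tokens plus one shared BOS/EOS pair
chunk_length = 77
max_embeddings_multiples = 3
max_length = (chunk_length - 2) * max_embeddings_multiples + 2  # 227
assert (max_length - 2) // (chunk_length - 2) == max_embeddings_multiples
```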
224
+ def get_unweighted_text_embeddings(
225
+ pipe,
226
+ text_input: np.ndarray,
227
+ chunk_length: int,
228
+ no_boseos_middle: Optional[bool] = True,
229
+ ):
230
+ """
231
+ When the length of tokens exceeds the capacity of the text encoder, the input
232
+ is split into chunks that are sent to the text encoder one at a time.
233
+ """
234
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
235
+ if max_embeddings_multiples > 1:
236
+ text_embeddings = []
237
+ for i in range(max_embeddings_multiples):
238
+ # extract the i-th chunk
239
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy()
240
+
241
+ # cover the head and the tail by the starting and the ending tokens
242
+ text_input_chunk[:, 0] = text_input[0, 0]
243
+ text_input_chunk[:, -1] = text_input[0, -1]
244
+
245
+ text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0]
246
+
247
+ if no_boseos_middle:
248
+ if i == 0:
249
+ # discard the ending token
250
+ text_embedding = text_embedding[:, :-1]
251
+ elif i == max_embeddings_multiples - 1:
252
+ # discard the starting token
253
+ text_embedding = text_embedding[:, 1:]
254
+ else:
255
+ # discard both starting and ending tokens
256
+ text_embedding = text_embedding[:, 1:-1]
257
+
258
+ text_embeddings.append(text_embedding)
259
+ text_embeddings = np.concatenate(text_embeddings, axis=1)
260
+ else:
261
+ text_embeddings = pipe.text_encoder(input_ids=text_input)[0]
262
+ return text_embeddings
263
+
264
+
265
+ def get_weighted_text_embeddings(
266
+ pipe,
267
+ prompt: Union[str, List[str]],
268
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
269
+ max_embeddings_multiples: Optional[int] = 4,
270
+ no_boseos_middle: Optional[bool] = False,
271
+ skip_parsing: Optional[bool] = False,
272
+ skip_weighting: Optional[bool] = False,
273
+ **kwargs,
274
+ ):
275
+ r"""
276
+ Prompts can be assigned with local weights using brackets. For example,
277
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
278
+ and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
279
+
280
+ Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
281
+
282
+ Args:
283
+ pipe (`OnnxStableDiffusionPipeline`):
284
+ Pipe to provide access to the tokenizer and the text encoder.
285
+ prompt (`str` or `List[str]`):
286
+ The prompt or prompts to guide the image generation.
287
+ uncond_prompt (`str` or `List[str]`):
288
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
289
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
290
+ max_embeddings_multiples (`int`, *optional*, defaults to `4`):
291
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
292
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
293
+ If the token length is a multiple of the text encoder's capacity, whether to keep the start and
294
+ end tokens in each middle chunk.
295
+ skip_parsing (`bool`, *optional*, defaults to `False`):
296
+ Skip the parsing of brackets.
297
+ skip_weighting (`bool`, *optional*, defaults to `False`):
298
+ Skip the weighting. When parsing is skipped, this is forced to `True`.
299
+ """
300
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
301
+ if isinstance(prompt, str):
302
+ prompt = [prompt]
303
+
304
+ if not skip_parsing:
305
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
306
+ if uncond_prompt is not None:
307
+ if isinstance(uncond_prompt, str):
308
+ uncond_prompt = [uncond_prompt]
309
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
310
+ else:
311
+ prompt_tokens = [
312
+ token[1:-1]
313
+ for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids
314
+ ]
315
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
316
+ if uncond_prompt is not None:
317
+ if isinstance(uncond_prompt, str):
318
+ uncond_prompt = [uncond_prompt]
319
+ uncond_tokens = [
320
+ token[1:-1]
321
+ for token in pipe.tokenizer(
322
+ uncond_prompt,
323
+ max_length=max_length,
324
+ truncation=True,
325
+ return_tensors="np",
326
+ ).input_ids
327
+ ]
328
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
329
+
330
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
331
+ max_length = max([len(token) for token in prompt_tokens])
332
+ if uncond_prompt is not None:
333
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
334
+
335
+ max_embeddings_multiples = min(
336
+ max_embeddings_multiples,
337
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
338
+ )
339
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
340
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
341
+
342
+ # pad the length of tokens and weights
343
+ bos = pipe.tokenizer.bos_token_id
344
+ eos = pipe.tokenizer.eos_token_id
345
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
346
+ prompt_tokens,
347
+ prompt_weights,
348
+ max_length,
349
+ bos,
350
+ eos,
351
+ no_boseos_middle=no_boseos_middle,
352
+ chunk_length=pipe.tokenizer.model_max_length,
353
+ )
354
+ prompt_tokens = np.array(prompt_tokens, dtype=np.int32)
355
+ if uncond_prompt is not None:
356
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
357
+ uncond_tokens,
358
+ uncond_weights,
359
+ max_length,
360
+ bos,
361
+ eos,
362
+ no_boseos_middle=no_boseos_middle,
363
+ chunk_length=pipe.tokenizer.model_max_length,
364
+ )
365
+ uncond_tokens = np.array(uncond_tokens, dtype=np.int32)
366
+
367
+ # get the embeddings
368
+ text_embeddings = get_unweighted_text_embeddings(
369
+ pipe,
370
+ prompt_tokens,
371
+ pipe.tokenizer.model_max_length,
372
+ no_boseos_middle=no_boseos_middle,
373
+ )
374
+ prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)
375
+ if uncond_prompt is not None:
376
+ uncond_embeddings = get_unweighted_text_embeddings(
377
+ pipe,
378
+ uncond_tokens,
379
+ pipe.tokenizer.model_max_length,
380
+ no_boseos_middle=no_boseos_middle,
381
+ )
382
+ uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype)
383
+
384
+ # apply the weights to the prompt embeddings and renormalize so the original mean is preserved
385
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
386
+ if (not skip_parsing) and (not skip_weighting):
387
+ previous_mean = text_embeddings.mean(axis=(-2, -1))
388
+ text_embeddings *= prompt_weights[:, :, None]
389
+ text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]
390
+ if uncond_prompt is not None:
391
+ previous_mean = uncond_embeddings.mean(axis=(-2, -1))
392
+ uncond_embeddings *= uncond_weights[:, :, None]
393
+ uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None]
394
+
395
+ # For classifier free guidance, we need to do two forward passes.
396
+ # Here we concatenate the unconditional and text embeddings into a single batch
397
+ # to avoid doing two forward passes
398
+ if uncond_prompt is not None:
399
+ return text_embeddings, uncond_embeddings
400
+
401
+ return text_embeddings
402
+
403
+
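A hypothetical usage sketch of this helper on its own (the model id, revision, and provider below are assumptions; any ONNX Stable Diffusion checkpoint should work):

```python
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
# conditional and unconditional embeddings, with '(very beautiful)' weighted 1.1x
cond_emb, uncond_emb = get_weighted_text_embeddings(
    pipe,
    prompt="a (very beautiful) masterpiece",
    uncond_prompt="",
    max_embeddings_multiples=3,
)
```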
404
+ def preprocess_image(image):
405
+ w, h = image.size
406
+ w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
407
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
408
+ image = np.array(image).astype(np.float32) / 255.0
409
+ image = image[None].transpose(0, 3, 1, 2)
410
+ return 2.0 * image - 1.0
411
+
412
+
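The division by 255 followed by `2 * x - 1` maps uint8 pixels onto the `[-1, 1]` range the VAE expects; a quick check:

```python
import numpy as np

px = np.array([0.0, 127.5, 255.0], dtype=np.float32) / 255.0
print(2.0 * px - 1.0)  # [-1.  0.  1.]
```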
413
+ def preprocess_mask(mask, scale_factor=8):
414
+ mask = mask.convert("L")
415
+ w, h = mask.size
416
+ w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32
417
+ mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
418
+ mask = np.array(mask).astype(np.float32) / 255.0
419
+ mask = np.tile(mask, (4, 1, 1))
420
+ mask = mask[None].transpose(0, 1, 2, 3) # identity transpose (a no-op); mask[None] adds the batch dimension
421
+ mask = 1 - mask # repaint white, keep black
422
+ return mask
423
+
424
+
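A small sanity check of the mask shape and inversion (an all-white mask means "repaint everything", so every latent-mask value ends up 0 after `1 - mask`):

```python
import numpy as np
from PIL import Image

mask = preprocess_mask(Image.new("L", (512, 512), 255), scale_factor=8)
assert mask.shape == (1, 4, 64, 64)
assert np.allclose(mask, 0.0)  # white pixels -> 1.0 -> inverted to 0.0
```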
425
+ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline):
426
+ r"""
427
+ Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
428
+ parsing attention weighting in the prompt.
429
+
430
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
431
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
432
+ """
433
+
434
+ if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):
435
+
436
+ def __init__(
437
+ self,
438
+ vae_encoder: OnnxRuntimeModel,
439
+ vae_decoder: OnnxRuntimeModel,
440
+ text_encoder: OnnxRuntimeModel,
441
+ tokenizer: CLIPTokenizer,
442
+ unet: OnnxRuntimeModel,
443
+ scheduler: SchedulerMixin,
444
+ safety_checker: OnnxRuntimeModel,
445
+ feature_extractor: CLIPFeatureExtractor,
446
+ requires_safety_checker: bool = True,
447
+ ):
448
+ super().__init__(
449
+ vae_encoder=vae_encoder,
450
+ vae_decoder=vae_decoder,
451
+ text_encoder=text_encoder,
452
+ tokenizer=tokenizer,
453
+ unet=unet,
454
+ scheduler=scheduler,
455
+ safety_checker=safety_checker,
456
+ feature_extractor=feature_extractor,
457
+ requires_safety_checker=requires_safety_checker,
458
+ )
459
+ self.__init__additional__()
460
+
461
+ else:
462
+
463
+ def __init__(
464
+ self,
465
+ vae_encoder: OnnxRuntimeModel,
466
+ vae_decoder: OnnxRuntimeModel,
467
+ text_encoder: OnnxRuntimeModel,
468
+ tokenizer: CLIPTokenizer,
469
+ unet: OnnxRuntimeModel,
470
+ scheduler: SchedulerMixin,
471
+ safety_checker: OnnxRuntimeModel,
472
+ feature_extractor: CLIPFeatureExtractor,
473
+ ):
474
+ super().__init__(
475
+ vae_encoder=vae_encoder,
476
+ vae_decoder=vae_decoder,
477
+ text_encoder=text_encoder,
478
+ tokenizer=tokenizer,
479
+ unet=unet,
480
+ scheduler=scheduler,
481
+ safety_checker=safety_checker,
482
+ feature_extractor=feature_extractor,
483
+ )
484
+ self.__init__additional__()
485
+
486
+ def __init__additional__(self):
487
+ self.unet_in_channels = 4
488
+ self.vae_scale_factor = 8
489
+
490
+ def _encode_prompt(
491
+ self,
492
+ prompt,
493
+ num_images_per_prompt,
494
+ do_classifier_free_guidance,
495
+ negative_prompt,
496
+ max_embeddings_multiples,
497
+ ):
498
+ r"""
499
+ Encodes the prompt into text encoder hidden states.
500
+
501
+ Args:
502
+ prompt (`str` or `list(int)`):
503
+ prompt to be encoded
504
+ num_images_per_prompt (`int`):
505
+ number of images that should be generated per prompt
506
+ do_classifier_free_guidance (`bool`):
507
+ whether to use classifier free guidance or not
508
+ negative_prompt (`str` or `List[str]`):
509
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
510
+ if `guidance_scale` is less than `1`).
511
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
512
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
513
+ """
514
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
515
+
516
+ if negative_prompt is None:
517
+ negative_prompt = [""] * batch_size
518
+ elif isinstance(negative_prompt, str):
519
+ negative_prompt = [negative_prompt] * batch_size
520
+ if batch_size != len(negative_prompt):
521
+ raise ValueError(
522
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
523
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
524
+ " the batch size of `prompt`."
525
+ )
526
+
527
+ text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
528
+ pipe=self,
529
+ prompt=prompt,
530
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
531
+ max_embeddings_multiples=max_embeddings_multiples,
532
+ )
533
+
534
+ text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0)
535
+ if do_classifier_free_guidance:
536
+ uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0)
537
+ text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
538
+
539
+ return text_embeddings
540
+
541
+ def check_inputs(self, prompt, height, width, strength, callback_steps):
542
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
543
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
544
+
545
+ if strength < 0 or strength > 1:
546
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
547
+
548
+ if height % 8 != 0 or width % 8 != 0:
549
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
550
+
551
+ if (callback_steps is None) or (
552
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
553
+ ):
554
+ raise ValueError(
555
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
556
+ f" {type(callback_steps)}."
557
+ )
558
+
559
+ def get_timesteps(self, num_inference_steps, strength, is_text2img):
560
+ if is_text2img:
561
+ return self.scheduler.timesteps, num_inference_steps
562
+ else:
563
+ # get the original timestep using init_timestep
564
+ offset = self.scheduler.config.get("steps_offset", 0)
565
+ init_timestep = int(num_inference_steps * strength) + offset
566
+ init_timestep = min(init_timestep, num_inference_steps)
567
+
568
+ t_start = max(num_inference_steps - init_timestep + offset, 0)
569
+ timesteps = self.scheduler.timesteps[t_start:]
570
+ return timesteps, num_inference_steps - t_start
571
+
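For img2img, only the tail of the schedule is run. A worked example with the default scheduler offset of 0: with `num_inference_steps=50` and `strength=0.8`, denoising starts 40 steps from the end.

```python
# offset = 0, num_inference_steps = 50, strength = 0.8
init_timestep = int(50 * 0.8) + 0         # 40
init_timestep = min(init_timestep, 50)    # 40
t_start = max(50 - init_timestep + 0, 0)  # 10 -> skip the first 10 timesteps
# the loop then runs 50 - 10 = 40 denoising steps
```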
572
+ def run_safety_checker(self, image):
573
+ if self.safety_checker is not None:
574
+ safety_checker_input = self.feature_extractor(
575
+ self.numpy_to_pil(image), return_tensors="np"
576
+ ).pixel_values.astype(image.dtype)
577
+ # calling safety_checker directly with batch size > 1 raises an error, so run it per image
578
+ images, has_nsfw_concept = [], []
579
+ for i in range(image.shape[0]):
580
+ image_i, has_nsfw_concept_i = self.safety_checker(
581
+ clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
582
+ )
583
+ images.append(image_i)
584
+ has_nsfw_concept.append(has_nsfw_concept_i[0])
585
+ image = np.concatenate(images)
586
+ else:
587
+ has_nsfw_concept = None
588
+ return image, has_nsfw_concept
589
+
590
+ def decode_latents(self, latents):
591
+ latents = 1 / 0.18215 * latents
592
+ # image = self.vae_decoder(latent_sample=latents)[0]
593
+ # the half-precision VAE decoder can produce wrong results when batch size > 1, so decode one latent at a time
594
+ image = np.concatenate(
595
+ [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
596
+ )
597
+ image = np.clip(image / 2 + 0.5, 0, 1)
598
+ image = image.transpose((0, 2, 3, 1))
599
+ return image
600
+
601
+ def prepare_extra_step_kwargs(self, generator, eta):
602
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
603
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
604
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
605
+ # and should be between [0, 1]
606
+
607
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
608
+ extra_step_kwargs = {}
609
+ if accepts_eta:
610
+ extra_step_kwargs["eta"] = eta
611
+
612
+ # check if the scheduler accepts generator
613
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
614
+ if accepts_generator:
615
+ extra_step_kwargs["generator"] = generator
616
+ return extra_step_kwargs
617
+
618
+ def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None):
619
+ if image is None:
620
+ shape = (
621
+ batch_size,
622
+ self.unet_in_channels,
623
+ height // self.vae_scale_factor,
624
+ width // self.vae_scale_factor,
625
+ )
626
+
627
+ if latents is None:
628
+ latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
629
+ else:
630
+ if latents.shape != shape:
631
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
632
+
633
+ # scale the initial noise by the standard deviation required by the scheduler
634
+ latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy()
635
+ return latents, None, None
636
+ else:
637
+ init_latents = self.vae_encoder(sample=image)[0]
638
+ init_latents = 0.18215 * init_latents
639
+ init_latents = np.concatenate([init_latents] * batch_size, axis=0)
640
+ init_latents_orig = init_latents
641
+ shape = init_latents.shape
642
+
643
+ # add noise to latents using the timesteps
644
+ noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
645
+ latents = self.scheduler.add_noise(
646
+ torch.from_numpy(init_latents), torch.from_numpy(noise), timestep
647
+ ).numpy()
648
+ return latents, init_latents_orig, noise
649
+
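In the text2img branch the latent shape follows directly from `vae_scale_factor = 8` and the 4 UNet input channels; for a 512x512 request:

```python
batch_size, height, width = 1, 512, 512
shape = (batch_size, 4, height // 8, width // 8)
assert shape == (1, 4, 64, 64)
```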
650
+ @torch.no_grad()
651
+ def __call__(
652
+ self,
653
+ prompt: Union[str, List[str]],
654
+ negative_prompt: Optional[Union[str, List[str]]] = None,
655
+ image: Union[np.ndarray, PIL.Image.Image] = None,
656
+ mask_image: Union[np.ndarray, PIL.Image.Image] = None,
657
+ height: int = 512,
658
+ width: int = 512,
659
+ num_inference_steps: int = 50,
660
+ guidance_scale: float = 7.5,
661
+ strength: float = 0.8,
662
+ num_images_per_prompt: Optional[int] = 1,
663
+ eta: float = 0.0,
664
+ generator: Optional[torch.Generator] = None,
665
+ latents: Optional[np.ndarray] = None,
666
+ max_embeddings_multiples: Optional[int] = 3,
667
+ output_type: Optional[str] = "pil",
668
+ return_dict: bool = True,
669
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
670
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
671
+ callback_steps: Optional[int] = 1,
672
+ **kwargs,
673
+ ):
674
+ r"""
675
+ Function invoked when calling the pipeline for generation.
676
+
677
+ Args:
678
+ prompt (`str` or `List[str]`):
679
+ The prompt or prompts to guide the image generation.
680
+ negative_prompt (`str` or `List[str]`, *optional*):
681
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
682
+ if `guidance_scale` is less than `1`).
683
+ image (`np.ndarray` or `PIL.Image.Image`):
684
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
685
+ process.
686
+ mask_image (`np.ndarray` or `PIL.Image.Image`):
687
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
688
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
689
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
690
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
691
+ height (`int`, *optional*, defaults to 512):
692
+ The height in pixels of the generated image.
693
+ width (`int`, *optional*, defaults to 512):
694
+ The width in pixels of the generated image.
695
+ num_inference_steps (`int`, *optional*, defaults to 50):
696
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
697
+ expense of slower inference.
698
+ guidance_scale (`float`, *optional*, defaults to 7.5):
699
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
700
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
701
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
702
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
703
+ usually at the expense of lower image quality.
704
+ strength (`float`, *optional*, defaults to 0.8):
705
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
706
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
707
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
708
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
709
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
710
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
711
+ The number of images to generate per prompt.
712
+ eta (`float`, *optional*, defaults to 0.0):
713
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
714
+ [`schedulers.DDIMScheduler`], will be ignored for others.
715
+ generator (`torch.Generator`, *optional*):
716
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
717
+ deterministic.
718
+ latents (`np.ndarray`, *optional*):
719
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
720
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
721
+ tensor will be generated by sampling using the supplied random `generator`.
722
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
723
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
724
+ output_type (`str`, *optional*, defaults to `"pil"`):
725
+ The output format of the generated image. Choose between
726
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
727
+ return_dict (`bool`, *optional*, defaults to `True`):
728
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
729
+ plain tuple.
730
+ callback (`Callable`, *optional*):
731
+ A function that will be called every `callback_steps` steps during inference. The function will be
732
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
733
+ is_cancelled_callback (`Callable`, *optional*):
734
+ A function that will be called every `callback_steps` steps during inference. If the function returns
735
+ `True`, the inference will be cancelled.
736
+ callback_steps (`int`, *optional*, defaults to 1):
737
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
738
+ called at every step.
739
+
740
+ Returns:
741
+ `None` if cancelled by `is_cancelled_callback`,
742
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
743
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
744
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
745
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
746
+ (nsfw) content, according to the `safety_checker`.
747
+ """
748
+ message = "Please use `image` instead of `init_image`."
749
+ init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
750
+ image = init_image or image
751
+
752
+ # 0. Default height and width to unet
753
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
754
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
755
+
756
+ # 1. Check inputs. Raise error if not correct
757
+ self.check_inputs(prompt, height, width, strength, callback_steps)
758
+
759
+ # 2. Define call parameters
760
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
761
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
762
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
763
+ # corresponds to doing no classifier free guidance.
764
+ do_classifier_free_guidance = guidance_scale > 1.0
765
+
766
+ # 3. Encode input prompt
767
+ text_embeddings = self._encode_prompt(
768
+ prompt,
769
+ num_images_per_prompt,
770
+ do_classifier_free_guidance,
771
+ negative_prompt,
772
+ max_embeddings_multiples,
773
+ )
774
+ dtype = text_embeddings.dtype
775
+
776
+ # 4. Preprocess image and mask
777
+ if isinstance(image, PIL.Image.Image):
778
+ image = preprocess_image(image)
779
+ if image is not None:
780
+ image = image.astype(dtype)
781
+ if isinstance(mask_image, PIL.Image.Image):
782
+ mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
783
+ if mask_image is not None:
784
+ mask = mask_image.astype(dtype)
785
+ mask = np.concatenate([mask] * batch_size * num_images_per_prompt)
786
+ else:
787
+ mask = None
788
+
789
+ # 5. set timesteps
790
+ self.scheduler.set_timesteps(num_inference_steps)
791
+ timestep_dtype = next(
792
+ (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
793
+ )
794
+ timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
795
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None)
796
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
797
+
798
+ # 6. Prepare latent variables
799
+ latents, init_latents_orig, noise = self.prepare_latents(
800
+ image,
801
+ latent_timestep,
802
+ batch_size * num_images_per_prompt,
803
+ height,
804
+ width,
805
+ dtype,
806
+ generator,
807
+ latents,
808
+ )
809
+
810
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
811
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
812
+
813
+ # 8. Denoising loop
814
+ for i, t in enumerate(self.progress_bar(timesteps)):
815
+ # expand the latents if we are doing classifier free guidance
816
+ latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
817
+ latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
818
+ latent_model_input = latent_model_input.numpy()
819
+
820
+ # predict the noise residual
821
+ noise_pred = self.unet(
822
+ sample=latent_model_input,
823
+ timestep=np.array([t], dtype=timestep_dtype),
824
+ encoder_hidden_states=text_embeddings,
825
+ )
826
+ noise_pred = noise_pred[0]
827
+
828
+ # perform guidance
829
+ if do_classifier_free_guidance:
830
+ noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
831
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
832
+
833
+ # compute the previous noisy sample x_t -> x_t-1
834
+ scheduler_output = self.scheduler.step(
835
+ torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
836
+ )
837
+ latents = scheduler_output.prev_sample.numpy()
838
+
839
+ if mask is not None:
840
+ # masking
841
+ init_latents_proper = self.scheduler.add_noise(
842
+ torch.from_numpy(init_latents_orig),
843
+ torch.from_numpy(noise),
844
+ t,
845
+ ).numpy()
846
+ latents = (init_latents_proper * mask) + (latents * (1 - mask))
847
+
848
+ # call the callback, if provided
849
+ if i % callback_steps == 0:
850
+ if callback is not None:
851
+ callback(i, t, latents)
852
+ if is_cancelled_callback is not None and is_cancelled_callback():
853
+ return None
854
+
855
+ # 9. Post-processing
856
+ image = self.decode_latents(latents)
857
+
858
+ # 10. Run safety checker
859
+ image, has_nsfw_concept = self.run_safety_checker(image)
860
+
861
+ # 11. Convert to PIL
862
+ if output_type == "pil":
863
+ image = self.numpy_to_pil(image)
864
+
865
+ if not return_dict:
866
+ return image, has_nsfw_concept
867
+
868
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
869
+
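A hypothetical end-to-end sketch (the `custom_pipeline` name is inferred from this file's path, and the checkpoint id is an assumption):

```python
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    revision="onnx",
    provider="CPUExecutionProvider",
    custom_pipeline="lpw_stable_diffusion_onnx",
)
# brackets weight 'very beautiful' up by 1.1x; long prompts are chunked automatically
image = pipe.text2img("a (very beautiful) masterpiece", max_embeddings_multiples=3).images[0]
image.save("out.png")
```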
870
+ def text2img(
871
+ self,
872
+ prompt: Union[str, List[str]],
873
+ negative_prompt: Optional[Union[str, List[str]]] = None,
874
+ height: int = 512,
875
+ width: int = 512,
876
+ num_inference_steps: int = 50,
877
+ guidance_scale: float = 7.5,
878
+ num_images_per_prompt: Optional[int] = 1,
879
+ eta: float = 0.0,
880
+ generator: Optional[torch.Generator] = None,
881
+ latents: Optional[np.ndarray] = None,
882
+ max_embeddings_multiples: Optional[int] = 3,
883
+ output_type: Optional[str] = "pil",
884
+ return_dict: bool = True,
885
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
886
+ callback_steps: Optional[int] = 1,
887
+ **kwargs,
888
+ ):
889
+ r"""
890
+ Function for text-to-image generation.
891
+ Args:
892
+ prompt (`str` or `List[str]`):
893
+ The prompt or prompts to guide the image generation.
894
+ negative_prompt (`str` or `List[str]`, *optional*):
895
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
896
+ if `guidance_scale` is less than `1`).
897
+ height (`int`, *optional*, defaults to 512):
898
+ The height in pixels of the generated image.
899
+ width (`int`, *optional*, defaults to 512):
900
+ The width in pixels of the generated image.
901
+ num_inference_steps (`int`, *optional*, defaults to 50):
902
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
903
+ expense of slower inference.
904
+ guidance_scale (`float`, *optional*, defaults to 7.5):
905
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
906
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
907
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
908
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
909
+ usually at the expense of lower image quality.
910
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
911
+ The number of images to generate per prompt.
912
+ eta (`float`, *optional*, defaults to 0.0):
913
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
914
+ [`schedulers.DDIMScheduler`], will be ignored for others.
915
+ generator (`torch.Generator`, *optional*):
916
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
917
+ deterministic.
918
+ latents (`np.ndarray`, *optional*):
919
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
920
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
921
+ tensor will be generated by sampling using the supplied random `generator`.
922
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
923
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
924
+ output_type (`str`, *optional*, defaults to `"pil"`):
925
+ The output format of the generated image. Choose between
926
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
927
+ return_dict (`bool`, *optional*, defaults to `True`):
928
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
929
+ plain tuple.
930
+ callback (`Callable`, *optional*):
931
+ A function that will be called every `callback_steps` steps during inference. The function will be
932
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
933
+ callback_steps (`int`, *optional*, defaults to 1):
934
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
935
+ called at every step.
936
+ Returns:
937
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
938
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
939
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
940
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
941
+ (nsfw) content, according to the `safety_checker`.
942
+ """
943
+ return self.__call__(
944
+ prompt=prompt,
945
+ negative_prompt=negative_prompt,
946
+ height=height,
947
+ width=width,
948
+ num_inference_steps=num_inference_steps,
949
+ guidance_scale=guidance_scale,
950
+ num_images_per_prompt=num_images_per_prompt,
951
+ eta=eta,
952
+ generator=generator,
953
+ latents=latents,
954
+ max_embeddings_multiples=max_embeddings_multiples,
955
+ output_type=output_type,
956
+ return_dict=return_dict,
957
+ callback=callback,
958
+ callback_steps=callback_steps,
959
+ **kwargs,
960
+ )
961
+
962
+ def img2img(
963
+ self,
964
+ image: Union[np.ndarray, PIL.Image.Image],
965
+ prompt: Union[str, List[str]],
966
+ negative_prompt: Optional[Union[str, List[str]]] = None,
967
+ strength: float = 0.8,
968
+ num_inference_steps: Optional[int] = 50,
969
+ guidance_scale: Optional[float] = 7.5,
970
+ num_images_per_prompt: Optional[int] = 1,
971
+ eta: Optional[float] = 0.0,
972
+ generator: Optional[torch.Generator] = None,
973
+ max_embeddings_multiples: Optional[int] = 3,
974
+ output_type: Optional[str] = "pil",
975
+ return_dict: bool = True,
976
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
977
+ callback_steps: Optional[int] = 1,
978
+ **kwargs,
979
+ ):
980
+ r"""
981
+ Function for image-to-image generation.
982
+ Args:
983
+ image (`np.ndarray` or `PIL.Image.Image`):
984
+ `Image`, or ndarray representing an image batch, that will be used as the starting point for the
985
+ process.
986
+ prompt (`str` or `List[str]`):
987
+ The prompt or prompts to guide the image generation.
988
+ negative_prompt (`str` or `List[str]`, *optional*):
989
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
990
+ if `guidance_scale` is less than `1`).
991
+ strength (`float`, *optional*, defaults to 0.8):
992
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
993
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
994
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
995
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
996
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
997
+ num_inference_steps (`int`, *optional*, defaults to 50):
998
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
999
+ expense of slower inference. This parameter will be modulated by `strength`.
1000
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1001
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1002
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1003
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1004
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1005
+ usually at the expense of lower image quality.
1006
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1007
+ The number of images to generate per prompt.
1008
+ eta (`float`, *optional*, defaults to 0.0):
1009
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1010
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1011
+ generator (`torch.Generator`, *optional*):
1012
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1013
+ deterministic.
1014
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1015
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1016
+ output_type (`str`, *optional*, defaults to `"pil"`):
1017
+ The output format of the generated image. Choose between
1018
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1019
+ return_dict (`bool`, *optional*, defaults to `True`):
1020
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1021
+ plain tuple.
1022
+ callback (`Callable`, *optional*):
1023
+ A function that will be called every `callback_steps` steps during inference. The function will be
1024
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1025
+ callback_steps (`int`, *optional*, defaults to 1):
1026
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1027
+ called at every step.
1028
+ Returns:
1029
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1030
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1031
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1032
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1033
+ (nsfw) content, according to the `safety_checker`.
1034
+ """
1035
+ return self.__call__(
1036
+ prompt=prompt,
1037
+ negative_prompt=negative_prompt,
1038
+ image=image,
1039
+ num_inference_steps=num_inference_steps,
1040
+ guidance_scale=guidance_scale,
1041
+ strength=strength,
1042
+ num_images_per_prompt=num_images_per_prompt,
1043
+ eta=eta,
1044
+ generator=generator,
1045
+ max_embeddings_multiples=max_embeddings_multiples,
1046
+ output_type=output_type,
1047
+ return_dict=return_dict,
1048
+ callback=callback,
1049
+ callback_steps=callback_steps,
1050
+ **kwargs,
1051
+ )
1052
+
1053
+ def inpaint(
1054
+ self,
1055
+ image: Union[np.ndarray, PIL.Image.Image],
1056
+ mask_image: Union[np.ndarray, PIL.Image.Image],
1057
+ prompt: Union[str, List[str]],
1058
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1059
+ strength: float = 0.8,
1060
+ num_inference_steps: Optional[int] = 50,
1061
+ guidance_scale: Optional[float] = 7.5,
1062
+ num_images_per_prompt: Optional[int] = 1,
1063
+ eta: Optional[float] = 0.0,
1064
+ generator: Optional[torch.Generator] = None,
1065
+ max_embeddings_multiples: Optional[int] = 3,
1066
+ output_type: Optional[str] = "pil",
1067
+ return_dict: bool = True,
1068
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
1069
+ callback_steps: Optional[int] = 1,
1070
+ **kwargs,
1071
+ ):
1072
+ r"""
1073
+ Function for inpaint.
1074
+ Args:
1075
+ image (`np.ndarray` or `PIL.Image.Image`):
1076
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1077
+ process. This is the image whose masked region will be inpainted.
1078
+ mask_image (`np.ndarray` or `PIL.Image.Image`):
1079
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1080
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1081
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1082
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1083
+ prompt (`str` or `List[str]`):
1084
+ The prompt or prompts to guide the image generation.
1085
+ negative_prompt (`str` or `List[str]`, *optional*):
1086
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1087
+ if `guidance_scale` is less than `1`).
1088
+ strength (`float`, *optional*, defaults to 0.8):
1089
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1090
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
1091
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1092
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1093
+ num_inference_steps (`int`, *optional*, defaults to 50):
1094
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1095
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1096
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1097
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1098
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1099
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1100
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1101
+ usually at the expense of lower image quality.
1102
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1103
+ The number of images to generate per prompt.
1104
+ eta (`float`, *optional*, defaults to 0.0):
1105
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1106
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1107
+ generator (`torch.Generator`, *optional*):
1108
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1109
+ deterministic.
1110
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1111
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1112
+ output_type (`str`, *optional*, defaults to `"pil"`):
1113
+ The output format of the generated image. Choose between
1114
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1115
+ return_dict (`bool`, *optional*, defaults to `True`):
1116
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1117
+ plain tuple.
1118
+ callback (`Callable`, *optional*):
1119
+ A function that will be called every `callback_steps` steps during inference. The function will be
1120
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1121
+ callback_steps (`int`, *optional*, defaults to 1):
1122
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1123
+ called at every step.
1124
+ Returns:
1125
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1126
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1127
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1128
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1129
+ (nsfw) content, according to the `safety_checker`.
1130
+ """
1131
+ return self.__call__(
1132
+ prompt=prompt,
1133
+ negative_prompt=negative_prompt,
1134
+ image=image,
1135
+ mask_image=mask_image,
1136
+ num_inference_steps=num_inference_steps,
1137
+ guidance_scale=guidance_scale,
1138
+ strength=strength,
1139
+ num_images_per_prompt=num_images_per_prompt,
1140
+ eta=eta,
1141
+ generator=generator,
1142
+ max_embeddings_multiples=max_embeddings_multiples,
1143
+ output_type=output_type,
1144
+ return_dict=return_dict,
1145
+ callback=callback,
1146
+ callback_steps=callback_steps,
1147
+ **kwargs,
1148
+ )
huggingface_diffusers/examples/community/magic_mix.py ADDED
@@ -0,0 +1,152 @@
1
+ from typing import Union
2
+
3
+ import torch
4
+
5
+ from diffusers import (
6
+ AutoencoderKL,
7
+ DDIMScheduler,
8
+ DiffusionPipeline,
9
+ LMSDiscreteScheduler,
10
+ PNDMScheduler,
11
+ UNet2DConditionModel,
12
+ )
13
+ from PIL import Image
14
+ from torchvision import transforms as tfms
15
+ from tqdm.auto import tqdm
16
+ from transformers import CLIPTextModel, CLIPTokenizer
17
+
18
+
19
+ class MagicMixPipeline(DiffusionPipeline):
20
+ def __init__(
21
+ self,
22
+ vae: AutoencoderKL,
23
+ text_encoder: CLIPTextModel,
24
+ tokenizer: CLIPTokenizer,
25
+ unet: UNet2DConditionModel,
26
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
27
+ ):
28
+ super().__init__()
29
+
30
+ self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
31
+
32
+ # convert PIL image to latents
33
+ def encode(self, img):
34
+ with torch.no_grad():
35
+ latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
36
+ latent = 0.18215 * latent.latent_dist.sample()
37
+ return latent
38
+
39
+ # convert latents to PIL image
40
+ def decode(self, latent):
41
+ latent = (1 / 0.18215) * latent
42
+ with torch.no_grad():
43
+ img = self.vae.decode(latent).sample
44
+ img = (img / 2 + 0.5).clamp(0, 1)
45
+ img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
46
+ img = (img * 255).round().astype("uint8")
47
+ return Image.fromarray(img[0])
48
+
49
+ # convert prompt into text embeddings, along with the unconditional embeddings
50
+ def prep_text(self, prompt):
51
+ text_input = self.tokenizer(
52
+ prompt,
53
+ padding="max_length",
54
+ max_length=self.tokenizer.model_max_length,
55
+ truncation=True,
56
+ return_tensors="pt",
57
+ )
58
+
59
+ text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
60
+
61
+ uncond_input = self.tokenizer(
62
+ "",
63
+ padding="max_length",
64
+ max_length=self.tokenizer.model_max_length,
65
+ truncation=True,
66
+ return_tensors="pt",
67
+ )
68
+
69
+ uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
70
+
71
+ return torch.cat([uncond_embedding, text_embedding])
72
+
73
+ def __call__(
74
+ self,
75
+ img: Image.Image,
76
+ prompt: str,
77
+ kmin: float = 0.3,
78
+ kmax: float = 0.6,
79
+ mix_factor: float = 0.5,
80
+ seed: int = 42,
81
+ steps: int = 50,
82
+ guidance_scale: float = 7.5,
83
+ ) -> Image.Image:
84
+ tmin = steps - int(kmin * steps)
85
+ tmax = steps - int(kmax * steps)
86
+
87
+ text_embeddings = self.prep_text(prompt)
88
+
89
+ self.scheduler.set_timesteps(steps)
90
+
91
+ width, height = img.size
92
+ encoded = self.encode(img)
93
+
94
+ torch.manual_seed(seed)
95
+ noise = torch.randn(
96
+ (1, self.unet.in_channels, height // 8, width // 8),
97
+ ).to(self.device)
98
+
99
+ latents = self.scheduler.add_noise(
100
+ encoded,
101
+ noise,
102
+ timesteps=self.scheduler.timesteps[tmax],
103
+ )
104
+
105
+ input = torch.cat([latents] * 2)
106
+
107
+ input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
108
+
109
+ with torch.no_grad():
110
+ pred = self.unet(
111
+ input,
112
+ self.scheduler.timesteps[tmax],
113
+ encoder_hidden_states=text_embeddings,
114
+ ).sample
115
+
116
+ pred_uncond, pred_text = pred.chunk(2)
117
+ pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
118
+
119
+ latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
120
+
121
+ for i, t in enumerate(tqdm(self.scheduler.timesteps)):
122
+ if i > tmax:
123
+ if i < tmin: # layout generation phase
124
+ orig_latents = self.scheduler.add_noise(
125
+ encoded,
126
+ noise,
127
+ timesteps=t,
128
+ )
129
+
130
+ input = (mix_factor * latents) + (
131
+ 1 - mix_factor
132
+ ) * orig_latents # interpolate between layout noise and conditionally generated noise to preserve layout semantics
133
+ input = torch.cat([input] * 2)
134
+
135
+ else: # content generation phase
136
+ input = torch.cat([latents] * 2)
137
+
138
+ input = self.scheduler.scale_model_input(input, t)
139
+
140
+ with torch.no_grad():
141
+ pred = self.unet(
142
+ input,
143
+ t,
144
+ encoder_hidden_states=text_embeddings,
145
+ ).sample
146
+
147
+ pred_uncond, pred_text = pred.chunk(2)
148
+ pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
149
+
150
+ latents = self.scheduler.step(pred, t, latents).prev_sample
151
+
152
+ return self.decode(latents)
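A hypothetical usage sketch (the `custom_pipeline` name is inferred from this file's path; the input image path is a placeholder). `kmin`/`kmax` bound the layout-mixing phase of the schedule and `mix_factor` controls how strongly the original layout is preserved:

```python
import torch
from PIL import Image

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="magic_mix",
).to("cuda" if torch.cuda.is_available() else "cpu")

img = Image.open("input.jpg")  # placeholder path
mixed = pipe(img, prompt="bed", kmin=0.3, kmax=0.6, mix_factor=0.5, steps=50)
mixed.save("magic_mix.jpg")
```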
huggingface_diffusers/examples/community/multilingual_stable_diffusion.py ADDED
@@ -0,0 +1,436 @@
1
+ import inspect
2
+ from typing import Callable, List, Optional, Union
3
+
4
+ import torch
5
+
6
+ from diffusers import DiffusionPipeline
7
+ from diffusers.configuration_utils import FrozenDict
8
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
9
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
10
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
11
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
12
+ from diffusers.utils import deprecate, logging
13
+ from transformers import (
14
+ CLIPFeatureExtractor,
15
+ CLIPTextModel,
16
+ CLIPTokenizer,
17
+ MBart50TokenizerFast,
18
+ MBartForConditionalGeneration,
19
+ pipeline,
20
+ )
21
+
22
+
23
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
+
25
+
26
+ def detect_language(pipe, prompt, batch_size):
27
+ """helper function to detect language(s) of prompt"""
28
+
29
+ if batch_size == 1:
30
+ preds = pipe(prompt, top_k=1, truncation=True, max_length=128)
31
+ return preds[0]["label"]
32
+ else:
33
+ detected_languages = []
34
+ for p in prompt:
35
+ preds = pipe(p, top_k=1, truncation=True, max_length=128)
36
+ detected_languages.append(preds[0]["label"])
37
+
38
+ return detected_languages
39
+
40
+
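A hypothetical sketch of the detection helper in isolation (the language-identification model id is an assumption; any `text-classification` checkpoint that emits language labels works):

```python
from transformers import pipeline

language_detection = pipeline(
    "text-classification", model="papluca/xlm-roberta-base-language-detection"
)
detect_language(language_detection, "Una casa en la playa", batch_size=1)  # e.g. 'es'
```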
41
+ def translate_prompt(prompt, translation_tokenizer, translation_model, device):
42
+ """helper function to translate prompt to English"""
43
+
44
+ encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device)
45
+ generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000)
46
+ en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
47
+
48
+ return en_trans[0]
49
+
50
+
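And a hypothetical sketch of the translation helper (the mBART-50 checkpoint is an assumption; note the source language must be set on the tokenizer beforehand, which the pipeline does after detection):

```python
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

model_id = "facebook/mbart-large-50-many-to-many-mmt"  # assumed checkpoint
trans_tokenizer = MBart50TokenizerFast.from_pretrained(model_id)
trans_model = MBartForConditionalGeneration.from_pretrained(model_id)

trans_tokenizer.src_lang = "es_XX"  # detected language, set before encoding
translate_prompt("Una casa en la playa", trans_tokenizer, trans_model, device="cpu")
```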
51
+ class MultilingualStableDiffusion(DiffusionPipeline):
52
+ r"""
53
+ Pipeline for text-to-image generation using Stable Diffusion in different languages.
54
+
55
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
56
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
57
+
58
+ Args:
59
+ detection_pipeline ([`pipeline`]):
60
+ Transformers pipeline to detect prompt's language.
61
+ translation_model ([`MBartForConditionalGeneration`]):
62
+ Model to translate prompt to English, if necessary. Please refer to the
63
+ [model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details.
64
+ translation_tokenizer ([`MBart50TokenizerFast`]):
65
+ Tokenizer of the translation model.
66
+ vae ([`AutoencoderKL`]):
67
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
+ text_encoder ([`CLIPTextModel`]):
69
+ Frozen text-encoder. Stable Diffusion uses the text portion of
70
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
+ tokenizer (`CLIPTokenizer`):
73
+ Tokenizer of class
74
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
+ scheduler ([`SchedulerMixin`]):
77
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
78
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
+ safety_checker ([`StableDiffusionSafetyChecker`]):
80
+ Classification module that estimates whether generated images could be considered offensive or harmful.
81
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
+ feature_extractor ([`CLIPFeatureExtractor`]):
83
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ detection_pipeline: pipeline,
89
+ translation_model: MBartForConditionalGeneration,
90
+ translation_tokenizer: MBart50TokenizerFast,
91
+ vae: AutoencoderKL,
92
+ text_encoder: CLIPTextModel,
93
+ tokenizer: CLIPTokenizer,
94
+ unet: UNet2DConditionModel,
95
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
96
+ safety_checker: StableDiffusionSafetyChecker,
97
+ feature_extractor: CLIPFeatureExtractor,
98
+ ):
99
+ super().__init__()
100
+
101
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
102
+ deprecation_message = (
103
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
104
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
105
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
106
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
107
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
108
+ " file"
109
+ )
110
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
111
+ new_config = dict(scheduler.config)
112
+ new_config["steps_offset"] = 1
113
+ scheduler._internal_dict = FrozenDict(new_config)
114
+
115
+ if safety_checker is None:
116
+ logger.warning(
117
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
118
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
119
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
120
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
121
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
122
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
123
+ )
124
+
125
+ self.register_modules(
126
+ detection_pipeline=detection_pipeline,
127
+ translation_model=translation_model,
128
+ translation_tokenizer=translation_tokenizer,
129
+ vae=vae,
130
+ text_encoder=text_encoder,
131
+ tokenizer=tokenizer,
132
+ unet=unet,
133
+ scheduler=scheduler,
134
+ safety_checker=safety_checker,
135
+ feature_extractor=feature_extractor,
136
+ )
137
+
138
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
139
+ r"""
140
+ Enable sliced attention computation.
141
+
142
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
143
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
144
+
145
+ Args:
146
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
147
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
148
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
149
+ `attention_head_dim` must be a multiple of `slice_size`.
150
+ """
151
+ if slice_size == "auto":
152
+ # half the attention head size is usually a good trade-off between
153
+ # speed and memory
154
+ slice_size = self.unet.config.attention_head_dim // 2
155
+ self.unet.set_attention_slice(slice_size)
156
+
157
+ def disable_attention_slicing(self):
158
+ r"""
159
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
160
+ back to computing attention in one step.
161
+ """
162
+ # set slice_size = `None` to disable `attention slicing`
163
+ self.enable_attention_slicing(None)
164
+
165
+ @torch.no_grad()
166
+ def __call__(
167
+ self,
168
+ prompt: Union[str, List[str]],
169
+ height: int = 512,
170
+ width: int = 512,
171
+ num_inference_steps: int = 50,
172
+ guidance_scale: float = 7.5,
173
+ negative_prompt: Optional[Union[str, List[str]]] = None,
174
+ num_images_per_prompt: Optional[int] = 1,
175
+ eta: float = 0.0,
176
+ generator: Optional[torch.Generator] = None,
177
+ latents: Optional[torch.FloatTensor] = None,
178
+ output_type: Optional[str] = "pil",
179
+ return_dict: bool = True,
180
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
181
+ callback_steps: Optional[int] = 1,
182
+ **kwargs,
183
+ ):
184
+ r"""
185
+ Function invoked when calling the pipeline for generation.
186
+
187
+ Args:
188
+ prompt (`str` or `List[str]`):
189
+ The prompt or prompts to guide the image generation. Can be in different languages.
190
+ height (`int`, *optional*, defaults to 512):
191
+ The height in pixels of the generated image.
192
+ width (`int`, *optional*, defaults to 512):
193
+ The width in pixels of the generated image.
194
+ num_inference_steps (`int`, *optional*, defaults to 50):
195
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
196
+ expense of slower inference.
197
+ guidance_scale (`float`, *optional*, defaults to 7.5):
198
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
199
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
200
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
201
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
202
+ usually at the expense of lower image quality.
203
+ negative_prompt (`str` or `List[str]`, *optional*):
204
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
205
+ if `guidance_scale` is less than `1`).
206
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
207
+ The number of images to generate per prompt.
208
+ eta (`float`, *optional*, defaults to 0.0):
209
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
210
+ [`schedulers.DDIMScheduler`], will be ignored for others.
211
+ generator (`torch.Generator`, *optional*):
212
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
213
+ deterministic.
214
+ latents (`torch.FloatTensor`, *optional*):
215
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
216
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
217
+ tensor will ge generated by sampling using the supplied random `generator`.
218
+ output_type (`str`, *optional*, defaults to `"pil"`):
219
+ The output format of the generate image. Choose between
220
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
221
+ return_dict (`bool`, *optional*, defaults to `True`):
222
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
223
+ plain tuple.
224
+ callback (`Callable`, *optional*):
225
+ A function that will be called every `callback_steps` steps during inference. The function will be
226
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
227
+ callback_steps (`int`, *optional*, defaults to 1):
228
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
229
+ called at every step.
230
+
231
+ Returns:
232
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
233
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
234
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
235
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
236
+ (nsfw) content, according to the `safety_checker`.
237
+ """
238
+ if isinstance(prompt, str):
239
+ batch_size = 1
240
+ elif isinstance(prompt, list):
241
+ batch_size = len(prompt)
242
+ else:
243
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
244
+
245
+ if height % 8 != 0 or width % 8 != 0:
246
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
247
+
248
+ if (callback_steps is None) or (
249
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
250
+ ):
251
+ raise ValueError(
252
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
253
+ f" {type(callback_steps)}."
254
+ )
255
+
256
+ # detect language and translate if necessary
257
+ prompt_language = detect_language(self.detection_pipeline, prompt, batch_size)
258
+ if batch_size == 1 and prompt_language != "en":
259
+ prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device)
260
+
261
+ if isinstance(prompt, list):
262
+ for index in range(batch_size):
263
+ if prompt_language[index] != "en":
264
+ p = translate_prompt(
265
+ prompt[index], self.translation_tokenizer, self.translation_model, self.device
266
+ )
267
+ prompt[index] = p
268
+
269
+ # get prompt text embeddings
270
+ text_inputs = self.tokenizer(
271
+ prompt,
272
+ padding="max_length",
273
+ max_length=self.tokenizer.model_max_length,
274
+ return_tensors="pt",
275
+ )
276
+ text_input_ids = text_inputs.input_ids
277
+
278
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
279
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
280
+ logger.warning(
281
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
282
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
283
+ )
284
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
285
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
286
+
287
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
288
+ bs_embed, seq_len, _ = text_embeddings.shape
289
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
290
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
291
+
292
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
293
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
294
+ # corresponds to doing no classifier free guidance.
295
+ do_classifier_free_guidance = guidance_scale > 1.0
296
+ # get unconditional embeddings for classifier free guidance
297
+ if do_classifier_free_guidance:
298
+ uncond_tokens: List[str]
299
+ if negative_prompt is None:
300
+ uncond_tokens = [""] * batch_size
301
+ elif type(prompt) is not type(negative_prompt):
302
+ raise TypeError(
303
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
304
+ f" {type(prompt)}."
305
+ )
306
+ elif isinstance(negative_prompt, str):
307
+ # detect language and translate it if necessary
308
+ negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size)
309
+ if negative_prompt_language != "en":
310
+ negative_prompt = translate_prompt(
311
+ negative_prompt, self.translation_tokenizer, self.translation_model, self.device
312
+ )
313
+ if isinstance(negative_prompt, str):
314
+ uncond_tokens = [negative_prompt]
315
+ elif batch_size != len(negative_prompt):
316
+ raise ValueError(
317
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
318
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
319
+ " the batch size of `prompt`."
320
+ )
321
+ else:
322
+ # detect language and translate it if necessary
323
+ if isinstance(negative_prompt, list):
324
+ negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size)
325
+ for index in range(batch_size):
326
+ if negative_prompt_languages[index] != "en":
327
+ p = translate_prompt(
328
+ negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device
329
+ )
330
+ negative_prompt[index] = p
331
+ uncond_tokens = negative_prompt
332
+
333
+ max_length = text_input_ids.shape[-1]
334
+ uncond_input = self.tokenizer(
335
+ uncond_tokens,
336
+ padding="max_length",
337
+ max_length=max_length,
338
+ truncation=True,
339
+ return_tensors="pt",
340
+ )
341
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
342
+
343
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
344
+ seq_len = uncond_embeddings.shape[1]
345
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
346
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
347
+
348
+ # For classifier free guidance, we need to do two forward passes.
349
+ # Here we concatenate the unconditional and text embeddings into a single batch
350
+ # to avoid doing two forward passes
351
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
352
+
353
+ # get the initial random noise unless the user supplied it
354
+
355
+ # Unlike in other pipelines, latents need to be generated in the target device
356
+ # for 1-to-1 results reproducibility with the CompVis implementation.
357
+ # However this currently doesn't work in `mps`.
358
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)
359
+ latents_dtype = text_embeddings.dtype
360
+ if latents is None:
361
+ if self.device.type == "mps":
362
+ # randn does not work reproducibly on mps
363
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
364
+ self.device
365
+ )
366
+ else:
367
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
368
+ else:
369
+ if latents.shape != latents_shape:
370
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
371
+ latents = latents.to(self.device)
372
+
373
+ # set timesteps
374
+ self.scheduler.set_timesteps(num_inference_steps)
375
+
376
+ # Some schedulers like PNDM have timesteps as arrays
377
+ # It's more optimized to move all timesteps to correct device beforehand
378
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
379
+
380
+ # scale the initial noise by the standard deviation required by the scheduler
381
+ latents = latents * self.scheduler.init_noise_sigma
382
+
383
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
384
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
385
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
386
+ # and should be between [0, 1]
387
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
388
+ extra_step_kwargs = {}
389
+ if accepts_eta:
390
+ extra_step_kwargs["eta"] = eta
391
+
392
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
393
+ # expand the latents if we are doing classifier free guidance
394
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
395
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
396
+
397
+ # predict the noise residual
398
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
399
+
400
+ # perform guidance
401
+ if do_classifier_free_guidance:
402
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
403
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
404
+
405
+ # compute the previous noisy sample x_t -> x_t-1
406
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
407
+
408
+ # call the callback, if provided
409
+ if callback is not None and i % callback_steps == 0:
410
+ callback(i, t, latents)
411
+
412
+ latents = 1 / 0.18215 * latents
413
+ image = self.vae.decode(latents).sample
414
+
415
+ image = (image / 2 + 0.5).clamp(0, 1)
416
+
417
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
418
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
419
+
420
+ if self.safety_checker is not None:
421
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
422
+ self.device
423
+ )
424
+ image, has_nsfw_concept = self.safety_checker(
425
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
426
+ )
427
+ else:
428
+ has_nsfw_concept = None
429
+
430
+ if output_type == "pil":
431
+ image = self.numpy_to_pil(image)
432
+
433
+ if not return_dict:
434
+ return (image, has_nsfw_concept)
435
+
436
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
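
For orientation, here is a minimal usage sketch of the multilingual pipeline above. It assumes the community `custom_pipeline` loading mechanism, and the language-detection and MBart translation checkpoints named below are illustrative assumptions, not prescribed by this diff:

import torch
from diffusers import DiffusionPipeline
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Auxiliary components for detection and translation (illustrative checkpoint choices).
detection_pipeline = pipeline("text-classification", model="papluca/xlm-roberta-base-language-detection")
translation_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
translation_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="multilingual_stable_diffusion",
    detection_pipeline=detection_pipeline,
    translation_model=translation_model,
    translation_tokenizer=translation_tokenizer,
).to(device)

# A non-English prompt is detected and translated to English before encoding.
image = pipe("una casa en la playa").images[0]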
huggingface_diffusers/examples/community/one_step_unet.py ADDED
@@ -0,0 +1,24 @@
+ #!/usr/bin/env python3
+ import torch
+
+ from diffusers import DiffusionPipeline
+
+
+ class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
+     def __init__(self, unet, scheduler):
+         super().__init__()
+
+         self.register_modules(unet=unet, scheduler=scheduler)
+
+     def __call__(self):
+         image = torch.randn(
+             (1, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
+         )
+         timestep = 1
+
+         model_output = self.unet(image, timestep).sample
+         scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
+
+         result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
+
+         return result
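
Note that the pipeline above is effectively a smoke test for custom-pipeline loading rather than a generative model: `scheduler_output - scheduler_output` cancels to zero, so `__call__` always returns a tensor of ones after one UNet forward and one scheduler step. A hedged usage sketch, assuming the `custom_pipeline` mechanism and an illustrative checkpoint choice:

from diffusers import DiffusionPipeline

# The checkpoint only needs to provide a `unet` and a `scheduler`;
# "google/ddpm-cifar10-32" is an illustrative choice, not prescribed by this diff.
pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
output = pipe()  # tensor of ones with the sample's shape (1, C, H, W)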
huggingface_diffusers/examples/community/sd_text2img_k_diffusion.py ADDED
@@ -0,0 +1,476 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import importlib
+ import warnings
+ from typing import Callable, List, Optional, Union
+
+ import torch
+
+ from diffusers import DiffusionPipeline, LMSDiscreteScheduler
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+ from diffusers.utils import is_accelerate_available, logging
+ from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ class ModelWrapper:
+     def __init__(self, model, alphas_cumprod):
+         self.model = model
+         self.alphas_cumprod = alphas_cumprod
+
+     def apply_model(self, *args, **kwargs):
+         if len(args) == 3:
+             encoder_hidden_states = args[-1]
+             args = args[:2]
+         if kwargs.get("cond", None) is not None:
+             encoder_hidden_states = kwargs.pop("cond")
+         return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample
+
+
+ class StableDiffusionPipeline(DiffusionPipeline):
+     r"""
+     Pipeline for text-to-image generation using Stable Diffusion.
+
+     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+     Args:
+         vae ([`AutoencoderKL`]):
+             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+         text_encoder ([`CLIPTextModel`]):
+             Frozen text-encoder. Stable Diffusion uses the text portion of
+             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+         tokenizer (`CLIPTokenizer`):
+             Tokenizer of class
+             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
+         scheduler ([`SchedulerMixin`]):
+             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+         safety_checker ([`StableDiffusionSafetyChecker`]):
+             Classification module that estimates whether generated images could be considered offensive or harmful.
+             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
+         feature_extractor ([`CLIPFeatureExtractor`]):
+             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
+     """
+
+     _optional_components = ["safety_checker", "feature_extractor"]
+
+     def __init__(
+         self,
+         vae,
+         text_encoder,
+         tokenizer,
+         unet,
+         scheduler,
+         safety_checker,
+         feature_extractor,
+     ):
+         super().__init__()
+
+         if safety_checker is None:
+             logger.warning(
+                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                 " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
+                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                 " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
+                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+             )
+
+         # get correct sigmas from LMS
+         scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
+         self.register_modules(
+             vae=vae,
+             text_encoder=text_encoder,
+             tokenizer=tokenizer,
+             unet=unet,
+             scheduler=scheduler,
+             safety_checker=safety_checker,
+             feature_extractor=feature_extractor,
+         )
+
+         model = ModelWrapper(unet, scheduler.alphas_cumprod)
+         if scheduler.prediction_type == "v_prediction":
+             self.k_diffusion_model = CompVisVDenoiser(model)
+         else:
+             self.k_diffusion_model = CompVisDenoiser(model)
+
+     def set_sampler(self, scheduler_type: str):
+         warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.")
+         return self.set_scheduler(scheduler_type)
+
+     def set_scheduler(self, scheduler_type: str):
+         library = importlib.import_module("k_diffusion")
+         sampling = getattr(library, "sampling")
+         self.sampler = getattr(sampling, scheduler_type)
+
+     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
+         r"""
+         Enable sliced attention computation.
+
+         When this option is enabled, the attention module will split the input tensor in slices, to compute attention
+         in several steps. This is useful to save some memory in exchange for a small speed decrease.
+
+         Args:
+             slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
+                 When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+                 a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
+                 `attention_head_dim` must be a multiple of `slice_size`.
+         """
+         if slice_size == "auto":
+             # half the attention head size is usually a good trade-off between
+             # speed and memory
+             slice_size = self.unet.config.attention_head_dim // 2
+         self.unet.set_attention_slice(slice_size)
+
+     def disable_attention_slicing(self):
+         r"""
+         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
+         back to computing attention in one step.
+         """
+         # set slice_size = `None` to disable attention slicing
+         self.enable_attention_slicing(None)
+
+     def enable_sequential_cpu_offload(self, gpu_id=0):
+         r"""
+         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the unet,
+         text_encoder, vae and safety checker have their state dicts saved to CPU and are then moved to
+         `torch.device('meta')`, getting loaded to GPU only when their specific submodule has its `forward` method called.
+         """
+         if is_accelerate_available():
+             from accelerate import cpu_offload
+         else:
+             raise ImportError("Please install accelerate via `pip install accelerate`")
+
+         device = torch.device(f"cuda:{gpu_id}")
+
+         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
+             if cpu_offloaded_model is not None:
+                 cpu_offload(cpu_offloaded_model, device)
+
+     @property
+     def _execution_device(self):
+         r"""
+         Returns the device on which the pipeline's models will be executed. After calling
+         `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
+         hooks.
+         """
+         if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
+             return self.device
+         for module in self.unet.modules():
+             if (
+                 hasattr(module, "_hf_hook")
+                 and hasattr(module._hf_hook, "execution_device")
+                 and module._hf_hook.execution_device is not None
+             ):
+                 return torch.device(module._hf_hook.execution_device)
+         return self.device
+
+     def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
+         r"""
+         Encodes the prompt into text encoder hidden states.
+
+         Args:
+             prompt (`str` or `list(int)`):
+                 prompt to be encoded
+             device (`torch.device`):
+                 torch device
+             num_images_per_prompt (`int`):
+                 number of images that should be generated per prompt
+             do_classifier_free_guidance (`bool`):
+                 whether to use classifier free guidance or not
+             negative_prompt (`str` or `List[str]`):
+                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+                 if `guidance_scale` is less than `1`).
+         """
+         batch_size = len(prompt) if isinstance(prompt, list) else 1
+
+         text_inputs = self.tokenizer(
+             prompt,
+             padding="max_length",
+             max_length=self.tokenizer.model_max_length,
+             truncation=True,
+             return_tensors="pt",
+         )
+         text_input_ids = text_inputs.input_ids
+         untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
+
+         if not torch.equal(text_input_ids, untruncated_ids):
+             removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
+             logger.warning(
+                 "The following part of your input was truncated because CLIP can only handle sequences up to"
+                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
+             )
+
+         if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+             attention_mask = text_inputs.attention_mask.to(device)
+         else:
+             attention_mask = None
+
+         text_embeddings = self.text_encoder(
+             text_input_ids.to(device),
+             attention_mask=attention_mask,
+         )
+         text_embeddings = text_embeddings[0]
+
+         # duplicate text embeddings for each generation per prompt, using mps friendly method
+         bs_embed, seq_len, _ = text_embeddings.shape
+         text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
+         text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+         # get unconditional embeddings for classifier free guidance
+         if do_classifier_free_guidance:
+             uncond_tokens: List[str]
+             if negative_prompt is None:
+                 uncond_tokens = [""] * batch_size
+             elif type(prompt) is not type(negative_prompt):
+                 raise TypeError(
+                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                     f" {type(prompt)}."
+                 )
+             elif isinstance(negative_prompt, str):
+                 uncond_tokens = [negative_prompt]
+             elif batch_size != len(negative_prompt):
+                 raise ValueError(
+                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+                     " the batch size of `prompt`."
+                 )
+             else:
+                 uncond_tokens = negative_prompt
+
+             max_length = text_input_ids.shape[-1]
+             uncond_input = self.tokenizer(
+                 uncond_tokens,
+                 padding="max_length",
+                 max_length=max_length,
+                 truncation=True,
+                 return_tensors="pt",
+             )
+
+             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+                 attention_mask = uncond_input.attention_mask.to(device)
+             else:
+                 attention_mask = None
+
+             uncond_embeddings = self.text_encoder(
+                 uncond_input.input_ids.to(device),
+                 attention_mask=attention_mask,
+             )
+             uncond_embeddings = uncond_embeddings[0]
+
+             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+             seq_len = uncond_embeddings.shape[1]
+             uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
+             uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+             # For classifier free guidance, we need to do two forward passes.
+             # Here we concatenate the unconditional and text embeddings into a single batch
+             # to avoid doing two forward passes
+             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+         return text_embeddings
+
+     def run_safety_checker(self, image, device, dtype):
+         if self.safety_checker is not None:
+             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
+             image, has_nsfw_concept = self.safety_checker(
+                 images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+             )
+         else:
+             has_nsfw_concept = None
+         return image, has_nsfw_concept
+
+     def decode_latents(self, latents):
+         latents = 1 / 0.18215 * latents
+         image = self.vae.decode(latents).sample
+         image = (image / 2 + 0.5).clamp(0, 1)
+         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+         return image
+
+     def check_inputs(self, prompt, height, width, callback_steps):
+         if not isinstance(prompt, str) and not isinstance(prompt, list):
+             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+         if height % 8 != 0 or width % 8 != 0:
+             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+         if (callback_steps is None) or (
+             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
+         ):
+             raise ValueError(
+                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+                 f" {type(callback_steps)}."
+             )
+
+     def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
+         shape = (batch_size, num_channels_latents, height // 8, width // 8)
+         if latents is None:
+             if device.type == "mps":
+                 # randn does not work reproducibly on mps
+                 latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
+             else:
+                 latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
+         else:
+             if latents.shape != shape:
+                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
+             latents = latents.to(device)
+
+         # the caller scales the initial noise (here by `sigmas[0]`), so return it unscaled
+         return latents
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         prompt: Union[str, List[str]],
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         r"""
+         Function invoked when calling the pipeline for generation.
+
+         Args:
+             prompt (`str` or `List[str]`):
+                 The prompt or prompts to guide the image generation.
+             height (`int`, *optional*, defaults to 512):
+                 The height in pixels of the generated image.
+             width (`int`, *optional*, defaults to 512):
+                 The width in pixels of the generated image.
+             num_inference_steps (`int`, *optional*, defaults to 50):
+                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                 expense of slower inference.
+             guidance_scale (`float`, *optional*, defaults to 7.5):
+                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                 1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
+                 usually at the expense of lower image quality.
+             negative_prompt (`str` or `List[str]`, *optional*):
+                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
+                 if `guidance_scale` is less than `1`).
+             num_images_per_prompt (`int`, *optional*, defaults to 1):
+                 The number of images to generate per prompt.
+             eta (`float`, *optional*, defaults to 0.0):
+                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                 [`schedulers.DDIMScheduler`]; it is ignored for other schedulers.
+             generator (`torch.Generator`, *optional*):
+                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
+                 deterministic.
+             latents (`torch.FloatTensor`, *optional*):
+                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                 tensor will be generated by sampling using the supplied random `generator`.
+             output_type (`str`, *optional*, defaults to `"pil"`):
+                 The output format of the generated image. Choose between
+                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                 plain tuple.
+             callback (`Callable`, *optional*):
+                 A function that will be called every `callback_steps` steps during inference. The function will be
+                 called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
+             callback_steps (`int`, *optional*, defaults to 1):
+                 The frequency at which the `callback` function will be called. If not specified, the callback will be
+                 called at every step.
+
+         Returns:
+             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
+             When returning a tuple, the first element is a list with the generated images, and the second element is a
+             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
+             (nsfw) content, according to the `safety_checker`.
+         """
+
+         # 1. Check inputs. Raise error if not correct
+         self.check_inputs(prompt, height, width, callback_steps)
+
+         # 2. Define call parameters
+         batch_size = 1 if isinstance(prompt, str) else len(prompt)
+         device = self._execution_device
+         # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+         # corresponds to doing no classifier free guidance.
+         do_classifier_free_guidance = True
+         if guidance_scale <= 1.0:
+             raise ValueError("This pipeline always uses classifier-free guidance; `guidance_scale` has to be > 1.")
+
+         # 3. Encode input prompt
+         text_embeddings = self._encode_prompt(
+             prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
+         )
+
+         # 4. Prepare timesteps
+         self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device)
+         sigmas = self.scheduler.sigmas
+         sigmas = sigmas.to(text_embeddings.dtype)
+
+         # 5. Prepare latent variables
+         num_channels_latents = self.unet.in_channels
+         latents = self.prepare_latents(
+             batch_size * num_images_per_prompt,
+             num_channels_latents,
+             height,
+             width,
+             text_embeddings.dtype,
+             device,
+             generator,
+             latents,
+         )
+         latents = latents * sigmas[0]
+         self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
+         self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device)
+
+         def model_fn(x, t):
+             latent_model_input = torch.cat([x] * 2)
+
+             noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings)
+
+             noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+             noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+             return noise_pred
+
+         latents = self.sampler(model_fn, latents, sigmas)
+
+         # 8. Post-processing
+         image = self.decode_latents(latents)
+
+         # 9. Run safety checker
+         image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
+
+         # 10. Convert to PIL
+         if output_type == "pil":
+             image = self.numpy_to_pil(image)
+
+         if not return_dict:
+             return (image, has_nsfw_concept)
+
+         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
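
A hedged usage sketch for the k-diffusion pipeline above. It assumes `k-diffusion` is installed and that the sampler name passed to `set_scheduler` exists in `k_diffusion.sampling` (e.g. `sample_heun`); the checkpoint is an illustrative choice:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="sd_text2img_k_diffusion"
).to("cuda")

# `set_scheduler` resolves the name via getattr(k_diffusion.sampling, name),
# so any sampler defined in that module should work.
pipe.set_scheduler("sample_heun")

generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(
    "an astronaut riding a horse on mars",
    generator=generator,
    num_inference_steps=20,
).images[0]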
huggingface_diffusers/examples/community/seed_resize_stable_diffusion.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ modified based on diffusion library from Huggingface: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
3
+ """
4
+
5
+ import inspect
6
+ from typing import Callable, List, Optional, Union
7
+
8
+ import torch
9
+
10
+ from diffusers import DiffusionPipeline
11
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
15
+ from diffusers.utils import logging
16
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
17
+
18
+
19
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
20
+
21
+
22
+ class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
23
+ r"""
24
+ Pipeline for text-to-image generation using Stable Diffusion.
25
+
26
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
27
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
28
+
29
+ Args:
30
+ vae ([`AutoencoderKL`]):
31
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
32
+ text_encoder ([`CLIPTextModel`]):
33
+ Frozen text-encoder. Stable Diffusion uses the text portion of
34
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
35
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
36
+ tokenizer (`CLIPTokenizer`):
37
+ Tokenizer of class
38
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
39
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
40
+ scheduler ([`SchedulerMixin`]):
41
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
42
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
43
+ safety_checker ([`StableDiffusionSafetyChecker`]):
44
+ Classification module that estimates whether generated images could be considered offensive or harmful.
45
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
46
+ feature_extractor ([`CLIPFeatureExtractor`]):
47
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
48
+ """
49
+
50
+ def __init__(
51
+ self,
52
+ vae: AutoencoderKL,
53
+ text_encoder: CLIPTextModel,
54
+ tokenizer: CLIPTokenizer,
55
+ unet: UNet2DConditionModel,
56
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
57
+ safety_checker: StableDiffusionSafetyChecker,
58
+ feature_extractor: CLIPFeatureExtractor,
59
+ ):
60
+ super().__init__()
61
+ self.register_modules(
62
+ vae=vae,
63
+ text_encoder=text_encoder,
64
+ tokenizer=tokenizer,
65
+ unet=unet,
66
+ scheduler=scheduler,
67
+ safety_checker=safety_checker,
68
+ feature_extractor=feature_extractor,
69
+ )
70
+
71
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
72
+ r"""
73
+ Enable sliced attention computation.
74
+
75
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
76
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
77
+
78
+ Args:
79
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
80
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
81
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
82
+ `attention_head_dim` must be a multiple of `slice_size`.
83
+ """
84
+ if slice_size == "auto":
85
+ # half the attention head size is usually a good trade-off between
86
+ # speed and memory
87
+ slice_size = self.unet.config.attention_head_dim // 2
88
+ self.unet.set_attention_slice(slice_size)
89
+
90
+ def disable_attention_slicing(self):
91
+ r"""
92
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
93
+ back to computing attention in one step.
94
+ """
95
+ # set slice_size = `None` to disable `attention slicing`
96
+ self.enable_attention_slicing(None)
97
+
98
+ @torch.no_grad()
99
+ def __call__(
100
+ self,
101
+ prompt: Union[str, List[str]],
102
+ height: int = 512,
103
+ width: int = 512,
104
+ num_inference_steps: int = 50,
105
+ guidance_scale: float = 7.5,
106
+ negative_prompt: Optional[Union[str, List[str]]] = None,
107
+ num_images_per_prompt: Optional[int] = 1,
108
+ eta: float = 0.0,
109
+ generator: Optional[torch.Generator] = None,
110
+ latents: Optional[torch.FloatTensor] = None,
111
+ output_type: Optional[str] = "pil",
112
+ return_dict: bool = True,
113
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
114
+ callback_steps: Optional[int] = 1,
115
+ text_embeddings: Optional[torch.FloatTensor] = None,
116
+ **kwargs,
117
+ ):
118
+ r"""
119
+ Function invoked when calling the pipeline for generation.
120
+
121
+ Args:
122
+ prompt (`str` or `List[str]`):
123
+ The prompt or prompts to guide the image generation.
124
+ height (`int`, *optional*, defaults to 512):
125
+ The height in pixels of the generated image.
126
+ width (`int`, *optional*, defaults to 512):
127
+ The width in pixels of the generated image.
128
+ num_inference_steps (`int`, *optional*, defaults to 50):
129
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
130
+ expense of slower inference.
131
+ guidance_scale (`float`, *optional*, defaults to 7.5):
132
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
133
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
134
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
135
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
136
+ usually at the expense of lower image quality.
137
+ negative_prompt (`str` or `List[str]`, *optional*):
138
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
139
+ if `guidance_scale` is less than `1`).
140
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
141
+ The number of images to generate per prompt.
142
+ eta (`float`, *optional*, defaults to 0.0):
143
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
144
+ [`schedulers.DDIMScheduler`], will be ignored for others.
145
+ generator (`torch.Generator`, *optional*):
146
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
147
+ deterministic.
148
+ latents (`torch.FloatTensor`, *optional*):
149
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
150
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
151
+ tensor will ge generated by sampling using the supplied random `generator`.
152
+ output_type (`str`, *optional*, defaults to `"pil"`):
153
+ The output format of the generate image. Choose between
154
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
155
+ return_dict (`bool`, *optional*, defaults to `True`):
156
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
157
+ plain tuple.
158
+ callback (`Callable`, *optional*):
159
+ A function that will be called every `callback_steps` steps during inference. The function will be
160
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
161
+ callback_steps (`int`, *optional*, defaults to 1):
162
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
163
+ called at every step.
164
+
165
+ Returns:
166
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
167
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
168
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
169
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
170
+ (nsfw) content, according to the `safety_checker`.
171
+ """
172
+
173
+ if isinstance(prompt, str):
174
+ batch_size = 1
175
+ elif isinstance(prompt, list):
176
+ batch_size = len(prompt)
177
+ else:
178
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
179
+
180
+ if height % 8 != 0 or width % 8 != 0:
181
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
182
+
183
+ if (callback_steps is None) or (
184
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
185
+ ):
186
+ raise ValueError(
187
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
188
+ f" {type(callback_steps)}."
189
+ )
190
+
191
+ # get prompt text embeddings
192
+ text_inputs = self.tokenizer(
193
+ prompt,
194
+ padding="max_length",
195
+ max_length=self.tokenizer.model_max_length,
196
+ return_tensors="pt",
197
+ )
198
+ text_input_ids = text_inputs.input_ids
199
+
200
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
201
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
202
+ logger.warning(
203
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
204
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
205
+ )
206
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
207
+
208
+ if text_embeddings is None:
209
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
210
+
211
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
212
+ bs_embed, seq_len, _ = text_embeddings.shape
213
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
214
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
215
+
216
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
217
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
218
+ # corresponds to doing no classifier free guidance.
219
+ do_classifier_free_guidance = guidance_scale > 1.0
220
+ # get unconditional embeddings for classifier free guidance
221
+ if do_classifier_free_guidance:
222
+ uncond_tokens: List[str]
223
+ if negative_prompt is None:
224
+ uncond_tokens = [""]
225
+ elif type(prompt) is not type(negative_prompt):
226
+ raise TypeError(
227
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
228
+ f" {type(prompt)}."
229
+ )
230
+ elif isinstance(negative_prompt, str):
231
+ uncond_tokens = [negative_prompt]
232
+ elif batch_size != len(negative_prompt):
233
+ raise ValueError(
234
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
235
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
236
+ " the batch size of `prompt`."
237
+ )
238
+ else:
239
+ uncond_tokens = negative_prompt
240
+
241
+ max_length = text_input_ids.shape[-1]
242
+ uncond_input = self.tokenizer(
243
+ uncond_tokens,
244
+ padding="max_length",
245
+ max_length=max_length,
246
+ truncation=True,
247
+ return_tensors="pt",
248
+ )
249
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
250
+
251
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
252
+ seq_len = uncond_embeddings.shape[1]
253
+ uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
254
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
255
+
256
+ # For classifier free guidance, we need to do two forward passes.
257
+ # Here we concatenate the unconditional and text embeddings into a single batch
258
+ # to avoid doing two forward passes
259
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
260
+
261
+ # get the initial random noise unless the user supplied it
262
+
263
+ # Unlike in other pipelines, latents need to be generated in the target device
264
+ # for 1-to-1 results reproducibility with the CompVis implementation.
265
+ # However this currently doesn't work in `mps`.
266
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)
267
+ latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.in_channels, 64, 64)
268
+ latents_dtype = text_embeddings.dtype
269
+ if latents is None:
270
+ if self.device.type == "mps":
271
+ # randn does not exist on mps
272
+ latents_reference = torch.randn(
273
+ latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
274
+ ).to(self.device)
275
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
276
+                     self.device
+                 )
+             else:
+                 latents_reference = torch.randn(
+                     latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
+                 )
+                 latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
+         else:
+             if latents_reference.shape != latents_shape:
+                 raise ValueError(f"Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}")
+             latents_reference = latents_reference.to(self.device)
+             latents = latents.to(self.device)
+
+         # This is the key part of the pipeline where we
+         # try to ensure that the generated images with the same seed
+         # but different sizes actually result in similar images
+         dx = (latents_shape[3] - latents_shape_reference[3]) // 2
+         dy = (latents_shape[2] - latents_shape_reference[2]) // 2
+         w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
+         h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
+         tx = 0 if dx < 0 else dx
+         ty = 0 if dy < 0 else dy
+         dx = max(-dx, 0)
+         dy = max(-dy, 0)
+         # copy the (possibly cropped) reference latents into the centre of the target latents
+         latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
+
+         # set timesteps
+         self.scheduler.set_timesteps(num_inference_steps)
+
+         # Some schedulers like PNDM have timesteps as arrays
+         # It's more optimized to move all timesteps to the correct device beforehand
+         timesteps_tensor = self.scheduler.timesteps.to(self.device)
+
+         # scale the initial noise by the standard deviation required by the scheduler
+         latents = latents * self.scheduler.init_noise_sigma
+
+         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+         # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
+         # and should be in [0, 1]
+         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         extra_step_kwargs = {}
+         if accepts_eta:
+             extra_step_kwargs["eta"] = eta
+
+         for i, t in enumerate(self.progress_bar(timesteps_tensor)):
+             # expand the latents if we are doing classifier free guidance
+             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+             # predict the noise residual
+             noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
+
+             # perform guidance
+             if do_classifier_free_guidance:
+                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+             # compute the previous noisy sample x_t -> x_t-1
+             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
+
+             # call the callback, if provided
+             if callback is not None and i % callback_steps == 0:
+                 callback(i, t, latents)
+
+         # scale and decode the image latents with the VAE
+         latents = 1 / 0.18215 * latents
+         image = self.vae.decode(latents).sample
+
+         image = (image / 2 + 0.5).clamp(0, 1)
+
+         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+
+         if self.safety_checker is not None:
+             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
+                 self.device
+             )
+             image, has_nsfw_concept = self.safety_checker(
+                 images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
+             )
+         else:
+             has_nsfw_concept = None
+
+         if output_type == "pil":
+             image = self.numpy_to_pil(image)
+
+         if not return_dict:
+             return (image, has_nsfw_concept)
+
+         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
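
For context, a minimal usage sketch for the seed-resize pipeline above, assuming the standard `custom_pipeline` loading mechanism and the CompVis v1-4 checkpoint (model ID, dtype, and prompt are illustrative):

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="seed_resize_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

prompt = "a photograph of an astronaut riding a horse"

# Same seed at two resolutions: the pipeline copies the reference latents into
# the centre of the larger latent grid, so both images should share a similar
# overall composition.
generator = torch.Generator(device="cuda").manual_seed(0)
image_ref = pipe(prompt, height=512, width=512, generator=generator).images[0]

generator = torch.Generator(device="cuda").manual_seed(0)
image_wide = pipe(prompt, height=512, width=768, generator=generator).images[0]
```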
huggingface_diffusers/examples/community/speech_to_image_diffusion.py ADDED
@@ -0,0 +1,261 @@
+ import inspect
+ from typing import Callable, List, Optional, Union
+
+ import torch
+
+ from diffusers import (
+     AutoencoderKL,
+     DDIMScheduler,
+     DiffusionPipeline,
+     LMSDiscreteScheduler,
+     PNDMScheduler,
+     UNet2DConditionModel,
+ )
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+ from diffusers.utils import logging
+ from transformers import (
+     CLIPFeatureExtractor,
+     CLIPTextModel,
+     CLIPTokenizer,
+     WhisperForConditionalGeneration,
+     WhisperProcessor,
+ )
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ class SpeechToImagePipeline(DiffusionPipeline):
+     def __init__(
+         self,
+         speech_model: WhisperForConditionalGeneration,
+         speech_processor: WhisperProcessor,
+         vae: AutoencoderKL,
+         text_encoder: CLIPTextModel,
+         tokenizer: CLIPTokenizer,
+         unet: UNet2DConditionModel,
+         scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
+         safety_checker: StableDiffusionSafetyChecker,
+         feature_extractor: CLIPFeatureExtractor,
+     ):
+         super().__init__()
+
+         if safety_checker is None:
+             logger.warning(
+                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                 " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
+                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                 " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
+                 " it only for use cases that involve analyzing network behavior or auditing its results. For more"
+                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+             )
+
+         self.register_modules(
+             speech_model=speech_model,
+             speech_processor=speech_processor,
+             vae=vae,
+             text_encoder=text_encoder,
+             tokenizer=tokenizer,
+             unet=unet,
+             scheduler=scheduler,
+             feature_extractor=feature_extractor,
+         )
+
+     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
+         if slice_size == "auto":
+             slice_size = self.unet.config.attention_head_dim // 2
+         self.unet.set_attention_slice(slice_size)
+
+     def disable_attention_slicing(self):
+         self.enable_attention_slicing(None)
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         audio,
+         sampling_rate=16_000,
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         # transcribe the audio with Whisper and use the transcription as the prompt
+         inputs = self.speech_processor.feature_extractor(
+             audio, return_tensors="pt", sampling_rate=sampling_rate
+         ).input_features.to(self.device)
+         predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
+
+         prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
+             0
+         ]
+
+         if isinstance(prompt, str):
+             batch_size = 1
+         elif isinstance(prompt, list):
+             batch_size = len(prompt)
+         else:
+             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+         if height % 8 != 0 or width % 8 != 0:
+             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+         if (callback_steps is None) or (
+             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
+         ):
+             raise ValueError(
+                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+                 f" {type(callback_steps)}."
+             )
+
+         # get prompt text embeddings
+         text_inputs = self.tokenizer(
+             prompt,
+             padding="max_length",
+             max_length=self.tokenizer.model_max_length,
+             return_tensors="pt",
+         )
+         text_input_ids = text_inputs.input_ids
+
+         if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
+             removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
+             logger.warning(
+                 "The following part of your input was truncated because CLIP can only handle sequences up to"
+                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
+             )
+             text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
+         text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
+
+         # duplicate text embeddings for each generation per prompt, using mps friendly method
+         bs_embed, seq_len, _ = text_embeddings.shape
+         text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
+         text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
+
+         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+         # corresponds to doing no classifier free guidance.
+         do_classifier_free_guidance = guidance_scale > 1.0
+         # get unconditional embeddings for classifier free guidance
+         if do_classifier_free_guidance:
+             uncond_tokens: List[str]
+             if negative_prompt is None:
+                 uncond_tokens = [""] * batch_size
+             elif type(prompt) is not type(negative_prompt):
+                 raise TypeError(
+                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
+                     f" {type(prompt)}."
+                 )
+             elif isinstance(negative_prompt, str):
+                 uncond_tokens = [negative_prompt]
+             elif batch_size != len(negative_prompt):
+                 raise ValueError(
+                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+                     f" {prompt} has batch size {batch_size}. Please make sure that the passed `negative_prompt`"
+                     " matches the batch size of `prompt`."
+                 )
+             else:
+                 uncond_tokens = negative_prompt
+
+             max_length = text_input_ids.shape[-1]
+             uncond_input = self.tokenizer(
+                 uncond_tokens,
+                 padding="max_length",
+                 max_length=max_length,
+                 truncation=True,
+                 return_tensors="pt",
+             )
+             uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
+
+             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+             seq_len = uncond_embeddings.shape[1]
+             uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
+             uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+             # For classifier free guidance, we need to do two forward passes.
+             # Here we concatenate the unconditional and text embeddings into a single batch
+             # to avoid doing two forward passes
+             text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
+
+         # get the initial random noise unless the user supplied it
+
+         # Unlike in other pipelines, latents need to be generated on the target device
+         # for 1-to-1 reproducibility with the CompVis implementation.
+         # However this currently doesn't work on `mps`.
+         latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)
+         latents_dtype = text_embeddings.dtype
+         if latents is None:
+             if self.device.type == "mps":
+                 # randn does not exist on mps
+                 latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
+                     self.device
+                 )
+             else:
+                 latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
+         else:
+             if latents.shape != latents_shape:
+                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
+             latents = latents.to(self.device)
+
+         # set timesteps
+         self.scheduler.set_timesteps(num_inference_steps)
+
+         # Some schedulers like PNDM have timesteps as arrays
+         # It's more optimized to move all timesteps to the correct device beforehand
+         timesteps_tensor = self.scheduler.timesteps.to(self.device)
+
+         # scale the initial noise by the standard deviation required by the scheduler
+         latents = latents * self.scheduler.init_noise_sigma
+
+         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+         # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
+         # and should be in [0, 1]
+         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         extra_step_kwargs = {}
+         if accepts_eta:
+             extra_step_kwargs["eta"] = eta
+
+         for i, t in enumerate(self.progress_bar(timesteps_tensor)):
+             # expand the latents if we are doing classifier free guidance
+             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+             # predict the noise residual
+             noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
+
+             # perform guidance
+             if do_classifier_free_guidance:
+                 noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+             # compute the previous noisy sample x_t -> x_t-1
+             latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
+
+             # call the callback, if provided
+             if callback is not None and i % callback_steps == 0:
+                 callback(i, t, latents)
+
+         latents = 1 / 0.18215 * latents
+         image = self.vae.decode(latents).sample
+
+         image = (image / 2 + 0.5).clamp(0, 1)
+
+         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+
+         if output_type == "pil":
+             image = self.numpy_to_pil(image)
+
+         if not return_dict:
+             return image
+
+         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
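
And a usage sketch for the speech-to-image pipeline above. The Whisper checkpoint and the test dataset are illustrative; any 16 kHz mono waveform array works as `audio`:

```python
import torch
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# A short spoken sample; its transcription becomes the text prompt.
audio_sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[3]["audio"]

speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=speech_model,
    speech_processor=speech_processor,
).to(device)

image = pipe(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]
image.save("speech_to_image.png")
```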
huggingface_diffusers/examples/community/stable_diffusion_comparison.py ADDED
@@ -0,0 +1,405 @@
+ from typing import Any, Callable, Dict, List, Optional, Union
+
+ import torch
+
+ from diffusers import (
+     AutoencoderKL,
+     DDIMScheduler,
+     DiffusionPipeline,
+     LMSDiscreteScheduler,
+     PNDMScheduler,
+     StableDiffusionPipeline,
+     UNet2DConditionModel,
+ )
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+
+
+ pipe1_model_id = "CompVis/stable-diffusion-v1-1"
+ pipe2_model_id = "CompVis/stable-diffusion-v1-2"
+ pipe3_model_id = "CompVis/stable-diffusion-v1-3"
+ pipe4_model_id = "CompVis/stable-diffusion-v1-4"
+
+
+ class StableDiffusionComparisonPipeline(DiffusionPipeline):
+     r"""
+     Pipeline for side-by-side comparison of Stable Diffusion v1.1-v1.4.
+     This pipeline inherits from DiffusionPipeline and depends on an auth token for
+     downloading pre-trained checkpoints from the Hugging Face Hub.
+     When using the Hugging Face Hub, pass the model ID for Stable Diffusion v1.4; the previous three checkpoints
+     will be loaded automatically.
+     Args:
+         vae ([`AutoencoderKL`]):
+             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+         text_encoder ([`CLIPTextModel`]):
+             Frozen text-encoder. Stable Diffusion uses the text portion of
+             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+         tokenizer (`CLIPTokenizer`):
+             Tokenizer of class
+             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
+         scheduler ([`SchedulerMixin`]):
+             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+         safety_checker ([`StableDiffusionMegaSafetyChecker`]):
+             Classification module that estimates whether generated images could be considered offensive or harmful.
+             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
+         feature_extractor ([`CLIPFeatureExtractor`]):
+             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
+     """
+
+     def __init__(
+         self,
+         vae: AutoencoderKL,
+         text_encoder: CLIPTextModel,
+         tokenizer: CLIPTokenizer,
+         unet: UNet2DConditionModel,
+         scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
+         safety_checker: StableDiffusionSafetyChecker,
+         feature_extractor: CLIPFeatureExtractor,
+         requires_safety_checker: bool = True,
+     ):
+         super().__init__()
+
+         self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
+         self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
+         self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
+         self.pipe4 = StableDiffusionPipeline(
+             vae=vae,
+             text_encoder=text_encoder,
+             tokenizer=tokenizer,
+             unet=unet,
+             scheduler=scheduler,
+             safety_checker=safety_checker,
+             feature_extractor=feature_extractor,
+             requires_safety_checker=requires_safety_checker,
+         )
+
+         self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
+
+     @property
+     def layers(self) -> Dict[str, Any]:
+         return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
+
+     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
+         r"""
+         Enable sliced attention computation.
+         When this option is enabled, the attention module splits the input tensor into slices to compute attention
+         in several steps. This is useful to save some memory in exchange for a small speed decrease.
+         Args:
+             slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
+                 When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+                 a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
+                 `attention_head_dim` must be a multiple of `slice_size`.
+         """
+         if slice_size == "auto":
+             # half the attention head size is usually a good trade-off between
+             # speed and memory
+             slice_size = self.unet.config.attention_head_dim // 2
+         self.unet.set_attention_slice(slice_size)
+
+     def disable_attention_slicing(self):
+         r"""
+         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will
+         go back to computing attention in one step.
+         """
+         # set slice_size = `None` to disable `attention slicing`
+         self.enable_attention_slicing(None)
+
+     @torch.no_grad()
+     def text2img_sd1_1(
+         self,
+         prompt: Union[str, List[str]],
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         return self.pipe1(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+     @torch.no_grad()
+     def text2img_sd1_2(
+         self,
+         prompt: Union[str, List[str]],
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         return self.pipe2(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+     @torch.no_grad()
+     def text2img_sd1_3(
+         self,
+         prompt: Union[str, List[str]],
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         return self.pipe3(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+     @torch.no_grad()
+     def text2img_sd1_4(
+         self,
+         prompt: Union[str, List[str]],
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         return self.pipe4(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+     @torch.no_grad()
+     def __call__(
+         self,
+         prompt: Union[str, List[str]],
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         r"""
+         Function invoked when calling the pipeline for generation. It produces four results by running the
+         pipelines for SD v1.1-v1.4 sequentially on the same inputs.
+         Args:
+             prompt (`str` or `List[str]`):
+                 The prompt or prompts to guide the image generation.
+             height (`int`, *optional*, defaults to 512):
+                 The height in pixels of the generated image.
+             width (`int`, *optional*, defaults to 512):
+                 The width in pixels of the generated image.
+             num_inference_steps (`int`, *optional*, defaults to 50):
+                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                 expense of slower inference.
+             guidance_scale (`float`, *optional*, defaults to 7.5):
+                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                 1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
+                 usually at the expense of lower image quality.
+             eta (`float`, *optional*, defaults to 0.0):
+                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                 [`schedulers.DDIMScheduler`]; ignored for other schedulers.
+             generator (`torch.Generator`, *optional*):
+                 A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
+                 deterministic.
+             latents (`torch.FloatTensor`, *optional*):
+                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                 generation. Can be used to tweak the same generation with different prompts. If not provided, a
+                 latents tensor will be generated by sampling using the supplied random `generator`.
+             output_type (`str`, *optional*, defaults to `"pil"`):
+                 The output format of the generated image. Choose between
+                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                 plain tuple.
+         Returns:
+             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
+             `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
+             element is a list of `bool`s denoting whether the corresponding generated image likely represents
+             "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
+         """
+
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+         self.to(device)
+
+         # Check that the height and width are divisible by 8
+         if height % 8 != 0 or width % 8 != 0:
+             raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
+
+         # Get the result from Stable Diffusion checkpoint v1.1
+         res1 = self.text2img_sd1_1(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+         # Get the result from Stable Diffusion checkpoint v1.2
+         res2 = self.text2img_sd1_2(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+         # Get the result from Stable Diffusion checkpoint v1.3
+         res3 = self.text2img_sd1_3(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+         # Get the result from Stable Diffusion checkpoint v1.4
+         res4 = self.text2img_sd1_4(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+             **kwargs,
+         )
+
+         # Collect all result images into a single list and return it via StableDiffusionPipelineOutput
+         return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
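
A usage sketch for the comparison pipeline above (assuming the standard `custom_pipeline` mechanism; access to the four CompVis checkpoints on the Hub is required):

```python
from diffusers import DiffusionPipeline

# Pass the v1.4 model ID; the pipeline downloads v1.1-v1.3 itself.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
).to("cuda")

output = pipe("a photo of an astronaut riding a horse on mars")
# output.images collects the four per-checkpoint results, in v1.1 -> v1.4 order.
```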
huggingface_diffusers/examples/community/stable_diffusion_mega.py ADDED
@@ -0,0 +1,228 @@
+ from typing import Any, Callable, Dict, List, Optional, Union
+
+ import torch
+
+ import PIL.Image
+ from diffusers import (
+     AutoencoderKL,
+     DDIMScheduler,
+     DiffusionPipeline,
+     LMSDiscreteScheduler,
+     PNDMScheduler,
+     StableDiffusionImg2ImgPipeline,
+     StableDiffusionInpaintPipelineLegacy,
+     StableDiffusionPipeline,
+     UNet2DConditionModel,
+ )
+ from diffusers.configuration_utils import FrozenDict
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+ from diffusers.utils import deprecate, logging
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ class StableDiffusionMegaPipeline(DiffusionPipeline):
+     r"""
+     Pipeline for text-to-image generation using Stable Diffusion.
+
+     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+     Args:
+         vae ([`AutoencoderKL`]):
+             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
+         text_encoder ([`CLIPTextModel`]):
+             Frozen text-encoder. Stable Diffusion uses the text portion of
+             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+         tokenizer (`CLIPTokenizer`):
+             Tokenizer of class
+             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
+         scheduler ([`SchedulerMixin`]):
+             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+         safety_checker ([`StableDiffusionMegaSafetyChecker`]):
+             Classification module that estimates whether generated images could be considered offensive or harmful.
+             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
+         feature_extractor ([`CLIPFeatureExtractor`]):
+             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
+     """
+
+     _optional_components = ["safety_checker", "feature_extractor"]
+
+     def __init__(
+         self,
+         vae: AutoencoderKL,
+         text_encoder: CLIPTextModel,
+         tokenizer: CLIPTokenizer,
+         unet: UNet2DConditionModel,
+         scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
+         safety_checker: StableDiffusionSafetyChecker,
+         feature_extractor: CLIPFeatureExtractor,
+         requires_safety_checker: bool = True,
+     ):
+         super().__init__()
+         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
+             deprecation_message = (
+                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
+                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
+                 "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
+                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
+                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
+                 " file"
+             )
+             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
+             new_config = dict(scheduler.config)
+             new_config["steps_offset"] = 1
+             scheduler._internal_dict = FrozenDict(new_config)
+
+         self.register_modules(
+             vae=vae,
+             text_encoder=text_encoder,
+             tokenizer=tokenizer,
+             unet=unet,
+             scheduler=scheduler,
+             safety_checker=safety_checker,
+             feature_extractor=feature_extractor,
+         )
+         self.register_to_config(requires_safety_checker=requires_safety_checker)
+
+     @property
+     def components(self) -> Dict[str, Any]:
+         return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
+
+     def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
+         r"""
+         Enable sliced attention computation.
+
+         When this option is enabled, the attention module splits the input tensor into slices to compute attention
+         in several steps. This is useful to save some memory in exchange for a small speed decrease.
+
+         Args:
+             slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
+                 When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
+                 a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
+                 `attention_head_dim` must be a multiple of `slice_size`.
+         """
+         if slice_size == "auto":
+             # half the attention head size is usually a good trade-off between
+             # speed and memory
+             slice_size = self.unet.config.attention_head_dim // 2
+         self.unet.set_attention_slice(slice_size)
+
+     def disable_attention_slicing(self):
+         r"""
+         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will
+         go back to computing attention in one step.
+         """
+         # set slice_size = `None` to disable `attention slicing`
+         self.enable_attention_slicing(None)
+
+     @torch.no_grad()
+     def inpaint(
+         self,
+         prompt: Union[str, List[str]],
+         image: Union[torch.FloatTensor, PIL.Image.Image],
+         mask_image: Union[torch.FloatTensor, PIL.Image.Image],
+         strength: float = 0.8,
+         num_inference_steps: Optional[int] = 50,
+         guidance_scale: Optional[float] = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: Optional[float] = 0.0,
+         generator: Optional[torch.Generator] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+     ):
+         # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline
+         return StableDiffusionInpaintPipelineLegacy(**self.components)(
+             prompt=prompt,
+             image=image,
+             mask_image=mask_image,
+             strength=strength,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+         )
+
+     @torch.no_grad()
+     def img2img(
+         self,
+         prompt: Union[str, List[str]],
+         image: Union[torch.FloatTensor, PIL.Image.Image],
+         strength: float = 0.8,
+         num_inference_steps: Optional[int] = 50,
+         guidance_scale: Optional[float] = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: Optional[float] = 0.0,
+         generator: Optional[torch.Generator] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+         **kwargs,
+     ):
+         # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline
+         return StableDiffusionImg2ImgPipeline(**self.components)(
+             prompt=prompt,
+             image=image,
+             strength=strength,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+         )
+
+     @torch.no_grad()
+     def text2img(
+         self,
+         prompt: Union[str, List[str]],
+         height: int = 512,
+         width: int = 512,
+         num_inference_steps: int = 50,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[torch.Generator] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
+         callback_steps: Optional[int] = 1,
+     ):
+         # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline
+         return StableDiffusionPipeline(**self.components)(
+             prompt=prompt,
+             height=height,
+             width=width,
+             num_inference_steps=num_inference_steps,
+             guidance_scale=guidance_scale,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             eta=eta,
+             generator=generator,
+             latents=latents,
+             output_type=output_type,
+             return_dict=return_dict,
+             callback=callback,
+             callback_steps=callback_steps,
+         )
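
Finally, a usage sketch for the mega pipeline above: one loaded checkpoint exposes all three tasks (model ID and prompts are illustrative):

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_mega",
    torch_dtype=torch.float16,
).to("cuda")

# text-to-image
image = pipe.text2img("an astronaut riding a horse").images[0]

# image-to-image, reusing the same loaded components
image = pipe.img2img(prompt="a watercolor painting of the same scene", image=image, strength=0.75).images[0]

# pipe.inpaint(prompt=..., image=..., mask_image=...) works the same way,
# given an init image and a mask.
```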