| """
|
| Self-contained DiffusionSat text-to-image pipeline that can be loaded directly
|
| from the checkpoint folder without importing the project package.
|
| """
|
|
|
from __future__ import annotations

import inspect
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from packaging import version

try:
    from transformers import CLIPFeatureExtractor
except ImportError:
    # Newer transformers releases renamed the class to CLIPImageProcessor.
    from transformers import CLIPImageProcessor as CLIPFeatureExtractor
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import deprecate, logging, replace_example_docstring
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
    StableDiffusionPipeline as DiffusersStableDiffusionPipeline,
)

logger = logging.get_logger(__name__)

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import DiffusionPipeline

        >>> pipe = DiffusionPipeline.from_pretrained("path/to/ckpt/diffusionsat", torch_dtype=torch.float16)
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
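
        >>> # DiffusionSat-specific: condition on numeric metadata as well. A sketch,
        >>> # not a validated encoding -- the vector length must match the UNet's
        >>> # `num_metadata` config entry, and the zeros below are placeholders.
        >>> image = pipe(prompt, metadata=[0.0] * pipe.unet.config.num_metadata).images[0]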
        ```
"""


class DiffusionSatPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using the DiffusionSat UNet, with optional
    conditioning on a vector of numerical metadata in addition to the text prompt.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    @classmethod
    def _get_signature_types(cls):
        """
        Override to skip strict type resolution when loading via diffusers 0.36,
        which cannot resolve the forward references in these custom modules.
        """
        required, optional = DiffusionPipeline._get_signature_keys(cls)
        keys = list(required) + list(optional)
        # `inspect.Signature.empty` acts as a wildcard, so diffusers accepts whatever
        # class the checkpoint's `model_index.json` resolves each component to.
        return {key: (inspect.Signature.empty,) for key in keys}

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: Any,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    # Everything that does not touch the metadata path is borrowed unchanged from the
    # stock Stable Diffusion pipeline.
    enable_vae_slicing = DiffusersStableDiffusionPipeline.enable_vae_slicing
    disable_vae_slicing = DiffusersStableDiffusionPipeline.disable_vae_slicing
    enable_sequential_cpu_offload = DiffusersStableDiffusionPipeline.enable_sequential_cpu_offload
    _execution_device = DiffusersStableDiffusionPipeline._execution_device
    encode_prompt = DiffusersStableDiffusionPipeline.encode_prompt
    _encode_prompt = DiffusersStableDiffusionPipeline._encode_prompt
    run_safety_checker = DiffusersStableDiffusionPipeline.run_safety_checker
    decode_latents = DiffusersStableDiffusionPipeline.decode_latents
    prepare_extra_step_kwargs = DiffusersStableDiffusionPipeline.prepare_extra_step_kwargs
    check_inputs = DiffusersStableDiffusionPipeline.check_inputs
    prepare_latents = DiffusersStableDiffusionPipeline.prepare_latents

    def prepare_metadata(self, batch_size, metadata, do_classifier_free_guidance, device, dtype):
        has_metadata = getattr(self.unet.config, "use_metadata", False)
        num_metadata = getattr(self.unet.config, "num_metadata", 0)

        # A metadata-conditioned UNet always expects a metadata tensor, so fall back
        # to an all-zeros vector when the caller did not supply one.
        if metadata is None and has_metadata and num_metadata > 0:
            metadata = torch.zeros((batch_size, num_metadata), device=device, dtype=dtype)

        if metadata is None:
            return None

        md = metadata if torch.is_tensor(metadata) else torch.tensor(metadata)
        if md.ndim == 1:
            # A single metadata vector is shared across the whole batch.
            md = md.unsqueeze(0).expand(batch_size, -1)
        md = md.to(device=device, dtype=dtype)

        if do_classifier_free_guidance:
            # Mirror the doubled latent batch: the zero rows pair with the
            # unconditional (negative-prompt) embeddings.
            md = torch.cat([torch.zeros_like(md), md])

        return md

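    # Shape sketch for `prepare_metadata`, with illustrative values (assuming a UNet
    # trained with `num_metadata == 7`, `batch_size == 2`, and guidance enabled):
    #   metadata = [0.5, 0.5, 0.3, 0.0, 0.8, 0.5, 0.5]  ->  tensor of shape (7,)
    #   broadcast across the batch                      ->  (2, 7)
    #   concat with zeros for CFG                       ->  (4, 7), zero rows first
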
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        metadata: Optional[List[float]] = None,
    ):
        r"""
        The denoising entry point. Besides the standard Stable Diffusion arguments,
        `metadata` optionally conditions the UNet on a vector of numerical values.

        Examples:
        """
        # 0. Default height and width to the UNet's native resolution.
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise an error if they are invalid.
        self.check_inputs(
            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # 2. Define call parameters.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # Classifier-free guidance doubles the batch (unconditional + conditional).
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode the input prompt.
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Prepare timesteps.
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables.
        num_channels_latents = (
            self.unet.in_channels if hasattr(self.unet, "in_channels") else self.unet.config.in_channels
        )
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs (eta and generator, if the scheduler takes them).
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Prepare the metadata tensor. Use the effective batch size so its first
        # dimension matches `prompt_embeds` when `num_images_per_prompt > 1`.
        input_metadata = self.prepare_metadata(
            batch_size * num_images_per_prompt, metadata, do_classifier_free_guidance, device, prompt_embeds.dtype
        )
        if input_metadata is not None:
            assert input_metadata.shape[-1] == getattr(self.unet.config, "num_metadata", input_metadata.shape[-1])
            assert input_metadata.shape[0] == prompt_embeds.shape[0]

        # 8. Denoising loop.
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    metadata=input_metadata,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                ).sample

                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 9. Post-processing and optional safety checking.
        if output_type == "latent":
            image = latents
            has_nsfw_concept = None
        elif output_type == "pil":
            image = self.decode_latents(latents)
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
            image = self.numpy_to_pil(image)
        else:
            image = self.decode_latents(latents)
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)


__all__ = ["DiffusionSatPipeline"]
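

# Minimal smoke-test sketch, not part of the pipeline. Assumptions: the checkpoint
# path is a placeholder, and `from_pretrained` can resolve the custom UNet class
# from the checkpoint folder; adjust both to your local setup.
if __name__ == "__main__":
    pipe = DiffusionSatPipeline.from_pretrained(
        "path/to/ckpt/diffusionsat",  # placeholder checkpoint folder
        torch_dtype=torch.float16,
    ).to("cuda")

    # Pass `metadata=[...]` as well if the UNet was trained with metadata conditioning.
    image = pipe("a photo of an astronaut riding a horse on mars", num_inference_steps=30).images[0]
    image.save("diffusionsat_sample.png")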