# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from diffusers.utils.import_utils import is_invisible_watermark_available

from ...callbacks import MultiPipelineCallbacks, PipelineCallback
from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from ...models import AutoencoderKL, ControlNetModel, ImageProjection, MultiControlNetModel, UNet2DConditionModel
from ...models.attention_processor import (
    AttnProcessor2_0,
    XFormersAttnProcessor,
)
from ...models.lora import adjust_lora_scale_text_encoder
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
    USE_PEFT_BACKEND,
    deprecate,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput


if is_invisible_watermark_available():
    from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

from ...utils import is_torch_xla_available


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> # !pip install opencv-python transformers accelerate
        >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
        >>> from diffusers.utils import load_image
        >>> import numpy as np
        >>> import torch

        >>> import cv2
        >>> from PIL import Image

        >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
        >>> negative_prompt = "low quality, bad quality, sketches"

        >>> # download an image
        >>> image = load_image(
        ...     "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
        ... )

        >>> # initialize the models and pipeline
        >>> controlnet_conditioning_scale = 0.5  # recommended for good generalization
        >>> controlnet = ControlNetModel.from_pretrained(
        ...     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
        ... )
        >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
        >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
        ...     "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
        ... )
        >>> pipe.enable_model_cpu_offload()

        >>> # get canny image
        >>> image = np.array(image)
        >>> image = cv2.Canny(image, 100, 200)
        >>> image = image[:, :, None]
        >>> image = np.concatenate([image, image, image], axis=2)
        >>> canny_image = Image.fromarray(image)

        >>> # generate image
        >>> image = pipe(
        ...     prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
        ... ).images[0]
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is
            passed, `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class StableDiffusionXLControlNetPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    TextualInversionLoaderMixin,
    StableDiffusionXLLoraLoaderMixin,
    IPAdapterMixin,
    FromSingleFileMixin,
):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance.

    This model inherits from [`DiffusionPipeline`].
    Check the superclass documentation for the generic methods implemented for
    all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
            Second frozen text-encoder
            ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        tokenizer_2 ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
            Provides additional conditioning to the `unet` during the denoising process. If you set multiple
            ControlNets as a list, the outputs from each ControlNet are added together to create one combined
            additional conditioning.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
            Whether the negative prompt embeddings should always be set to 0. Also see the config of
            `stabilityai/stable-diffusion-xl-base-1-0`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to
            watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no
            watermarker is used.
""" # leave controlnet out on purpose because it iterates with unet model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "feature_extractor", "image_encoder", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", "image", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, controlnet=controlnet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part 
of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) 
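            # The unconditional branch for classifier-free guidance is encoded from an all-zeros image,
            # processed the same way as the conditioning image below.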
uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): image_embeds = [] if do_classifier_free_guidance: negative_image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." ) for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) image_embeds.append(single_image_embeds[None, :]) if do_classifier_free_guidance: negative_image_embeds.append(single_negative_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) negative_image_embeds.append(single_negative_image_embeds) image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for i, single_image_embeds in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, image, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, negative_pooled_prompt_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. 
Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) # `prompt` needs more sophisticated handling when there are multiple # conditionings. if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning( f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" " prompts. The conditionings will be fixed across the prompts." ) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): self.check_image(image, prompt, prompt_embeds) elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if not isinstance(image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") # When `image` is a nested list: # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(image) != len(self.controlnet.nets): raise ValueError( f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." ) for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." 
) if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." ) if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
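    # Classifier-free guidance is disabled either when `guidance_scale <= 1` or when the UNet takes the
    # guidance scale as an embedding (`time_cond_proj_dim` is set), in which case
    # `get_guidance_scale_embedding` above supplies it through `timestep_cond` instead.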
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, sigmas: List[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. 
Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 5.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. 
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled text embeddings are generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): The ControlNet encoder tries to recognize the content of the input image even if you remove all prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. 
Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned containing the output images. 
""" callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, image, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, negative_pooled_prompt_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions # 3.1 Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # 3.2 Encode ip_adapter_image if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 4. 
Prepare image if isinstance(controlnet, ControlNetModel): image = self.prepare_image( image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) height, width = image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): images = [] for image_ in image: image_ = self.prepare_image( image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) images.append(image_) image = images height, width = image[0].shape[-2:] else: assert False # 5. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas ) self._num_timesteps = len(timesteps) # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6.5 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 7.2 Prepare added time ids & embeddings if isinstance(image, list): original_size = original_size or image[0].shape[-2:] else: original_size = original_size or image.shape[-2:] target_size = target_size or (height, width) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order # 8.1 Apply denoising_end if ( self.denoising_end is not None and isinstance(self.denoising_end, float) and self.denoising_end > 0 and self.denoising_end < 1 ): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # Relevant thread: # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 if ( torch.cuda.is_available() and (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1 ): torch._inductor.cudagraph_mark_step_begin() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": add_text_embeds.chunk(2)[1], "time_ids": add_time_ids.chunk(2)[1], } else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: added_cond_kwargs["image_embeds"] = image_embeds # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) image = callback_outputs.pop("image", image) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload 
all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
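A minimal, standalone sketch (not part of the pipeline above; the step count, guidance window, and conditioning scale are illustrative assumptions) of how the `controlnet_keep` schedule built in section 7.1 turns `control_guidance_start` / `control_guidance_end` into the per-step `cond_scale` passed to the ControlNet:

```py
# Standalone illustration of the controlnet_keep schedule from section 7.1;
# all values below are assumed purely for demonstration.
num_steps = 10
control_guidance_start = [0.0]  # apply ControlNet from the very first step...
control_guidance_end = [0.5]    # ...and stop after 50% of the steps
controlnet_conditioning_scale = 0.5

controlnet_keep = []
for i in range(num_steps):
    keeps = [
        1.0 - float(i / num_steps < s or (i + 1) / num_steps > e)
        for s, e in zip(control_guidance_start, control_guidance_end)
    ]
    controlnet_keep.append(keeps[0])  # single ControlNet -> one scalar per step

# Effective conditioning scale at each denoising step:
cond_scales = [controlnet_conditioning_scale * keep for keep in controlnet_keep]
print(cond_scales)  # [0.5, 0.5, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0]
```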
diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py", "repo_id": "diffusers", "token_count": 37208 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import List, Optional, Tuple, Union import torch from ....models import UNet2DModel, VQModel from ....schedulers import DDIMScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput class LDMPipeline(DiffusionPipeline): r""" Pipeline for unconditional image generation using latent diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: vqvae ([`VQModel`]): Vector-quantized (VQ) model to encode and decode images to and from latent representations. unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents. """ def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): super().__init__() self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): Number of images to generate. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
Example: ```py >>> from diffusers import LDMPipeline >>> # load model and scheduler >>> pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256") >>> # run pipeline in inference (sample random noise and denoise) >>> image = pipe().images[0] ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ latents = randn_tensor( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, ) latents = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(num_inference_steps) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_kwargs = {} if accepts_eta: extra_kwargs["eta"] = eta for t in self.progress_bar(self.scheduler.timesteps): latent_model_input = self.scheduler.scale_model_input(latents, t) # predict the noise residual noise_prediction = self.unet(latent_model_input, t).sample # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample # adjust latents with inverse of vae scale latents = latents / self.vqvae.config.scaling_factor # decode the image latents with the VAE image = self.vqvae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
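A small sketch of the signature-introspection pattern used above to decide whether `eta` can be forwarded to `scheduler.step` (only schedulers such as `DDIMScheduler` accept it). This assumes `diffusers` is installed and uses default scheduler configs purely for illustration:

```py
import inspect

from diffusers import DDIMScheduler, EulerDiscreteScheduler

for scheduler_cls in (DDIMScheduler, EulerDiscreteScheduler):
    scheduler = scheduler_cls()  # default configs, for illustration only
    accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
    extra_kwargs = {"eta": 0.0} if accepts_eta else {}
    # DDIMScheduler accepts eta; EulerDiscreteScheduler does not, so the
    # pipeline would call scheduler.step(...) without the eta kwarg.
    print(scheduler_cls.__name__, "accepts eta:", accepts_eta, extra_kwargs)
```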
diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py", "repo_id": "diffusers", "token_count": 2148 }
# Copyright 2025 The EasyAnimate team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, Dict, List, Optional, Union import torch from transformers import ( BertModel, BertTokenizer, Qwen2Tokenizer, Qwen2VLForConditionalGeneration, ) from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...models import AutoencoderKLMagvit, EasyAnimateTransformer3DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from .pipeline_output import EasyAnimatePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import EasyAnimatePipeline >>> from diffusers.utils import export_to_video >>> # Models: "alibaba-pai/EasyAnimateV5.1-12b-zh" >>> pipe = EasyAnimatePipeline.from_pretrained( ... "alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers", torch_dtype=torch.float16 ... ).to("cuda") >>> prompt = ( ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " ... "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " ... "atmosphere of this unique musical performance." ... ) >>> sample_size = (512, 512) >>> video = pipe( ... prompt=prompt, ... guidance_scale=6, ... negative_prompt="bad detailed", ... height=sample_size[0], ... width=sample_size[1], ... num_inference_steps=50, ... 
).frames[0] >>> export_to_video(video, "output.mp4", fps=8) ``` """ # Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): tw = tgt_width th = tgt_height h, w = src r = h / w if r > (th / tw): resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class EasyAnimatePipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using EasyAnimate. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) EasyAnimate uses one text encoder [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. Args: vae ([`AutoencoderKLMagvit`]): Variational Auto-Encoder (VAE) Model to encode and decode video to and from latent representations. text_encoder (Optional[`~transformers.Qwen2VLForConditionalGeneration`, `~transformers.BertModel`]): EasyAnimate uses [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. tokenizer (Optional[`~transformers.Qwen2Tokenizer`, `~transformers.BertTokenizer`]): A `Qwen2Tokenizer` or `BertTokenizer` to tokenize text. transformer ([`EasyAnimateTransformer3DModel`]): The EasyAnimate model designed by EasyAnimate Team. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with EasyAnimate to denoise the encoded image latents. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKLMagvit, text_encoder: Union[Qwen2VLForConditionalGeneration, BertModel], tokenizer: Union[Qwen2Tokenizer, BertTokenizer], transformer: EasyAnimateTransformer3DModel, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.enable_text_attention_mask = ( self.transformer.config.enable_text_attention_mask if getattr(self, "transformer", None) is not None else True ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 4 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) def encode_prompt( self, prompt: Union[str, List[str]], num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[Union[str, List[str]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, max_sequence_length: int = 256, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device dtype (`torch.dtype`): torch dtype num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. 
""" dtype = dtype or self.text_encoder.dtype device = device or self.text_encoder.device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": prompt}], } ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _prompt}], } for _prompt in prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.to(device=device) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: if negative_prompt is not None and isinstance(negative_prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": negative_prompt}], } ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _negative_prompt}], } for _negative_prompt in negative_prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids negative_prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output negative_prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=negative_prompt_attention_mask, output_hidden_states=True, ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.to(device=device) return prompt_embeds, negative_prompt_embeds, 
prompt_attention_mask, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, callback_on_step_end_tensor_inputs=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, (num_frames - 1) // self.vae_temporal_compression_ratio + 1, height // self.vae_spatial_compression_ratio, width // self.vae_spatial_compression_ratio, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler if hasattr(self.scheduler, "init_noise_sigma"): latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, num_frames: Optional[int] = 49, height: Optional[int] = 512, width: Optional[int] = 512, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, timesteps: Optional[List[int]] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], guidance_rescale: float = 0.0, ): r""" Generates images or video using the EasyAnimate pipeline based on the provided prompts. Examples: prompt (`str` or `List[str]`, *optional*): Text prompts to guide the image or video generation. If not provided, use `prompt_embeds` instead. num_frames (`int`, *optional*): Length of the generated video (in frames). height (`int`, *optional*): Height of the generated image in pixels. width (`int`, *optional*): Width of the generated image in pixels. num_inference_steps (`int`, *optional*, defaults to 50): Number of denoising steps during generation. More steps generally yield higher quality images but slow down inference. guidance_scale (`float`, *optional*, defaults to 5.0): Encourages the model to align outputs with prompts. A higher value may decrease image quality. negative_prompt (`str` or `List[str]`, *optional*): Prompts indicating what to exclude in generation. If not specified, use `negative_prompt_embeds`. 
num_images_per_prompt (`int`, *optional*, defaults to 1): Number of images to generate for each prompt. eta (`float`, *optional*, defaults to 0.0): Applies to DDIM scheduling. Controlled by the eta parameter from the related literature. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A generator to ensure reproducibility in image generation. latents (`torch.Tensor`, *optional*): Predefined latent tensors to condition generation. prompt_embeds (`torch.Tensor`, *optional*): Text embeddings for the prompts. Overrides prompt string inputs for more flexibility. negative_prompt_embeds (`torch.Tensor`, *optional*): Embeddings for negative prompts. Overrides string inputs if defined. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the primary prompt embeddings. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for negative prompt embeddings. output_type (`str`, *optional*, defaults to "latent"): Format of the generated output, either as a PIL image or as a NumPy array. return_dict (`bool`, *optional*, defaults to `True`): If `True`, returns a structured output. Otherwise returns a simple tuple. callback_on_step_end (`Callable`, *optional*): Functions called at the end of each denoising step. callback_on_step_end_tensor_inputs (`List[str]`, *optional*): Tensor names to be included in callback function calls. guidance_rescale (`float`, *optional*, defaults to 0.0): Adjusts noise levels based on guidance scale. original_size (`Tuple[int, int]`, *optional*, defaults to `(1024, 1024)`): Original dimensions of the output. target_size (`Tuple[int, int]`, *optional*): Desired output dimensions for calculations. crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to `(0, 0)`): Coordinates for cropping. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. default height and width height = int((height // 16) * 16) width = int((width // 16) * 16) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = self.transformer.dtype # 3. 
Encode input prompt ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, device=device, dtype=dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) # 4. Prepare timesteps if isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler): timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, mu=1 ) else: timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, num_frames, height, width, dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents if hasattr(self.scheduler, "scale_model_input"): latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( dtype=latent_model_input.dtype ) # predict the noise residual noise_pred = self.transformer( latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, return_dict=False, )[0] if noise_pred.size()[1] != self.vae.config.latent_channels: noise_pred, _ = noise_pred.chunk(2, dim=1) # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. 
in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": latents = 1 / self.vae.config.scaling_factor * latents video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return EasyAnimatePipelineOutput(frames=video)
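A minimal arithmetic sketch of the 5D latent shape computed by `prepare_latents` above, using the default spatial/temporal compression ratios; the latent channel count is an assumed value for illustration (the real one comes from `transformer.config.in_channels`):

```py
# Illustration of the latent video shape; the channel count is an assumption.
batch_size = 1
num_channels_latents = 16  # assumed; actual value: transformer.config.in_channels
num_frames, height, width = 49, 512, 512
vae_temporal_compression_ratio = 4  # default used when no VAE is loaded
vae_spatial_compression_ratio = 8   # default used when no VAE is loaded

shape = (
    batch_size,
    num_channels_latents,
    (num_frames - 1) // vae_temporal_compression_ratio + 1,  # 13 latent frames
    height // vae_spatial_compression_ratio,                 # 64
    width // vae_spatial_compression_ratio,                  # 64
)
print(shape)  # (1, 16, 13, 64, 64)
```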
diffusers/src/diffusers/pipelines/easyanimate/pipeline_easyanimate.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/easyanimate/pipeline_easyanimate.py", "repo_id": "diffusers", "token_count": 15875 }
# Copyright 2025 HunyuanDiT Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, HunyuanDiT2DModel from ...models.embeddings import get_2d_rotary_pos_embed from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDPMScheduler from ...utils import ( is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import HunyuanDiTPipeline >>> pipe = HunyuanDiTPipeline.from_pretrained( ... "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16 ... 
) >>> pipe.to("cuda") >>> # You may also use English prompt as HunyuanDiT supports both English and Chinese >>> # prompt = "An astronaut riding a horse" >>> prompt = "一个宇航员在骑马" >>> image = pipe(prompt).images[0] ``` """ STANDARD_RATIO = np.array( [ 1.0, # 1:1 4.0 / 3.0, # 4:3 3.0 / 4.0, # 3:4 16.0 / 9.0, # 16:9 9.0 / 16.0, # 9:16 ] ) STANDARD_SHAPE = [ [(1024, 1024), (1280, 1280)], # 1:1 [(1024, 768), (1152, 864), (1280, 960)], # 4:3 [(768, 1024), (864, 1152), (960, 1280)], # 3:4 [(1280, 768)], # 16:9 [(768, 1280)], # 9:16 ] STANDARD_AREA = [np.array([w * h for w, h in shapes]) for shapes in STANDARD_SHAPE] SUPPORTED_SHAPE = [ (1024, 1024), (1280, 1280), # 1:1 (1024, 768), (1152, 864), (1280, 960), # 4:3 (768, 1024), (864, 1152), (960, 1280), # 3:4 (1280, 768), # 16:9 (768, 1280), # 9:16 ] def map_to_standard_shapes(target_width, target_height): target_ratio = target_width / target_height closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) closest_area_idx = np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) width, height = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] return width, height def get_resize_crop_region_for_grid(src, tgt_size): th = tw = tgt_size h, w = src r = h / w # resize if r > 1: resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class HunyuanDiTPipeline(DiffusionPipeline): r""" Pipeline for English/Chinese-to-image generation using HunyuanDiT. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) HunyuanDiT uses two text encoders: [mT5](https://huggingface.co/google/mt5-base) and [bilingual CLIP](fine-tuned by ourselves) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. We use `sdxl-vae-fp16-fix`. 
text_encoder (Optional[`~transformers.BertModel`, `~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). HunyuanDiT uses a fine-tuned [bilingual CLIP]. tokenizer (Optional[`~transformers.BertTokenizer`, `~transformers.CLIPTokenizer`]): A `BertTokenizer` or `CLIPTokenizer` to tokenize text. transformer ([`HunyuanDiT2DModel`]): The HunyuanDiT model designed by Tencent Hunyuan. text_encoder_2 (`T5EncoderModel`): The mT5 embedder. Specifically, it is 't5-v1_1-xxl'. tokenizer_2 (`MT5Tokenizer`): The tokenizer for the mT5 embedder. scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with HunyuanDiT to denoise the encoded image latents. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _optional_components = [ "safety_checker", "feature_extractor", "text_encoder_2", "tokenizer_2", "text_encoder", "tokenizer", ] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "prompt_embeds_2", "negative_prompt_embeds_2", ] def __init__( self, vae: AutoencoderKL, text_encoder: BertModel, tokenizer: BertTokenizer, transformer: HunyuanDiT2DModel, scheduler: DDPMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, text_encoder_2: Optional[T5EncoderModel] = None, tokenizer_2: Optional[MT5Tokenizer] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, text_encoder_2=text_encoder_2, ) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.default_sample_size = ( self.transformer.config.sample_size if hasattr(self, "transformer") and self.transformer is not None else 128 ) def encode_prompt( self, prompt: str, device: torch.device = None, dtype: torch.dtype = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: Optional[int] = None, text_encoder_index: int = 0, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device dtype (`torch.dtype`): torch dtype num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. text_encoder_index (`int`, *optional*): Index of the text encoder to use. `0` for clip and `1` for T5. 
""" if dtype is None: if self.text_encoder_2 is not None: dtype = self.text_encoder_2.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None if device is None: device = self._execution_device tokenizers = [self.tokenizer, self.tokenizer_2] text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = tokenizers[text_encoder_index] text_encoder = text_encoders[text_encoder_index] if max_sequence_length is None: if text_encoder_index == 0: max_length = 77 if text_encoder_index == 1: max_length = 256 else: max_length = max_sequence_length if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = tokenizer( prompt, padding="max_length", max_length=max_length, truncation=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_attention_mask = text_inputs.attention_mask.to(device) prompt_embeds = text_encoder( text_input_ids.to(device), attention_mask=prompt_attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, prompt_embeds_2=None, negative_prompt_embeds_2=None, prompt_attention_mask_2=None, negative_prompt_attention_mask_2=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is None and prompt_embeds_2 is None: raise ValueError( "Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: raise ValueError("Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: raise ValueError( "Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: raise ValueError( "`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but" f" got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2`" f" {negative_prompt_embeds_2.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_embeds_2: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds_2: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, prompt_attention_mask_2: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask_2: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = (1024, 1024), target_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), use_resolution_binning: bool = True, ): r""" The call function to the pipeline for generation with HunyuanDiT. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`): The height in pixels of the generated image. 
width (`int`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. prompt_embeds_2 (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. negative_prompt_embeds_2 (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. prompt_attention_mask_2 (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds_2` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. negative_prompt_attention_mask_2 (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds_2` is passed directly. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A callback function or a list of callback functions to be called at the end of each denoising step. callback_on_step_end_tensor_inputs (`List[str]`, *optional*): A list of tensor inputs that should be passed to the callback function. If not defined, all tensor inputs will be passed. 
guidance_rescale (`float`, *optional*, defaults to 0.0): Rescale the noise_cfg according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4 original_size (`Tuple[int, int]`, *optional*, defaults to `(1024, 1024)`): The original size of the image. Used to calculate the time ids. target_size (`Tuple[int, int]`, *optional*): The target size of the image. Used to calculate the time ids. crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to `(0, 0)`): The top left coordinates of the crop. Used to calculate the time ids. use_resolution_binning (`bool`, *optional*, defaults to `True`): Whether to use resolution binning or not. If `True`, the input resolution will be mapped to the closest standard resolution. Supported resolutions are 1024x1024, 1280x1280, 1024x768, 1152x864, 1280x960, 768x1024, 864x1152, 960x1280, 1280x768, and 768x1280. It is recommended to set this to `True`. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. default height and width height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor height = int((height // 16) * 16) width = int((width // 16) * 16) if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: width, height = map_to_standard_shapes(width, height) height = int(height) width = int(width) logger.warning(f"Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}") # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. 
Encode input prompt ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=77, text_encoder_index=0, ) ( prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, ) = self.encode_prompt( prompt=prompt, device=device, dtype=self.transformer.dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds_2, negative_prompt_embeds=negative_prompt_embeds_2, prompt_attention_mask=prompt_attention_mask_2, negative_prompt_attention_mask=negative_prompt_attention_mask_2, max_sequence_length=256, text_encoder_index=1, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7 create image_rotary_emb, style embedding & time ids grid_height = height // 8 // self.transformer.config.patch_size grid_width = width // 8 // self.transformer.config.patch_size base_size = 512 // 8 // self.transformer.config.patch_size grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) image_rotary_emb = get_2d_rotary_pos_embed( self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width), device=device, output_type="pt", ) style = torch.tensor([0], device=device) target_size = target_size or (height, width) add_time_ids = list(original_size + target_size + crops_coords_top_left) add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) add_time_ids = torch.cat([add_time_ids] * 2, dim=0) style = torch.cat([style] * 2, dim=0) prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) prompt_embeds_2 = prompt_embeds_2.to(device=device) prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat( batch_size * num_images_per_prompt, 1 ) style = style.to(device=device).repeat(batch_size * num_images_per_prompt) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( dtype=latent_model_input.dtype ) # predict the noise residual noise_pred = self.transformer( latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, text_embedding_mask=prompt_attention_mask, encoder_hidden_states_t5=prompt_embeds_2, text_embedding_mask_t5=prompt_attention_mask_2, image_meta_size=add_time_ids, style=style, image_rotary_emb=image_rotary_emb, return_dict=False, )[0] noise_pred, _ = noise_pred.chunk(2, dim=1) # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) prompt_embeds_2 = callback_outputs.pop("prompt_embeds_2", prompt_embeds_2) negative_prompt_embeds_2 = callback_outputs.pop( "negative_prompt_embeds_2", negative_prompt_embeds_2 ) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
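The denoising loop above rescales the guided noise prediction whenever `guidance_rescale > 0.0`. For reference, here is a minimal sketch consistent with the `rescale_noise_cfg` helper used across diffusers pipelines (Sec. 3.4 of the paper linked in the loop); treat it as an illustration rather than the canonical source:

```py
import torch


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # Match the per-sample std of the CFG result to the std of the text-conditioned
    # prediction, then blend with the unrescaled result by `guidance_rescale`.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
```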
diffusers/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/hunyuandit/pipeline_hunyuandit.py", "repo_id": "diffusers", "token_count": 19406 }
168
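For context, a minimal usage sketch of the HunyuanDiT pipeline defined in the file above. The checkpoint id and dtype are assumptions (not taken from this file); any HunyuanDiT checkpoint in the diffusers format should work the same way:

```py
import torch
from diffusers import HunyuanDiTPipeline

# Assumed checkpoint id for illustration purposes.
pipe = HunyuanDiTPipeline.from_pretrained(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16
)
pipe.to("cuda")

image = pipe(
    prompt="an astronaut riding a horse",
    num_inference_steps=50,
    guidance_scale=5.0,
).images[0]
image.save("hunyuandit_sample.png")
```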
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from packaging import version from PIL import Image from ... import __version__ from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDPMScheduler from ...utils import deprecate, is_torch_xla_available, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> import numpy as np >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "a hat" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22InpaintPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ) >>> mask = np.zeros((768, 768), dtype=np.float32) >>> mask[:250, 250:-250] = 1 >>> out = pipe( ... image=init_image, ... mask_image=mask, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... 
) >>> image = out.images[0] >>> image.save("cat_with_hat.png") ``` """ # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width def downscale_height_and_width(height, width, scale_factor=8): new_height = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 new_width = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_inpaint.prepare_mask def prepare_mask(masks): prepared_masks = [] for mask in masks: old_mask = deepcopy(mask) for i in range(mask.shape[1]): for j in range(mask.shape[2]): if old_mask[0][i][j] == 1: continue if i != 0: mask[:, i - 1, j] = 0 if j != 0: mask[:, i, j - 1] = 0 if i != 0 and j != 0: mask[:, i - 1, j - 1] = 0 if i != mask.shape[1] - 1: mask[:, i + 1, j] = 0 if j != mask.shape[2] - 1: mask[:, i, j + 1] = 0 if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: mask[:, i + 1, j + 1] = 0 prepared_masks.append(mask) return torch.stack(prepared_masks, dim=0) # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_inpaint.prepare_mask_and_masked_image def prepare_mask_and_masked_image(image, mask, height, width): r""" Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the ``image`` and ``1`` for the ``mask``. The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. Args: image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. mask (_type_): The mask to apply to the image, i.e. regions to inpaint. It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. Raises: ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (ot the other way around). Returns: tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
""" if image is None: raise ValueError("`image` input cannot be undefined.") if mask is None: raise ValueError("`mask_image` input cannot be undefined.") if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) # Batch and add channel dim for single mask if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) # Batch single mask or add channel dim if mask.ndim == 3: # Single batched mask, no channel dim or single mask not batched but channel dim if mask.shape[0] == 1: mask = mask.unsqueeze(0) # Batched masks no channel dim else: mask = mask.unsqueeze(1) assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") # Check mask is in [0, 1] if mask.min() < 0 or mask.max() > 1: raise ValueError("Mask should be in [0, 1] range") # Binarize mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 # Image as float32 image = image.to(dtype=torch.float32) elif isinstance(mask, torch.Tensor): raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): # resize all images w.r.t passed height an width image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 # preprocess mask if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) mask = 1 - mask return mask, image class KandinskyV22InpaintPipeline(DiffusionPipeline): """ Pipeline for text-guided image inpainting using Kandinsky2.1 This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: scheduler ([`DDIMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. 
""" model_cpu_offload_seq = "unet->movq" _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds", "masked_image", "mask_image"] def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__( self, image_embeds: Union[torch.Tensor, List[torch.Tensor]], image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image, np.ndarray], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. Args: image_embeds (`torch.Tensor` or `List[torch.Tensor]`): The clip image embeddings for text prompt, that will be used to condition the image generation. image (`PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. mask_image (`np.array`): Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): The clip image embeddings for negative text prompt, will be used to condition the image generation. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( "0.23.0.dev0" ): logger.warning( "Please note that the expected format of `mask_image` has recently been changed. " "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. " "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. 
" "This warning will be suppressed after the first inference call and will be removed in diffusers>0.23.0" ) self._warn_has_been_called = True callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) self._guidance_scale = guidance_scale device = self._execution_device if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) batch_size = image_embeds.shape[0] * num_images_per_prompt if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if self.do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( dtype=self.unet.dtype, device=device ) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # preprocess image and mask mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) image = image.to(dtype=image_embeds.dtype, device=device) image = self.movq.encode(image)["latents"] mask_image = mask_image.to(dtype=image_embeds.dtype, device=device) image_shape = tuple(image.shape[-2:]) mask_image = F.interpolate( mask_image, image_shape, mode="nearest", ) mask_image = prepare_mask(mask_image) masked_image = image * mask_image mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) if self.do_classifier_free_guidance: mask_image = mask_image.repeat(2, 1, 1, 1) masked_image = masked_image.repeat(2, 1, 1, 1) num_channels_latents = self.movq.config.latent_channels height, width = downscale_height_and_width(height, width, self.movq_scale_factor) # create initial latent latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, ) noise = torch.clone(latents) self._num_timesteps = len(timesteps) for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) added_cond_kwargs = {"image_embeds": image_embeds} noise_pred = self.unet( sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) _, variance_pred_text = variance_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale 
* (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not ( hasattr(self.scheduler.config, "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, generator=generator, )[0] init_latents_proper = image[:1] init_mask = mask_image[:1] if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = init_mask * init_latents_proper + (1 - init_mask) * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) image_embeds = callback_outputs.pop("image_embeds", image_embeds) negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) masked_image = callback_outputs.pop("masked_image", masked_image) mask_image = callback_outputs.pop("mask_image", mask_image) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() # post-processing latents = mask_image[:1] * image[:1] + (1 - mask_image[:1]) * latents if output_type not in ["pt", "np", "pil", "latent"]: raise ValueError( f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" ) if not output_type == "latent": image = self.movq.decode(latents, force_not_quantize=True)["sample"] if output_type in ["np", "pil"]: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) else: image = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
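After each scheduler step, the loop above re-imposes the known region by blending the original image latents (with matching noise added) back into the current estimate. A small self-contained illustration of that blending rule, with toy shapes assumed for the example (96x96 is what a 768x768 image maps to under `downscale_height_and_width` with the usual MoVQ scale factor of 8):

```py
import torch

latents = torch.randn(1, 4, 96, 96)              # current denoised estimate
init_latents_proper = torch.randn(1, 4, 96, 96)  # original image latents, re-noised to the current level
init_mask = torch.zeros(1, 1, 96, 96)
init_mask[..., :32, :] = 1                       # 1 = keep original content, 0 = repaint

# Same rule as in the loop: keep the original content where the mask is 1,
# keep the freshly denoised latents where the mask is 0 (the inpainted region).
latents = init_mask * init_latents_proper + (1 - init_mask) * latents
```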
diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py", "repo_id": "diffusers", "token_count": 11160 }
169
# Copyright 2025 OmniGen team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import Dict, List import numpy as np import torch from PIL import Image from ...utils import is_torchvision_available if is_torchvision_available(): from torchvision import transforms def crop_image(pil_image, max_image_size): """ Crop the image so that its height and width does not exceed `max_image_size`, while ensuring both the height and width are multiples of 16. """ while min(*pil_image.size) >= 2 * max_image_size: pil_image = pil_image.resize(tuple(x // 2 for x in pil_image.size), resample=Image.BOX) if max(*pil_image.size) > max_image_size: scale = max_image_size / max(*pil_image.size) pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC) if min(*pil_image.size) < 16: scale = 16 / min(*pil_image.size) pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC) arr = np.array(pil_image) crop_y1 = (arr.shape[0] % 16) // 2 crop_y2 = arr.shape[0] % 16 - crop_y1 crop_x1 = (arr.shape[1] % 16) // 2 crop_x2 = arr.shape[1] % 16 - crop_x1 arr = arr[crop_y1 : arr.shape[0] - crop_y2, crop_x1 : arr.shape[1] - crop_x2] return Image.fromarray(arr) class OmniGenMultiModalProcessor: def __init__(self, text_tokenizer, max_image_size: int = 1024): self.text_tokenizer = text_tokenizer self.max_image_size = max_image_size self.image_transform = transforms.Compose( [ transforms.Lambda(lambda pil_image: crop_image(pil_image, max_image_size)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), ] ) self.collator = OmniGenCollator() def reset_max_image_size(self, max_image_size): self.max_image_size = max_image_size self.image_transform = transforms.Compose( [ transforms.Lambda(lambda pil_image: crop_image(pil_image, max_image_size)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), ] ) def process_image(self, image): if isinstance(image, str): image = Image.open(image).convert("RGB") return self.image_transform(image) def process_multi_modal_prompt(self, text, input_images): text = self.add_prefix_instruction(text) if input_images is None or len(input_images) == 0: model_inputs = self.text_tokenizer(text) return {"input_ids": model_inputs.input_ids, "pixel_values": None, "image_sizes": None} pattern = r"<\|image_\d+\|>" prompt_chunks = [self.text_tokenizer(chunk).input_ids for chunk in re.split(pattern, text)] for i in range(1, len(prompt_chunks)): if prompt_chunks[i][0] == 1: prompt_chunks[i] = prompt_chunks[i][1:] image_tags = re.findall(pattern, text) image_ids = [int(s.split("|")[1].split("_")[-1]) for s in image_tags] unique_image_ids = sorted(set(image_ids)) assert unique_image_ids == list(range(1, len(unique_image_ids) + 1)), ( f"image_ids must start from 1, and must be continuous int, e.g. 
[1, 2, 3], cannot be {unique_image_ids}" ) # total images must be the same as the number of image tags assert len(unique_image_ids) == len(input_images), ( f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images" ) input_images = [input_images[x - 1] for x in image_ids] all_input_ids = [] img_inx = [] for i in range(len(prompt_chunks)): all_input_ids.extend(prompt_chunks[i]) if i != len(prompt_chunks) - 1: start_inx = len(all_input_ids) size = input_images[i].size(-2) * input_images[i].size(-1) // 16 // 16 img_inx.append([start_inx, start_inx + size]) all_input_ids.extend([0] * size) return {"input_ids": all_input_ids, "pixel_values": input_images, "image_sizes": img_inx} def add_prefix_instruction(self, prompt): user_prompt = "<|user|>\n" generation_prompt = "Generate an image according to the following instructions\n" assistant_prompt = "<|assistant|>\n<|diffusion|>" prompt_suffix = "<|end|>\n" prompt = f"{user_prompt}{generation_prompt}{prompt}{prompt_suffix}{assistant_prompt}" return prompt def __call__( self, instructions: List[str], input_images: List[List[str]] = None, height: int = 1024, width: int = 1024, negative_prompt: str = "low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers.", use_img_cfg: bool = True, separate_cfg_input: bool = False, use_input_image_size_as_output: bool = False, num_images_per_prompt: int = 1, ) -> Dict: if isinstance(instructions, str): instructions = [instructions] input_images = [input_images] input_data = [] for i in range(len(instructions)): cur_instruction = instructions[i] cur_input_images = None if input_images is None else input_images[i] if cur_input_images is not None and len(cur_input_images) > 0: cur_input_images = [self.process_image(x) for x in cur_input_images] else: cur_input_images = None assert "<img><|image_1|></img>" not in cur_instruction mllm_input = self.process_multi_modal_prompt(cur_instruction, cur_input_images) neg_mllm_input, img_cfg_mllm_input = None, None neg_mllm_input = self.process_multi_modal_prompt(negative_prompt, None) if use_img_cfg: if cur_input_images is not None and len(cur_input_images) >= 1: img_cfg_prompt = [f"<img><|image_{i + 1}|></img>" for i in range(len(cur_input_images))] img_cfg_mllm_input = self.process_multi_modal_prompt(" ".join(img_cfg_prompt), cur_input_images) else: img_cfg_mllm_input = neg_mllm_input for _ in range(num_images_per_prompt): if use_input_image_size_as_output: input_data.append( ( mllm_input, neg_mllm_input, img_cfg_mllm_input, [mllm_input["pixel_values"][0].size(-2), mllm_input["pixel_values"][0].size(-1)], ) ) else: input_data.append((mllm_input, neg_mllm_input, img_cfg_mllm_input, [height, width])) return self.collator(input_data) class OmniGenCollator: def __init__(self, pad_token_id=2, hidden_size=3072): self.pad_token_id = pad_token_id self.hidden_size = hidden_size def create_position(self, attention_mask, num_tokens_for_output_images): position_ids = [] text_length = attention_mask.size(-1) img_length = max(num_tokens_for_output_images) for mask in attention_mask: temp_l = torch.sum(mask) temp_position = [0] * (text_length - temp_l) + list( range(temp_l + img_length + 1) ) # we add a time embedding 
into the sequence, so add one more token position_ids.append(temp_position) return torch.LongTensor(position_ids) def create_mask(self, attention_mask, num_tokens_for_output_images): """ OmniGen applies causal attention to each element in the sequence, but applies bidirectional attention within each image sequence References: [OmniGen](https://huggingface.co/papers/2409.11340) """ extended_mask = [] padding_images = [] text_length = attention_mask.size(-1) img_length = max(num_tokens_for_output_images) seq_len = text_length + img_length + 1 # we add a time embedding into the sequence, so add one more token inx = 0 for mask in attention_mask: temp_l = torch.sum(mask) pad_l = text_length - temp_l temp_mask = torch.tril(torch.ones(size=(temp_l + 1, temp_l + 1))) image_mask = torch.zeros(size=(temp_l + 1, img_length)) temp_mask = torch.cat([temp_mask, image_mask], dim=-1) image_mask = torch.ones(size=(img_length, temp_l + img_length + 1)) temp_mask = torch.cat([temp_mask, image_mask], dim=0) if pad_l > 0: pad_mask = torch.zeros(size=(temp_l + 1 + img_length, pad_l)) temp_mask = torch.cat([pad_mask, temp_mask], dim=-1) pad_mask = torch.ones(size=(pad_l, seq_len)) temp_mask = torch.cat([pad_mask, temp_mask], dim=0) true_img_length = num_tokens_for_output_images[inx] pad_img_length = img_length - true_img_length if pad_img_length > 0: temp_mask[:, -pad_img_length:] = 0 temp_padding_imgs = torch.zeros(size=(1, pad_img_length, self.hidden_size)) else: temp_padding_imgs = None extended_mask.append(temp_mask.unsqueeze(0)) padding_images.append(temp_padding_imgs) inx += 1 return torch.cat(extended_mask, dim=0), padding_images def adjust_attention_for_input_images(self, attention_mask, image_sizes): for b_inx in image_sizes.keys(): for start_inx, end_inx in image_sizes[b_inx]: attention_mask[b_inx][start_inx:end_inx, start_inx:end_inx] = 1 return attention_mask def pad_input_ids(self, input_ids, image_sizes): max_l = max([len(x) for x in input_ids]) padded_ids = [] attention_mask = [] for i in range(len(input_ids)): temp_ids = input_ids[i] temp_l = len(temp_ids) pad_l = max_l - temp_l if pad_l == 0: attention_mask.append([1] * max_l) padded_ids.append(temp_ids) else: attention_mask.append([0] * pad_l + [1] * temp_l) padded_ids.append([self.pad_token_id] * pad_l + temp_ids) if i in image_sizes: new_inx = [] for old_inx in image_sizes[i]: new_inx.append([x + pad_l for x in old_inx]) image_sizes[i] = new_inx return torch.LongTensor(padded_ids), torch.LongTensor(attention_mask), image_sizes def process_mllm_input(self, mllm_inputs, target_img_size): num_tokens_for_output_images = [] for img_size in target_img_size: num_tokens_for_output_images.append(img_size[0] * img_size[1] // 16 // 16) pixel_values, image_sizes = [], {} b_inx = 0 for x in mllm_inputs: if x["pixel_values"] is not None: pixel_values.extend(x["pixel_values"]) for size in x["image_sizes"]: if b_inx not in image_sizes: image_sizes[b_inx] = [size] else: image_sizes[b_inx].append(size) b_inx += 1 pixel_values = [x.unsqueeze(0) for x in pixel_values] input_ids = [x["input_ids"] for x in mllm_inputs] padded_input_ids, attention_mask, image_sizes = self.pad_input_ids(input_ids, image_sizes) position_ids = self.create_position(attention_mask, num_tokens_for_output_images) attention_mask, padding_images = self.create_mask(attention_mask, num_tokens_for_output_images) attention_mask = self.adjust_attention_for_input_images(attention_mask, image_sizes) return padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes 
def __call__(self, features): mllm_inputs = [f[0] for f in features] cfg_mllm_inputs = [f[1] for f in features] img_cfg_mllm_input = [f[2] for f in features] target_img_size = [f[3] for f in features] if img_cfg_mllm_input[0] is not None: mllm_inputs = mllm_inputs + cfg_mllm_inputs + img_cfg_mllm_input target_img_size = target_img_size + target_img_size + target_img_size else: mllm_inputs = mllm_inputs + cfg_mllm_inputs target_img_size = target_img_size + target_img_size ( all_padded_input_ids, all_position_ids, all_attention_mask, all_padding_images, all_pixel_values, all_image_sizes, ) = self.process_mllm_input(mllm_inputs, target_img_size) data = { "input_ids": all_padded_input_ids, "attention_mask": all_attention_mask, "position_ids": all_position_ids, "input_pixel_values": all_pixel_values, "input_image_sizes": all_image_sizes, } return data
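The processor and collator above reserve `height * width // 16 // 16` placeholder tokens per output image and left-pad shorter prompts. A small standalone sketch of that bookkeeping (toy numbers; it does not import the classes above):

```py
# Number of transformer tokens reserved for an output image of a given size
# (16x16 patch area in pixel space, as in process_mllm_input / process_multi_modal_prompt).
def num_image_tokens(height: int, width: int) -> int:
    return height * width // 16 // 16


assert num_image_tokens(1024, 1024) == 4096
assert num_image_tokens(512, 512) == 1024

# Left padding, mirroring OmniGenCollator.pad_input_ids: shorter prompts are padded on the
# left with pad_token_id and receive 0s in the attention mask at the padded positions.
pad_token_id = 2
input_ids = [[5, 6, 7, 8], [9, 10]]
max_l = max(len(x) for x in input_ids)
padded = [[pad_token_id] * (max_l - len(x)) + x for x in input_ids]
mask = [[0] * (max_l - len(x)) + [1] * len(x) for x in input_ids]
assert padded[1] == [2, 2, 9, 10] and mask[1] == [0, 0, 1, 1]
```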
diffusers/src/diffusers/pipelines/omnigen/processor_omnigen.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/omnigen/processor_omnigen.py", "repo_id": "diffusers", "token_count": 6675 }
170
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["QwenImagePipelineOutput", "QwenImagePriorReduxPipelineOutput"]} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["modeling_qwenimage"] = ["ReduxImageEncoder"] _import_structure["pipeline_qwenimage"] = ["QwenImagePipeline"] _import_structure["pipeline_qwenimage_controlnet"] = ["QwenImageControlNetPipeline"] _import_structure["pipeline_qwenimage_edit"] = ["QwenImageEditPipeline"] _import_structure["pipeline_qwenimage_img2img"] = ["QwenImageImg2ImgPipeline"] _import_structure["pipeline_qwenimage_inpaint"] = ["QwenImageInpaintPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_qwenimage import QwenImagePipeline from .pipeline_qwenimage_controlnet import QwenImageControlNetPipeline from .pipeline_qwenimage_edit import QwenImageEditPipeline from .pipeline_qwenimage_img2img import QwenImageImg2ImgPipeline from .pipeline_qwenimage_inpaint import QwenImageInpaintPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for name, value in _additional_imports.items(): setattr(sys.modules[__name__], name, value)
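As with the other pipeline packages, nothing heavy is imported until an attribute is actually accessed. A short sketch of how the `_LazyModule` indirection behaves from a user's point of view (assumes `diffusers` plus `torch`/`transformers` are installed):

```py
import importlib

# Importing the package only installs the _LazyModule shim; the pipeline
# submodules themselves are not imported yet.
qwenimage = importlib.import_module("diffusers.pipelines.qwenimage")

# This attribute access is what triggers the real
# `from .pipeline_qwenimage import QwenImagePipeline` behind the scenes.
QwenImagePipeline = qwenimage.QwenImagePipeline
print(QwenImagePipeline.__name__)
```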
diffusers/src/diffusers/pipelines/qwenimage/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/qwenimage/__init__.py", "repo_id": "diffusers", "token_count": 874 }
171
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["camera"] = ["create_pan_cameras"] _import_structure["pipeline_shap_e"] = ["ShapEPipeline"] _import_structure["pipeline_shap_e_img2img"] = ["ShapEImg2ImgPipeline"] _import_structure["renderer"] = [ "BoundingBoxVolume", "ImportanceRaySampler", "MLPNeRFModelOutput", "MLPNeRSTFModel", "ShapEParamsProjModel", "ShapERenderer", "StratifiedRaySampler", "VoidNeRFModel", ] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, ) else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
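For context, a minimal usage sketch of the exported `ShapEPipeline`. The checkpoint id and generation settings are assumptions based on the usual Shap-E examples, not taken from this `__init__`:

```py
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

# "openai/shap-e" is the checkpoint id commonly used in Shap-E examples.
pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")

output = pipe(
    "a shark",
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=256,
)
frames = output.images[0]  # list of rendered views for the first (and only) prompt
export_to_gif(frames, "shark_3d.gif")
```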
diffusers/src/diffusers/pipelines/shap_e/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/shap_e/__init__.py", "repo_id": "diffusers", "token_count": 939 }
172
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, List, Optional, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from ...models import StableCascadeUNet from ...schedulers import DDPMWuerstchenScheduler from ...utils import is_torch_version, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline >>> prior_pipe = StableCascadePriorPipeline.from_pretrained( ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16 ... ).to("cuda") >>> gen_pipe = StableCascadeDecoderPipeline.from_pretrained( ... "stabilityai/stable-cascade", torch_dtype=torch.float16 ... ).to("cuda") >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" >>> prior_output = prior_pipe(prompt) >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt) ``` """ class StableCascadeDecoderPipeline(DiffusionPipeline): """ Pipeline for generating images from the Stable Cascade model. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: tokenizer (`CLIPTokenizer`): The CLIP tokenizer. text_encoder (`CLIPTextModelWithProjection`): The CLIP text encoder. decoder ([`StableCascadeUNet`]): The Stable Cascade decoder unet. vqgan ([`PaellaVQModel`]): The VQGAN model. scheduler ([`DDPMWuerstchenScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. latent_dim_scale (float, `optional`, defaults to 10.67): Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and width=int(24*10.67)=256 in order to match the training conditions.
""" unet_name = "decoder" text_encoder_name = "text_encoder" model_cpu_offload_seq = "text_encoder->decoder->vqgan" _callback_tensor_inputs = [ "latents", "prompt_embeds_pooled", "negative_prompt_embeds", "image_embeddings", ] def __init__( self, decoder: StableCascadeUNet, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, latent_dim_scale: float = 10.67, ) -> None: super().__init__() self.register_modules( decoder=decoder, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=scheduler, vqgan=vqgan, ) self.register_to_config(latent_dim_scale=latent_dim_scale) def prepare_latents( self, batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, scheduler ): _, channels, height, width = image_embeddings.shape latents_shape = ( batch_size * num_images_per_prompt, 4, int(height * self.config.latent_dim_scale), int(width * self.config.latent_dim_scale), ) if latents is None: latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def encode_prompt( self, device, batch_size, num_images_per_prompt, do_classifier_free_guidance, prompt=None, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, prompt_embeds_pooled: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds_pooled: Optional[torch.Tensor] = None, ): if prompt_embeds is None: # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] attention_mask = attention_mask[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True ) prompt_embeds = text_encoder_output.hidden_states[-1] if prompt_embeds_pooled is None: prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0) if negative_prompt_embeds is None and do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds_text_encoder_output = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device), output_hidden_states=True, ) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1] negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) seq_len = negative_prompt_embeds_pooled.shape[1] negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to( dtype=self.text_encoder.dtype, device=device ) negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view( batch_size * num_images_per_prompt, seq_len, -1 ) # done duplicates return prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled def check_inputs( self, prompt, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
    )

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        return self._num_timesteps

    def get_timestep_ratio_conditioning(self, t, alphas_cumprod):
        # Recover the continuous timestep ratio t/T in [0, 1] expected by the decoder from a discrete
        # DDPM timestep, by inverting the squared-cosine noise schedule with offset s=0.008.
        s = torch.tensor([0.008])
        clamp_range = [0, 1]
        min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2
        var = alphas_cumprod[t]
        var = var.clamp(*clamp_range)
        s, min_var = s.to(var.device), min_var.to(var.device)
        ratio = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
        return ratio

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeddings: Union[torch.Tensor, List[torch.Tensor]],
        prompt: Union[str, List[str]] = None,
        num_inference_steps: int = 10,
        guidance_scale: float = 0.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        prompt_embeds_pooled: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds_pooled: Optional[torch.Tensor] = None,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image_embeddings (`torch.Tensor` or `List[torch.Tensor]`):
                Image embeddings either extracted from an image or generated by a prior model.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            num_inference_steps (`int`, *optional*, defaults to 10):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 0.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages the model to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            prompt_embeds_pooled (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            negative_prompt_embeds_pooled (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting.
                If not provided, negative_prompt_embeds_pooled will be generated from `negative_prompt` input
                argument.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True,
            otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
        """

        # 0. Define commonly used variables
        device = self._execution_device
        dtype = self.decoder.dtype
        self._guidance_scale = guidance_scale
        if is_torch_version("<", "2.2.0") and dtype == torch.bfloat16:
            raise ValueError("`StableCascadeDecoderPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype.")

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
        )
        if isinstance(image_embeddings, list):
            image_embeddings = torch.cat(image_embeddings, dim=0)

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Compute the effective number of images per prompt
        # We must account for the fact that the image embeddings from the prior can be generated with num_images_per_prompt > 1
        # This results in a case where a single prompt is associated with multiple image embeddings
        # Divide the number of image embeddings by the batch size to determine if this is the case.
        num_images_per_prompt = num_images_per_prompt * (image_embeddings.shape[0] // batch_size)

        # 2. Encode caption
        if prompt_embeds is None and negative_prompt_embeds is None:
            _, prompt_embeds_pooled, _, negative_prompt_embeds_pooled = self.encode_prompt(
                prompt=prompt,
                device=device,
                batch_size=batch_size,
                num_images_per_prompt=num_images_per_prompt,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                negative_prompt=negative_prompt,
                prompt_embeds=prompt_embeds,
                prompt_embeds_pooled=prompt_embeds_pooled,
                negative_prompt_embeds=negative_prompt_embeds,
                negative_prompt_embeds_pooled=negative_prompt_embeds_pooled,
            )

        # The pooled embeds from the prior are pooled again before being passed to the decoder
        prompt_embeds_pooled = (
            torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled])
            if self.do_classifier_free_guidance
            else prompt_embeds_pooled
        )
        effnet = (
            torch.cat([image_embeddings, torch.zeros_like(image_embeddings)])
            if self.do_classifier_free_guidance
            else image_embeddings
        )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latents
        latents = self.prepare_latents(
            batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, self.scheduler
        )

        if isinstance(self.scheduler, DDPMWuerstchenScheduler):
            timesteps = timesteps[:-1]
        else:
            if hasattr(self.scheduler.config, "clip_sample") and self.scheduler.config.clip_sample:
                self.scheduler.config.clip_sample = False  # disable sample clipping
                logger.warning("Setting `clip_sample` to False")

        # 6. Run denoising loop
        if hasattr(self.scheduler, "betas"):
            alphas = 1.0 - self.scheduler.betas
            alphas_cumprod = torch.cumprod(alphas, dim=0)
        else:
            alphas_cumprod = []

        self._num_timesteps = len(timesteps)
        for i, t in enumerate(self.progress_bar(timesteps)):
            if not isinstance(self.scheduler, DDPMWuerstchenScheduler):
                if len(alphas_cumprod) > 0:
                    timestep_ratio = self.get_timestep_ratio_conditioning(t.long().cpu(), alphas_cumprod)
                    timestep_ratio = timestep_ratio.expand(latents.size(0)).to(dtype).to(device)
                else:
                    timestep_ratio = t.float().div(self.scheduler.timesteps[-1]).expand(latents.size(0)).to(dtype)
            else:
                timestep_ratio = t.expand(latents.size(0)).to(dtype)

            # 7. Denoise latents
            predicted_latents = self.decoder(
                sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents,
                timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio,
                clip_text_pooled=prompt_embeds_pooled,
                effnet=effnet,
                return_dict=False,
            )[0]

            # 8. Check for classifier free guidance and apply it
            if self.do_classifier_free_guidance:
                predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2)
                predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale)

            # 9.
Renoise latents to next timestep if not isinstance(self.scheduler, DDPMWuerstchenScheduler): timestep_ratio = t latents = self.scheduler.step( model_output=predicted_latents, timestep=timestep_ratio, sample=latents, generator=generator, ).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) if XLA_AVAILABLE: xm.mark_step() if output_type not in ["pt", "np", "pil", "latent"]: raise ValueError( f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}" ) if not output_type == "latent": # 10. Scale and decode the image latents with vq-vae latents = self.vqgan.config.scale_factor * latents images = self.vqgan.decode(latents).sample.clamp(0, 1) if output_type == "np": images = images.permute(0, 2, 3, 1).cpu().float().numpy() # float() as bfloat16-> numpy doesn't work elif output_type == "pil": images = images.permute(0, 2, 3, 1).cpu().float().numpy() # float() as bfloat16-> numpy doesn't work images = self.numpy_to_pil(images) else: images = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return images return ImagePipelineOutput(images)
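

# Illustrative sketch (not part of the pipeline above): a minimal, hypothetical check of how the
# `latent_dim_scale` config value maps the prior's image-embedding resolution to the VQGAN latent
# resolution that `prepare_latents` allocates. The embedding shape (1, 16, 24, 24) is an assumed
# example; only the spatial dimensions matter for this calculation.
if __name__ == "__main__":
    import torch

    latent_dim_scale = 10.67  # default value from the pipeline config
    image_embeddings = torch.randn(1, 16, 24, 24)  # hypothetical prior output: (batch, channels, height, width)
    batch_size, _, height, width = image_embeddings.shape
    vq_latent_shape = (batch_size, 4, int(height * latent_dim_scale), int(width * latent_dim_scale))
    print(vq_latent_shape)  # (1, 4, 256, 256) -> matches the training conditions described in the class docstring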
diffusers/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py", "repo_id": "diffusers", "token_count": 11856 }
173
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPTextModel, CLIPTokenizer, DPTForDepthEstimation, DPTImageProcessor from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess def preprocess(image): deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): w, h = image[0].size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): r""" Pipeline for text-guided depth-based image-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods implemented
    for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "depth_mask"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        depth_estimator: DPTForDepthEstimation,
        feature_extractor: DPTImageProcessor,
    ):
        super().__init__()

        is_unet_version_less_0_9_0 = (
            unet is not None
            and hasattr(unet.config, "_diffusers_version")
            and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
        )
        is_unet_sample_size_less_64 = (
            unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        )
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to a value smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
                " \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions.
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, depth_estimator=depth_estimator, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " ) init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def prepare_depth_map(self, image, depth_map, batch_size, do_classifier_free_guidance, dtype, device): if isinstance(image, PIL.Image.Image): image = [image] else: image = list(image) if isinstance(image[0], PIL.Image.Image): width, height = image[0].size elif isinstance(image[0], np.ndarray): width, height = image[0].shape[:-1] else: height, width = image[0].shape[-2:] if depth_map is None: pixel_values = self.feature_extractor(images=image, return_tensors="pt").pixel_values pixel_values = pixel_values.to(device=device, dtype=dtype) # The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16. # So we use `torch.autocast` here for half precision inference. if torch.backends.mps.is_available(): autocast_ctx = contextlib.nullcontext() logger.warning( "The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16, but autocast is not yet supported on MPS." ) else: autocast_ctx = torch.autocast(device.type, dtype=dtype) with autocast_ctx: depth_map = self.depth_estimator(pixel_values).predicted_depth else: depth_map = depth_map.to(device=device, dtype=dtype) depth_map = torch.nn.functional.interpolate( depth_map.unsqueeze(1), size=(height // self.vae_scale_factor, width // self.vae_scale_factor), mode="bicubic", align_corners=False, ) depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0 depth_map = depth_map.to(dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if depth_map.shape[0] < batch_size: repeat_by = batch_size // depth_map.shape[0] depth_map = depth_map.repeat(repeat_by, 1, 1, 1) depth_map = torch.cat([depth_map] * 2) if do_classifier_free_guidance else depth_map return depth_map @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, depth_map: Optional[torch.Tensor] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image` or tensor representing an image batch to be used as the starting point. Can accept image latents as `image` only if `depth_map` is not `None`. depth_map (`torch.Tensor`, *optional*): Depth prediction to be used as additional conditioning for the image generation process. If not defined, it automatically predicts the depth with `self.depth_estimator`. strength (`float`, *optional*, defaults to 0.8): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: ```py >>> import torch >>> import requests >>> from PIL import Image >>> from diffusers import StableDiffusionDepth2ImgPipeline >>> pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( ... "stabilityai/stable-diffusion-2-depth", ... torch_dtype=torch.float16, ... ) >>> pipe.to("cuda") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> init_image = Image.open(requests.get(url, stream=True).raw) >>> prompt = "two tigers" >>> n_prompt = "bad, deformed, ugly, bad anotomy" >>> image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0] ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 1. 
Check inputs self.check_inputs( prompt, strength, callback_steps, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs if image is None: raise ValueError("`image` input cannot be undefined.") # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare depth mask depth_mask = self.prepare_depth_map( image, depth_map, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, prompt_embeds.dtype, device, ) # 5. Preprocess image image = self.image_processor.preprocess(image) # 6. Set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 7. Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator ) # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 9. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, depth_mask], dim=1) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) depth_mask = callback_outputs.pop("depth_mask", depth_mask) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
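

# Illustrative sketch (not part of the pipeline above): the per-image depth normalization performed in
# `prepare_depth_map`, which rescales an arbitrary depth prediction to the [-1, 1] range before it is
# concatenated with the latents. The 2x2 depth values below are made up purely for demonstration.
if __name__ == "__main__":
    import torch

    depth = torch.tensor([[[[0.0, 5.0], [10.0, 20.0]]]])  # hypothetical (batch, 1, height, width) depth prediction
    depth_min = torch.amin(depth, dim=[1, 2, 3], keepdim=True)
    depth_max = torch.amax(depth, dim=[1, 2, 3], keepdim=True)
    depth_mask = 2.0 * (depth - depth_min) / (depth_max - depth_min) - 1.0
    print(depth_mask)  # tensor([[[[-1.0000, -0.5000], [ 0.0000,  1.0000]]]])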
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py", "repo_id": "diffusers", "token_count": 19606 }
174
# Copyright 2025 Stability AI and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import ( CLIPTextModelWithProjection, CLIPTokenizer, SiglipImageProcessor, SiglipVisionModel, T5EncoderModel, T5TokenizerFast, ) from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, SD3IPAdapterMixin, SD3LoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import SD3Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import StableDiffusion3PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusion3InpaintPipeline >>> from diffusers.utils import load_image >>> pipe = StableDiffusion3InpaintPipeline.from_pretrained( ... "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16 ... 
) >>> pipe.to("cuda") >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" >>> source = load_image(img_url) >>> mask = load_image(mask_url) >>> image = pipe(prompt=prompt, image=source, mask_image=mask).images[0] >>> image.save("sd3_inpainting.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class StableDiffusion3InpaintPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin, SD3IPAdapterMixin): r""" Args: transformer ([`SD3Transformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModelWithProjection`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant, with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size` as its dimension. text_encoder_2 ([`CLIPTextModelWithProjection`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. text_encoder_3 ([`T5EncoderModel`]): Frozen text-encoder. Stable Diffusion 3 uses [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_3 (`T5TokenizerFast`): Tokenizer of class [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). image_encoder (`SiglipVisionModel`, *optional*): Pre-trained Vision Model for IP Adapter. feature_extractor (`SiglipImageProcessor`, *optional*): Image processor for IP Adapter. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->image_encoder->transformer->vae" _optional_components = ["image_encoder", "feature_extractor"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"] def __init__( self, transformer: SD3Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5TokenizerFast, image_encoder: Optional[SiglipVisionModel] = None, feature_extractor: Optional[SiglipImageProcessor] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, transformer=transformer, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 latent_channels = self.vae.config.latent_channels if getattr(self, "vae", None) else 16 self.image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, vae_latent_channels=latent_channels ) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, vae_latent_channels=latent_channels, do_normalize=False, do_binarize=True, do_convert_grayscale=True, ) self.tokenizer_max_length = ( self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 ) self.default_sample_size = ( self.transformer.config.sample_size if hasattr(self, "transformer") and self.transformer is not None else 128 ) self.patch_size = ( self.transformer.config.patch_size if hasattr(self, "transformer") and self.transformer is not None else 2 ) # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_images_per_prompt: int = 1, max_sequence_length: int = 256, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if self.text_encoder_3 is None: return torch.zeros( ( batch_size * num_images_per_prompt, self.tokenizer_max_length, self.transformer.config.joint_attention_dim, ), device=device, dtype=dtype, ) text_inputs = self.tokenizer_3( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] dtype = self.text_encoder_3.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) _, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention 
mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds def _get_clip_prompt_embeds( self, prompt: Union[str, List[str]], num_images_per_prompt: int = 1, device: Optional[torch.device] = None, clip_skip: Optional[int] = None, clip_model_index: int = 0, ): device = device or self._execution_device clip_tokenizers = [self.tokenizer, self.tokenizer_2] clip_text_encoders = [self.text_encoder, self.text_encoder_2] tokenizer = clip_tokenizers[clip_model_index] text_encoder = clip_text_encoders[clip_model_index] prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = tokenizer( prompt, padding="max_length", max_length=self.tokenizer_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) _, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) return prompt_embeds, pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], prompt_3: Union[str, List[str]], device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, negative_prompt_3: Optional[Union[str, List[str]]] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, clip_skip: Optional[int] = None, max_sequence_length: int = 256, lora_scale: Optional[float] = None, ): r""" Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is used in all text-encoders prompt_3 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is used in all text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders. negative_prompt_3 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=0, ) prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds( prompt=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, clip_model_index=1, ) clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) t5_prompt_embed = self._get_t5_prompt_embeds( prompt=prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device, ) clip_prompt_embeds = torch.nn.functional.pad( clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) ) prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_3 = negative_prompt_3 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) negative_prompt_3 = ( batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 ) if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds( negative_prompt, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=0, ) negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds( negative_prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, clip_skip=None, clip_model_index=1, ) negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) t5_negative_prompt_embed = self._get_t5_prompt_embeds( prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device, ) negative_clip_prompt_embeds = torch.nn.functional.pad( negative_clip_prompt_embeds, (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]), ) negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) negative_pooled_prompt_embeds = torch.cat( [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1 ) if self.text_encoder is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.check_inputs def check_inputs( self, prompt, prompt_2, prompt_3, height, width, strength, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None, ): if ( height % (self.vae_scale_factor * self.patch_size) != 0 or width % (self.vae_scale_factor * self.patch_size) != 0 ): raise ValueError( f"`height` and `width` have to be divisible by {self.vae_scale_factor * self.patch_size} but are {height} and {width}." f"You can use height {height - height % (self.vae_scale_factor * self.patch_size)} and width {width - width % (self.vae_scale_factor * self.patch_size)}." ) if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_3 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_3 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." 
) if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, return_noise=False, return_image_latents=False, ): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if (image is None or timestep is None) and not is_strength_max: raise ValueError( "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." "However, either the image or the noise timestep has not been provided." ) if return_image_latents or (latents is None and not is_strength_max): image = image.to(device=device, dtype=dtype) if image.shape[1] == 16: image_latents = image else: image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. 
then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else self.scheduler.scale_noise(image_latents, timestep, noise) else: noise = latents.to(device) latents = noise outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor return image_latents def prepare_mask_latents( self, mask, masked_image, batch_size, num_images_per_prompt, height, width, dtype, device, generator, do_classifier_free_guidance, ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 16: masked_image_latents = masked_image else: masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator) masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_image def encode_image(self, image: PipelineImageInput, device: torch.device) -> torch.Tensor: """Encodes the given image into a feature representation using a pre-trained image encoder. Args: image (`PipelineImageInput`): Input image to be encoded. device: (`torch.device`): Torch device. Returns: `torch.Tensor`: The encoded image feature representation. """ if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=self.dtype) return self.image_encoder(image, output_hidden_states=True).hidden_states[-2] # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[torch.Tensor] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, ) -> torch.Tensor: """Prepares image embeddings for use in the IP-Adapter. Either `ip_adapter_image` or `ip_adapter_image_embeds` must be passed. Args: ip_adapter_image (`PipelineImageInput`, *optional*): The input image to extract features from for IP-Adapter. ip_adapter_image_embeds (`torch.Tensor`, *optional*): Precomputed image embeddings. device: (`torch.device`, *optional*): Torch device. num_images_per_prompt (`int`, defaults to 1): Number of images that should be generated per prompt. do_classifier_free_guidance (`bool`, defaults to True): Whether to use classifier free guidance or not. """ device = device or self._execution_device if ip_adapter_image_embeds is not None: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = ip_adapter_image_embeds.chunk(2) else: single_image_embeds = ip_adapter_image_embeds elif ip_adapter_image is not None: single_image_embeds = self.encode_image(ip_adapter_image, device) if do_classifier_free_guidance: single_negative_image_embeds = torch.zeros_like(single_image_embeds) else: raise ValueError("Neither `ip_adapter_image_embeds` or `ip_adapter_image_embeds` were provided.") image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) if do_classifier_free_guidance: negative_image_embeds = torch.cat([single_negative_image_embeds] * num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) return image_embeds.to(device=device) # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.enable_sequential_cpu_offload def enable_sequential_cpu_offload(self, *args, **kwargs): if self.image_encoder is not None and "image_encoder" not in self._exclude_from_cpu_offload: logger.warning( "`pipe.enable_sequential_cpu_offload()` might fail for `image_encoder` if it uses " "`torch.nn.MultiheadAttention`. You can exclude `image_encoder` from CPU offloading by calling " "`pipe._exclude_from_cpu_offload.append('image_encoder')` before `pipe.enable_sequential_cpu_offload()`." 
) super().enable_sequential_cpu_offload(*args, **kwargs) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, prompt_3: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, mask_image: PipelineImageInput = None, masked_image_latents: PipelineImageInput = None, height: int = None, width: int = None, padding_mask_crop: Optional[int] = None, strength: float = 0.6, num_inference_steps: int = 50, sigmas: Optional[List[float]] = None, guidance_scale: float = 7.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, negative_prompt_3: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, joint_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 256, mu: Optional[float] = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is will be used instead prompt_3 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is will be used instead image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, 1)`, or `(H, W)`. 
mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask latents tensor will ge generated by `mask_image`. height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. padding_mask_crop (`int`, *optional*, defaults to `None`): The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region with the same aspect ration of the image and contains all masked area, and then expand that area based on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large and contain information irrelevant for inpainting, such as background. strength (`float`, *optional*, defaults to 1.0): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 7.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used instead negative_prompt_3 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `negative_prompt` is used instead num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`torch.Tensor`, *optional*): Pre-generated image embeddings for IP-Adapter. Should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead of a plain tuple. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. mu (`float`, *optional*): `mu` value used for `dynamic_shifting`. 
Examples: Returns: [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, prompt_3, height, width, strength, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, clip_skip=self.clip_skip, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) # 3. Prepare timesteps scheduler_kwargs = {} if self.scheduler.config.get("use_dynamic_shifting", None) and mu is None: image_seq_len = (int(height) // self.vae_scale_factor // self.transformer.config.patch_size) * ( int(width) // self.vae_scale_factor // self.transformer.config.patch_size ) mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.16), ) scheduler_kwargs["mu"] = mu elif mu is not None: scheduler_kwargs["mu"] = mu timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, **scheduler_kwargs ) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) # check that number of inference steps is not < 1 - as this doesn't make sense if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." 
) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise is_strength_max = strength == 1.0 # 4. Preprocess mask and image if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = "fill" else: crops_coords = None resize_mode = "default" original_image = image init_image = self.image_processor.preprocess( image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode ) init_image = init_image.to(dtype=torch.float32) # 5. Prepare latent variables num_channels_latents = self.vae.config.latent_channels num_channels_transformer = self.transformer.config.in_channels return_image_latents = num_channels_transformer == 16 latents_outputs = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents, ) if return_image_latents: latents, noise, image_latents = latents_outputs else: latents, noise = latents_outputs # 6. Prepare mask latent variables mask_condition = self.mask_processor.preprocess( mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords ) if masked_image_latents is None: masked_image = init_image * (mask_condition < 0.5) else: masked_image = masked_image_latents mask, masked_image_latents = self.prepare_mask_latents( mask_condition, masked_image, batch_size, num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance, ) # match the inpainting pipeline and will be updated with input + mask inpainting model later if num_channels_transformer == 33: # default case for runwayml/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if ( num_channels_latents + num_channels_mask + num_channels_masked_image != self.transformer.config.in_channels ): raise ValueError( f"Incorrect configuration settings! The config of `pipeline.transformer`: {self.transformer.config} expects" f" {self.transformer.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `pipeline.transformer` or your `mask_image` or `image` input." ) elif num_channels_transformer != 16: raise ValueError( f"The transformer {self.transformer.__class__} should have 16 input channels or 33 input channels, not {self.transformer.config.in_channels}." ) # 7. Prepare image embeddings if (ip_adapter_image is not None and self.is_ip_adapter_active) or ip_adapter_image_embeds is not None: ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) if self.joint_attention_kwargs is None: self._joint_attention_kwargs = {"ip_adapter_image_embeds": ip_adapter_image_embeds} else: self._joint_attention_kwargs.update(ip_adapter_image_embeds=ip_adapter_image_embeds) # 8. 
Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) if num_channels_transformer == 33: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, pooled_projections=pooled_prompt_embeds, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if num_channels_transformer == 16: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.scale_noise( init_latents_proper, torch.tensor([noise_timestep]), noise ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) mask = callback_outputs.pop("mask", mask) masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] else: image = latents do_denormalize = [True] * image.shape[0] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusion3PipelineOutput(images=image)
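

# --- Illustrative sketch (not part of the pipeline above) ---
# The helper below is a minimal, self-contained illustration of two scheduling details used
# in `__call__`: how `calculate_shift` derives `mu` from the latent sequence length when the
# scheduler uses dynamic shifting, and how `strength` trims the timestep schedule (mirroring
# `get_timesteps`). All concrete numbers are hypothetical example defaults, not values taken
# from any model config.
def _illustrate_inpaint_schedule(
    height: int = 1024,
    width: int = 1024,
    vae_scale_factor: int = 8,
    patch_size: int = 2,
    num_inference_steps: int = 50,
    strength: float = 0.6,
):
    # Same sequence-length computation as in `__call__` when `use_dynamic_shifting` is enabled.
    image_seq_len = (height // vae_scale_factor // patch_size) * (width // vae_scale_factor // patch_size)
    mu = calculate_shift(image_seq_len)

    # Same trimming as `get_timesteps`: with strength=0.6 and 50 steps, only the last 30
    # timesteps are run, starting from `image` noised to the first kept timestep.
    init_timestep = min(num_inference_steps * strength, num_inference_steps)
    t_start = int(max(num_inference_steps - init_timestep, 0))
    effective_steps = num_inference_steps - t_start

    return mu, effective_steps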
# End of file: diffusers/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...utils import (
    BaseOutput,
)


@dataclass
class StableDiffusionSafePipelineOutput(BaseOutput):
    """
    Output class for Safe Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array represent the denoised images of the diffusion pipeline.
        nsfw_content_detected (`List[bool]`)
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, or `None` if safety checking could not be performed.
        unsafe_images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images that were flagged by the safety checker and may contain "not-safe-for-work"
            (nsfw) content, or `None` if no safety check was performed or no images were flagged.
        applied_safety_concept (`str`)
            The safety concept that was applied for safety guidance, or `None` if safety guidance was disabled
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
    unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]]
    applied_safety_concept: Optional[str]
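

# --- Illustrative sketch (not part of the library code) ---
# Hypothetical example of how a Safe Stable Diffusion pipeline might populate this output
# class; the blank image and flag values below are placeholders, not real pipeline results.
def _example_safe_pipeline_output() -> StableDiffusionSafePipelineOutput:
    blank = PIL.Image.new("RGB", (64, 64))
    return StableDiffusionSafePipelineOutput(
        images=[blank],
        nsfw_content_detected=[False],
        unsafe_images=None,
        applied_safety_concept=None,
    )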
# End of file: diffusers/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py
import inspect from dataclasses import dataclass from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, GPT2Tokenizer, ) from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, scale_lora_layers, unscale_lora_layers, ) from ...utils.outputs import BaseOutput from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name # New BaseOutput child class for joint image-text output @dataclass class ImageTextPipelineOutput(BaseOutput): """ Output class for joint image-text pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. text (`List[str]` or `List[List[str]]`) List of generated text strings of length `batch_size` or a list of list of strings whose outer list has length `batch_size`. """ images: Optional[Union[List[PIL.Image.Image], np.ndarray]] text: Optional[Union[List[str], List[List[str]]]] class UniDiffuserPipeline(DeprecatedPipelineMixin, DiffusionPipeline): r""" Pipeline for a bimodal image-text model which supports unconditional text and image generation, text-conditioned image generation, image-conditioned text generation, and joint image-text generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. This is part of the UniDiffuser image representation along with the CLIP vision encoding. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). image_encoder ([`CLIPVisionModel`]): A [`~transformers.CLIPVisionModel`] to encode images as part of its image representation along with the VAE latent representation. image_processor ([`CLIPImageProcessor`]): [`~transformers.CLIPImageProcessor`] to preprocess an image before CLIP encoding it with `image_encoder`. clip_tokenizer ([`CLIPTokenizer`]): A [`~transformers.CLIPTokenizer`] to tokenize the prompt before encoding it with `text_encoder`. text_decoder ([`UniDiffuserTextDecoder`]): Frozen text decoder. This is a GPT-style model which is used to generate text from the UniDiffuser embedding. text_tokenizer ([`GPT2Tokenizer`]): A [`~transformers.GPT2Tokenizer`] to decode text for text generation; used along with the `text_decoder`. unet ([`UniDiffuserModel`]): A [U-ViT](https://github.com/baofff/U-ViT) model with UNNet-style skip connections between transformer layers to denoise the encoded image latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image and/or text latents. The original UniDiffuser paper uses the [`DPMSolverMultistepScheduler`] scheduler. """ _last_supported_version = "0.33.1" # TODO: support for moving submodules for components with enable_model_cpu_offload model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae->text_decoder" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, image_encoder: CLIPVisionModelWithProjection, clip_image_processor: CLIPImageProcessor, clip_tokenizer: CLIPTokenizer, text_decoder: UniDiffuserTextDecoder, text_tokenizer: GPT2Tokenizer, unet: UniDiffuserModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__() if text_encoder.config.hidden_size != text_decoder.prefix_inner_dim: raise ValueError( f"The text encoder hidden size and text decoder prefix inner dim must be the same, but" f" `text_encoder.config.hidden_size`: {text_encoder.config.hidden_size} and `text_decoder.prefix_inner_dim`: {text_decoder.prefix_inner_dim}" ) self.register_modules( vae=vae, text_encoder=text_encoder, image_encoder=image_encoder, clip_image_processor=clip_image_processor, clip_tokenizer=clip_tokenizer, text_decoder=text_decoder, text_tokenizer=text_tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.num_channels_latents = vae.config.latent_channels self.text_encoder_seq_len = text_encoder.config.max_position_embeddings self.text_encoder_hidden_size = text_encoder.config.hidden_size self.image_encoder_projection_dim = image_encoder.config.projection_dim self.unet_resolution = unet.config.sample_size self.text_intermediate_dim = self.text_encoder_hidden_size if self.text_decoder.prefix_hidden_dim is not None: self.text_intermediate_dim = self.text_decoder.prefix_hidden_dim self.mode = None # TODO: handle safety checking? self.safety_checker = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def _infer_mode(self, prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents): r""" Infer the generation task ('mode') from the inputs to `__call__`. If the mode has been manually set, the set mode will be used. 
""" prompt_available = (prompt is not None) or (prompt_embeds is not None) image_available = image is not None input_available = prompt_available or image_available prompt_latents_available = prompt_latents is not None vae_latents_available = vae_latents is not None clip_latents_available = clip_latents is not None full_latents_available = latents is not None image_latents_available = vae_latents_available and clip_latents_available all_indv_latents_available = prompt_latents_available and image_latents_available if self.mode is not None: # Preferentially use the mode set by the user mode = self.mode elif prompt_available: mode = "text2img" elif image_available: mode = "img2text" else: # Neither prompt nor image supplied, infer based on availability of latents if full_latents_available or all_indv_latents_available: mode = "joint" elif prompt_latents_available: mode = "text" elif image_latents_available: mode = "img" else: # No inputs or latents available mode = "joint" # Give warnings for ambiguous cases if self.mode is None and prompt_available and image_available: logger.warning( f"You have supplied both a text prompt and image to the pipeline and mode has not been set manually," f" defaulting to mode '{mode}'." ) if self.mode is None and not input_available: if vae_latents_available != clip_latents_available: # Exactly one of vae_latents and clip_latents is supplied logger.warning( f"You have supplied exactly one of `vae_latents` and `clip_latents`, whereas either both or none" f" are expected to be supplied. Defaulting to mode '{mode}'." ) elif not prompt_latents_available and not vae_latents_available and not clip_latents_available: # No inputs or latents supplied logger.warning( f"No inputs or latents have been supplied, and mode has not been manually set," f" defaulting to mode '{mode}'." ) return mode # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_tiling def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_tiling def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" self.vae.disable_tiling() # Functions to manually set the mode def set_text_mode(self): r"""Manually set the generation mode to unconditional ("marginal") text generation.""" self.mode = "text" def set_image_mode(self): r"""Manually set the generation mode to unconditional ("marginal") image generation.""" self.mode = "img" def set_text_to_image_mode(self): r"""Manually set the generation mode to text-conditioned image generation.""" self.mode = "text2img" def set_image_to_text_mode(self): r"""Manually set the generation mode to image-conditioned text generation.""" self.mode = "img2text" def set_joint_mode(self): r"""Manually set the generation mode to unconditional joint image-text generation.""" self.mode = "joint" def reset_mode(self): r"""Removes a manually set mode; after calling this, the pipeline will infer the mode from inputs.""" self.mode = None def _infer_batch_size( self, mode, prompt, prompt_embeds, image, num_images_per_prompt, num_prompts_per_image, latents, prompt_latents, vae_latents, clip_latents, ): r"""Infers the batch size and multiplier depending on mode and supplied arguments to `__call__`.""" if num_images_per_prompt is None: num_images_per_prompt = 1 if num_prompts_per_image is None: num_prompts_per_image = 1 assert num_images_per_prompt > 0, "num_images_per_prompt must be a positive integer" assert num_prompts_per_image > 0, "num_prompts_per_image must be a positive integer" if mode in ["text2img"]: if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: # Either prompt or prompt_embeds must be present for text2img. batch_size = prompt_embeds.shape[0] multiplier = num_images_per_prompt elif mode in ["img2text"]: if isinstance(image, PIL.Image.Image): batch_size = 1 else: # Image must be available and type either PIL.Image.Image or torch.Tensor. # Not currently supporting something like image_embeds. batch_size = image.shape[0] multiplier = num_prompts_per_image elif mode in ["img"]: if vae_latents is not None: batch_size = vae_latents.shape[0] elif clip_latents is not None: batch_size = clip_latents.shape[0] else: batch_size = 1 multiplier = num_images_per_prompt elif mode in ["text"]: if prompt_latents is not None: batch_size = prompt_latents.shape[0] else: batch_size = 1 multiplier = num_prompts_per_image elif mode in ["joint"]: if latents is not None: batch_size = latents.shape[0] elif prompt_latents is not None: batch_size = prompt_latents.shape[0] elif vae_latents is not None: batch_size = vae_latents.shape[0] elif clip_latents is not None: batch_size = clip_latents.shape[0] else: batch_size = 1 if num_images_per_prompt == num_prompts_per_image: multiplier = num_images_per_prompt else: multiplier = min(num_images_per_prompt, num_prompts_per_image) logger.warning( f"You are using mode `{mode}` and `num_images_per_prompt`: {num_images_per_prompt} and" f" num_prompts_per_image: {num_prompts_per_image} are not equal. Using batch size equal to" f" `min(num_images_per_prompt, num_prompts_per_image) = {batch_size}." 
) return batch_size, multiplier # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with self.tokenizer->self.clip_tokenizer def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.clip_tokenizer) text_inputs = self.clip_tokenizer( prompt, padding="max_length", max_length=self.clip_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.clip_tokenizer.batch_decode( untruncated_ids[:, self.clip_tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.clip_tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.clip_tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.clip_tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.prepare_image_latents # Add num_prompts_per_image argument, sample from autoencoder moment distribution def encode_image_vae_latents( self, image, batch_size, num_prompts_per_image, dtype, device, do_classifier_free_guidance, generator=None, ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_prompts_per_image if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if isinstance(generator, list): image_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i]) * self.vae.config.scaling_factor for i in range(batch_size) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = self.vae.encode(image).latent_dist.sample(generator=generator) # Scale image_latents by the VAE's scaling factor image_latents = image_latents * self.vae.config.scaling_factor if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand image_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. 
Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) if do_classifier_free_guidance: uncond_image_latents = torch.zeros_like(image_latents) image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) return image_latents def encode_image_clip_latents( self, image, batch_size, num_prompts_per_image, dtype, device, generator=None, ): # Map image to CLIP embedding. if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) preprocessed_image = self.clip_image_processor.preprocess( image, return_tensors="pt", ) preprocessed_image = preprocessed_image.to(device=device, dtype=dtype) batch_size = batch_size * num_prompts_per_image if isinstance(generator, list): image_latents = [ self.image_encoder(**preprocessed_image[i : i + 1]).image_embeds for i in range(batch_size) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = self.image_encoder(**preprocessed_image).image_embeds if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand image_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) return image_latents def prepare_text_latents( self, batch_size, num_images_per_prompt, seq_len, hidden_size, dtype, device, generator, latents=None ): # Prepare latents for the CLIP embedded prompt. shape = (batch_size * num_images_per_prompt, seq_len, hidden_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: # latents is assumed to have shape (B, L, D) latents = latents.repeat(num_images_per_prompt, 1, 1) latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents # Rename prepare_latents -> prepare_image_vae_latents and add num_prompts_per_image argument. def prepare_image_vae_latents( self, batch_size, num_prompts_per_image, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size * num_prompts_per_image, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: # latents is assumed to have shape (B, C, H, W) latents = latents.repeat(num_prompts_per_image, 1, 1, 1) latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_image_clip_latents( self, batch_size, num_prompts_per_image, clip_img_dim, dtype, device, generator, latents=None ): # Prepare latents for the CLIP embedded image. shape = (batch_size * num_prompts_per_image, 1, clip_img_dim) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators."
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: # latents is assumed to have shape (B, L, D) latents = latents.repeat(num_prompts_per_image, 1, 1) latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def decode_text_latents(self, text_latents, device): output_token_list, seq_lengths = self.text_decoder.generate_captions( text_latents, self.text_tokenizer.eos_token_id, device=device ) output_list = output_token_list.cpu().numpy() generated_text = [ self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True) for output, length in zip(output_list, seq_lengths) ] return generated_text def _split(self, x, height, width): r""" Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim) into two tensors of shape (B, C, H, W) and (B, 1, clip_img_dim) """ batch_size = x.shape[0] latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor img_vae_dim = self.num_channels_latents * latent_height * latent_width img_vae, img_clip = x.split([img_vae_dim, self.image_encoder_projection_dim], dim=1) img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) return img_vae, img_clip def _combine(self, img_vae, img_clip): r""" Combines a latent image img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1, clip_img_dim) into a single tensor of shape (B, C * H * W + clip_img_dim). """ img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) return torch.concat([img_vae, img_clip], dim=-1) def _split_joint(self, x, height, width): r""" Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim + text_seq_len * text_dim] into (img_vae, img_clip, text) where img_vae is of shape (B, C, H, W), img_clip is of shape (B, 1, clip_img_dim), and text is of shape (B, text_seq_len, text_dim). """ batch_size = x.shape[0] latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor img_vae_dim = self.num_channels_latents * latent_height * latent_width text_dim = self.text_encoder_seq_len * self.text_intermediate_dim img_vae, img_clip, text = x.split([img_vae_dim, self.image_encoder_projection_dim, text_dim], dim=1) img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) text = torch.reshape(text, (batch_size, self.text_encoder_seq_len, self.text_intermediate_dim)) return img_vae, img_clip, text def _combine_joint(self, img_vae, img_clip, text): r""" Combines a latent image img_vae of shape (B, C, H, W), a CLIP-embedded image img_clip of shape (B, L_img, clip_img_dim), and a text embedding text of shape (B, L_text, text_dim) into a single embedding x of shape (B, C * H * W + L_img * clip_img_dim + L_text * text_dim). 
""" img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) text = torch.reshape(text, (text.shape[0], -1)) return torch.concat([img_vae, img_clip, text], dim=-1) def _get_noise_pred( self, mode, latents, t, prompt_embeds, img_vae, img_clip, max_timestep, data_type, guidance_scale, generator, device, height, width, ): r""" Gets the noise prediction using the `unet` and performs classifier-free guidance, if necessary. """ if mode == "joint": # Joint text-image generation img_vae_latents, img_clip_latents, text_latents = self._split_joint(latents, height, width) img_vae_out, img_clip_out, text_out = self.unet( img_vae_latents, img_clip_latents, text_latents, timestep_img=t, timestep_text=t, data_type=data_type ) x_out = self._combine_joint(img_vae_out, img_clip_out, text_out) if guidance_scale <= 1.0: return x_out # Classifier-free guidance img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) _, _, text_out_uncond = self.unet( img_vae_T, img_clip_T, text_latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type ) img_vae_out_uncond, img_clip_out_uncond, _ = self.unet( img_vae_latents, img_clip_latents, text_T, timestep_img=t, timestep_text=max_timestep, data_type=data_type, ) x_out_uncond = self._combine_joint(img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) return guidance_scale * x_out + (1.0 - guidance_scale) * x_out_uncond elif mode == "text2img": # Text-conditioned image generation img_vae_latents, img_clip_latents = self._split(latents, height, width) img_vae_out, img_clip_out, text_out = self.unet( img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=0, data_type=data_type ) img_out = self._combine(img_vae_out, img_clip_out) if guidance_scale <= 1.0: return img_out # Classifier-free guidance text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( img_vae_latents, img_clip_latents, text_T, timestep_img=t, timestep_text=max_timestep, data_type=data_type, ) img_out_uncond = self._combine(img_vae_out_uncond, img_clip_out_uncond) return guidance_scale * img_out + (1.0 - guidance_scale) * img_out_uncond elif mode == "img2text": # Image-conditioned text generation img_vae_out, img_clip_out, text_out = self.unet( img_vae, img_clip, latents, timestep_img=0, timestep_text=t, data_type=data_type ) if guidance_scale <= 1.0: return text_out # Classifier-free guidance img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( img_vae_T, img_clip_T, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type ) return guidance_scale * text_out + (1.0 - guidance_scale) * text_out_uncond elif mode == "text": # Unconditional ("marginal") text generation (no CFG) img_vae_out, img_clip_out, text_out = self.unet( img_vae, img_clip, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type ) return text_out elif mode == "img": # Unconditional ("marginal") image generation (no CFG) img_vae_latents, 
img_clip_latents = self._split(latents, height, width) img_vae_out, img_clip_out, text_out = self.unet( img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=max_timestep, data_type=data_type, ) img_out = self._combine(img_vae_out, img_clip_out) return img_out def check_latents_shape(self, latents_name, latents, expected_shape): latents_shape = latents.shape expected_num_dims = len(expected_shape) + 1 # expected dimensions plus the batch dimension expected_shape_str = ", ".join(str(dim) for dim in expected_shape) if len(latents_shape) != expected_num_dims: raise ValueError( f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" f" {latents_shape} has {len(latents_shape)} dimensions." ) for i in range(1, expected_num_dims): if latents_shape[i] != expected_shape[i - 1]: raise ValueError( f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" f" {latents_shape} has {latents_shape[i]} != {expected_shape[i - 1]} at dimension {i}." ) def check_inputs( self, mode, prompt, image, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, latents=None, prompt_latents=None, vae_latents=None, clip_latents=None, ): # Check inputs before running the generative process. if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: raise ValueError( f"`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if mode == "text2img": if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if mode == "img2text": if image is None: raise ValueError("`img2text` mode requires an image to be provided.") # Check provided latents latent_height = height // self.vae_scale_factor latent_width = width // self.vae_scale_factor full_latents_available = latents is not None prompt_latents_available = prompt_latents is not None vae_latents_available = vae_latents is not None clip_latents_available = clip_latents is not None if full_latents_available: individual_latents_available = ( prompt_latents is not None or vae_latents is not None or clip_latents is not None ) if individual_latents_available: logger.warning( "You have supplied both `latents` and at least one of `prompt_latents`, `vae_latents`, and" " `clip_latents`. The value of `latents` will override the value of any individually supplied latents." ) # Check shape of full latents img_vae_dim = self.num_channels_latents * latent_height * latent_width text_dim = self.text_encoder_seq_len * self.text_encoder_hidden_size latents_dim = img_vae_dim + self.image_encoder_projection_dim + text_dim latents_expected_shape = (latents_dim,) self.check_latents_shape("latents", latents, latents_expected_shape) # Check individual latent shapes, if present if prompt_latents_available: prompt_latents_expected_shape = (self.text_encoder_seq_len, self.text_encoder_hidden_size) self.check_latents_shape("prompt_latents", prompt_latents, prompt_latents_expected_shape) if vae_latents_available: vae_latents_expected_shape = (self.num_channels_latents, latent_height, latent_width) self.check_latents_shape("vae_latents", vae_latents, vae_latents_expected_shape) if clip_latents_available: clip_latents_expected_shape = (1, self.image_encoder_projection_dim) self.check_latents_shape("clip_latents", clip_latents, clip_latents_expected_shape) if mode in ["text2img", "img"] and vae_latents_available and clip_latents_available: if vae_latents.shape[0] != clip_latents.shape[0]: raise ValueError( f"Both `vae_latents` and `clip_latents` are supplied, but their batch dimensions are not equal:" f" {vae_latents.shape[0]} != {clip_latents.shape[0]}." ) if mode == "joint" and prompt_latents_available and vae_latents_available and clip_latents_available: if prompt_latents.shape[0] != vae_latents.shape[0] or prompt_latents.shape[0] != clip_latents.shape[0]: raise ValueError( f"All of `prompt_latents`, `vae_latents`, and `clip_latents` are supplied, but their batch" f" dimensions are not equal: {prompt_latents.shape[0]} != {vae_latents.shape[0]}" f" != {clip_latents.shape[0]}." 
) @torch.no_grad() def __call__( self, prompt: Optional[Union[str, List[str]]] = None, image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None, height: Optional[int] = None, width: Optional[int] = None, data_type: Optional[int] = 1, num_inference_steps: int = 50, guidance_scale: float = 8.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, num_prompts_per_image: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_latents: Optional[torch.Tensor] = None, vae_latents: Optional[torch.Tensor] = None, clip_latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. Required for text-conditioned image generation (`text2img`) mode. image (`torch.Tensor` or `PIL.Image.Image`, *optional*): `Image` or tensor representing an image batch. Required for image-conditioned text generation (`img2text`) mode. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. data_type (`int`, *optional*, defaults to 1): The data type (either 0 or 1). Only used if you are loading a checkpoint which supports a data type embedding; this is added for compatibility with the [UniDiffuser-v1](https://huggingface.co/thu-ml/unidiffuser-v1) checkpoint. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 8.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). Used in text-conditioned image generation (`text2img`) mode. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. Used in `text2img` (text-conditioned image generation) and `img` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. num_prompts_per_image (`int`, *optional*, defaults to 1): The number of prompts to generate per image. Used in `img2text` (image-conditioned text generation) and `text` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. 
Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for joint image-text generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. This is assumed to contain a full set of VAE, CLIP, and text latents; if supplied, it overrides the values of `prompt_latents`, `vae_latents`, and `clip_latents`. prompt_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for text generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. vae_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. clip_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. Used in text-conditioned image generation (`text2img`) mode. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. Used in text-conditioned image generation (`text2img`) mode. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImageTextPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that is called every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Returns: [`~pipelines.unidiffuser.ImageTextPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.unidiffuser.ImageTextPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of generated texts. """ # 0. Default height and width to unet height = height or self.unet_resolution * self.vae_scale_factor width = width or self.unet_resolution * self.vae_scale_factor # 1. Check inputs # Recalculate mode for each call to the pipeline. 
mode = self._infer_mode(prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents) self.check_inputs( mode, prompt, image, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, latents, prompt_latents, vae_latents, clip_latents, ) # 2. Define call parameters batch_size, multiplier = self._infer_batch_size( mode, prompt, prompt_embeds, image, num_images_per_prompt, num_prompts_per_image, latents, prompt_latents, vae_latents, clip_latents, ) device = self._execution_device reduce_text_emb_dim = self.text_intermediate_dim < self.text_encoder_hidden_size or self.mode != "text2img" # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. # Note that this differs from the formulation in the unidiffusers paper! do_classifier_free_guidance = guidance_scale > 1.0 # check if scheduler is in sigmas space # scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas") # 3. Encode input prompt, if available; otherwise prepare text latents if latents is not None: # Overwrite individual latents vae_latents, clip_latents, prompt_latents = self._split_joint(latents, height, width) if mode in ["text2img"]: # 3.1. Encode input prompt, if available assert prompt is not None or prompt_embeds is not None prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=multiplier, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # if do_classifier_free_guidance: # prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) else: # 3.2. Prepare text latent variables, if input not available prompt_embeds = self.prepare_text_latents( batch_size=batch_size, num_images_per_prompt=multiplier, seq_len=self.text_encoder_seq_len, hidden_size=self.text_encoder_hidden_size, dtype=self.text_encoder.dtype, # Should work with both full precision and mixed precision device=device, generator=generator, latents=prompt_latents, ) if reduce_text_emb_dim: prompt_embeds = self.text_decoder.encode(prompt_embeds) # 4. Encode image, if available; otherwise prepare image latents if mode in ["img2text"]: # 4.1. Encode images, if available assert image is not None, "`img2text` requires a conditioning image" # Encode image using VAE image_vae = self.image_processor.preprocess(image) height, width = image_vae.shape[-2:] image_vae_latents = self.encode_image_vae_latents( image=image_vae, batch_size=batch_size, num_prompts_per_image=multiplier, dtype=prompt_embeds.dtype, device=device, do_classifier_free_guidance=False, # Copied from InstructPix2Pix, don't use their version of CFG generator=generator, ) # Encode image using CLIP image_clip_latents = self.encode_image_clip_latents( image=image, batch_size=batch_size, num_prompts_per_image=multiplier, dtype=prompt_embeds.dtype, device=device, generator=generator, ) # (batch_size, clip_hidden_size) => (batch_size, 1, clip_hidden_size) image_clip_latents = image_clip_latents.unsqueeze(1) else: # 4.2. 
Prepare image latent variables, if input not available # Prepare image VAE latents in latent space image_vae_latents = self.prepare_image_vae_latents( batch_size=batch_size, num_prompts_per_image=multiplier, num_channels_latents=self.num_channels_latents, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=vae_latents, ) # Prepare image CLIP latents image_clip_latents = self.prepare_image_clip_latents( batch_size=batch_size, num_prompts_per_image=multiplier, clip_img_dim=self.image_encoder_projection_dim, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=clip_latents, ) # 5. Set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # max_timestep = timesteps[0] max_timestep = self.scheduler.config.num_train_timesteps # 6. Prepare latent variables if mode == "joint": latents = self._combine_joint(image_vae_latents, image_clip_latents, prompt_embeds) elif mode in ["text2img", "img"]: latents = self._combine(image_vae_latents, image_clip_latents) elif mode in ["img2text", "text"]: latents = prompt_embeds # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) logger.debug(f"Scheduler extra step kwargs: {extra_step_kwargs}") # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # predict the noise residual # Also applies classifier-free guidance as described in the UniDiffuser paper noise_pred = self._get_noise_pred( mode, latents, t, prompt_embeds, image_vae_latents, image_clip_latents, max_timestep, data_type, guidance_scale, generator, device, height, width, ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() # 9. Post-processing image = None text = None if mode == "joint": image_vae_latents, image_clip_latents, text_latents = self._split_joint(latents, height, width) if not output_type == "latent": # Map latent VAE image back to pixel space image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = image_vae_latents text = self.decode_text_latents(text_latents, device) elif mode in ["text2img", "img"]: image_vae_latents, image_clip_latents = self._split(latents, height, width) if not output_type == "latent": # Map latent VAE image back to pixel space image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = image_vae_latents elif mode in ["img2text", "text"]: text_latents = latents text = self.decode_text_latents(text_latents, device) self.maybe_free_model_hooks() # 10. 
Postprocess the image, if necessary if image is not None: do_denormalize = [True] * image.shape[0] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, text) return ImageTextPipelineOutput(images=image, text=text)
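# Usage sketch for the mode API above (`set_text_to_image_mode`, `set_joint_mode`, `reset_mode`, and the
# automatic inference in `_infer_mode`). This is illustrative only: it assumes the public
# `UniDiffuserPipeline` entry point and the "thu-ml/unidiffuser-v1" checkpoint mentioned in the `data_type`
# docstring; the prompt and image URL are placeholders, not part of this module.
#
#   import torch
#   from diffusers import UniDiffuserPipeline
#   from diffusers.utils import load_image
#
#   pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16).to("cuda")
#
#   # text2img: the mode is inferred because only a prompt is supplied
#   image = pipe(prompt="an astronaut riding a horse on the moon", num_inference_steps=20).images[0]
#
#   # img2text: the mode is inferred because only an image is supplied
#   init_image = load_image("https://example.com/input.png")  # placeholder URL
#   caption = pipe(image=init_image, num_inference_steps=20).text[0]
#
#   # joint image-text generation: set the mode explicitly, then reset so later calls re-infer it
#   pipe.set_joint_mode()
#   sample = pipe(num_inference_steps=20)
#   image, caption = sample.images[0], sample.text[0]
#   pipe.reset_mode()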
diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py", "repo_id": "diffusers", "token_count": 31194 }
177
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer from ...schedulers import DDPMWuerstchenScheduler from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import WuerstchenPriorPipeline, WuerstchenDecoderPipeline >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained( ... "warp-ai/wuerstchen-prior", torch_dtype=torch.float16 ... ).to("cuda") >>> gen_pipe = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=torch.float16).to( ... "cuda" ... ) >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" >>> prior_output = prior_pipe(prompt) >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt) ``` """ class WuerstchenDecoderPipeline(DeprecatedPipelineMixin, DiffusionPipeline): """ Pipeline for generating images from the Wuerstchen model. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: tokenizer (`CLIPTokenizer`): The CLIP tokenizer. text_encoder (`CLIPTextModel`): The CLIP text encoder. decoder ([`WuerstchenDiffNeXt`]): The WuerstchenDiffNeXt unet decoder. vqgan ([`PaellaVQModel`]): The VQGAN model. scheduler ([`DDPMWuerstchenScheduler`]): A scheduler to be used in combination with `decoder` to denoise the image latents. latent_dim_scale (float, `optional`, defaults to 10.67): Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and width=int(24*10.67)=256 in order to match the training conditions. 
""" model_cpu_offload_seq = "text_encoder->decoder->vqgan" _callback_tensor_inputs = [ "latents", "text_encoder_hidden_states", "negative_prompt_embeds", "image_embeddings", ] def __init__( self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, decoder: WuerstchenDiffNeXt, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, latent_dim_scale: float = 10.67, ) -> None: super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, decoder=decoder, scheduler=scheduler, vqgan=vqgan, ) self.register_to_config(latent_dim_scale=latent_dim_scale) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, ): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] attention_mask = attention_mask[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) text_encoder_hidden_states = text_encoder_output.last_hidden_state text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_text_encoder_hidden_states = None if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds_text_encoder_output = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device) ) uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes return text_encoder_hidden_states, uncond_text_encoder_hidden_states @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image_embeddings: Union[torch.Tensor, List[torch.Tensor]], prompt: Union[str, List[str]] = None, num_inference_steps: int = 12, timesteps: Optional[List[float]] = None, guidance_scale: float = 0.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. Args: image_embedding (`torch.Tensor` or `List[torch.Tensor]`): Image Embeddings either extracted from an image or generated by a Prior Model. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. num_inference_steps (`int`, *optional*, defaults to 12): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 0.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `decoder_guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) # 0. Define commonly used variables device = self._execution_device dtype = self.decoder.dtype self._guidance_scale = guidance_scale # 1. Check inputs. Raise error if not correct if not isinstance(prompt, list): if isinstance(prompt, str): prompt = [prompt] else: raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") if self.do_classifier_free_guidance: if negative_prompt is not None and not isinstance(negative_prompt, list): if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] else: raise TypeError( f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}."
) if isinstance(image_embeddings, list): image_embeddings = torch.cat(image_embeddings, dim=0) if isinstance(image_embeddings, np.ndarray): image_embeddings = torch.Tensor(image_embeddings, device=device).to(dtype=dtype) if not isinstance(image_embeddings, torch.Tensor): raise TypeError( f"'image_embeddings' must be of type 'torch.Tensor' or 'np.array', but got {type(image_embeddings)}." ) if not isinstance(num_inference_steps, int): raise TypeError( f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}\ In Case you want to provide explicit timesteps, please use the 'timesteps' argument." ) # 2. Encode caption prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, image_embeddings.size(0) * num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, ) text_encoder_hidden_states = ( torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds ) effnet = ( torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) if self.do_classifier_free_guidance else image_embeddings ) # 3. Determine latent shape of latents latent_height = int(image_embeddings.size(2) * self.config.latent_dim_scale) latent_width = int(image_embeddings.size(3) * self.config.latent_dim_scale) latent_features_shape = (image_embeddings.size(0) * num_images_per_prompt, 4, latent_height, latent_width) # 4. Prepare and set timesteps if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latents latents = self.prepare_latents(latent_features_shape, dtype, device, generator, latents, self.scheduler) # 6. Run denoising loop self._num_timesteps = len(timesteps[:-1]) for i, t in enumerate(self.progress_bar(timesteps[:-1])): ratio = t.expand(latents.size(0)).to(dtype) # 7. Denoise latents predicted_latents = self.decoder( torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, effnet=effnet, clip=text_encoder_hidden_states, ) # 8. Check for classifier free guidance and apply it if self.do_classifier_free_guidance: predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2) predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) # 9. Renoise latents to next timestep latents = self.scheduler.step( model_output=predicted_latents, timestep=ratio, sample=latents, generator=generator, ).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) image_embeddings = callback_outputs.pop("image_embeddings", image_embeddings) text_encoder_hidden_states = callback_outputs.pop( "text_encoder_hidden_states", text_encoder_hidden_states ) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if output_type not in ["pt", "np", "pil", "latent"]: raise ValueError( f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}" ) if not output_type == "latent": # 10. 
Scale and decode the image latents with vq-vae latents = self.vqgan.config.scale_factor * latents images = self.vqgan.decode(latents).sample.clamp(0, 1) if output_type == "np": images = images.permute(0, 2, 3, 1).cpu().float().numpy() elif output_type == "pil": images = images.permute(0, 2, 3, 1).cpu().float().numpy() images = self.numpy_to_pil(images) else: images = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return images return ImagePipelineOutput(images)
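For reference, the classifier-free guidance step in the loop above combines the unconditional and conditional predictions with `torch.lerp`, which for a guidance weight `g` is algebraically the familiar `uncond + g * (cond - uncond)`. A minimal, self-contained sketch of that equivalence (stand-in tensors, not the pipeline's real latents):

```py
import torch

g = 4.0  # guidance scale
pred_text = torch.randn(2, 4, 8, 8)    # stand-in for the text-conditioned prediction
pred_uncond = torch.randn(2, 4, 8, 8)  # stand-in for the unconditional prediction

guided_lerp = torch.lerp(pred_uncond, pred_text, g)          # what the loop above computes
guided_manual = pred_uncond + g * (pred_text - pred_uncond)  # the textbook CFG formula

assert torch.allclose(guided_lerp, guided_manual, atol=1e-5)
```

Note that `do_classifier_free_guidance` is only `True` for `guidance_scale > 1`, so with the default `guidance_scale=0.0` the batched forward pass and the lerp are skipped entirely.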
diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py", "repo_id": "diffusers", "token_count": 9354 }
178
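For orientation, a hedged sketch of how this decoder pipeline is typically driven: the image embeddings come from the Würstchen prior, and the decoder turns them into pixels. The class and checkpoint names below (`WuerstchenPriorPipeline`, `WuerstchenDecoderPipeline`, `warp-ai/wuerstchen-prior`, `warp-ai/wuerstchen`) are assumptions from common usage, not taken from the code above:

```py
import torch
from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline

# Assumed checkpoints; substitute whichever prior/decoder weights you actually use.
prior = WuerstchenPriorPipeline.from_pretrained(
    "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
).to("cuda")
decoder = WuerstchenDecoderPipeline.from_pretrained(
    "warp-ai/wuerstchen", torch_dtype=torch.float16
).to("cuda")

prompt = "an astronaut riding a horse, photorealistic"

# Stage C: the prior produces image embeddings from the text prompt.
prior_output = prior(prompt, guidance_scale=4.0)

# Stage B: the decoder consumes those embeddings; guidance_scale defaults to 0.0 here.
image = decoder(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    num_inference_steps=12,
    output_type="pil",
).images[0]
```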
from typing import TYPE_CHECKING, Any, Dict, List, Union from diffusers.utils.import_utils import is_optimum_quanto_version from ...utils import ( get_module_from_name, is_accelerate_available, is_accelerate_version, is_optimum_quanto_available, is_torch_available, logging, ) from ..base import DiffusersQuantizer if TYPE_CHECKING: from ...models.modeling_utils import ModelMixin if is_torch_available(): import torch if is_accelerate_available(): from accelerate.utils import CustomDtype, set_module_tensor_to_device if is_optimum_quanto_available(): from .utils import _replace_with_quanto_layers logger = logging.get_logger(__name__) class QuantoQuantizer(DiffusersQuantizer): r""" Diffusers Quantizer for Optimum Quanto """ use_keep_in_fp32_modules = True requires_calibration = False required_packages = ["quanto", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) def validate_environment(self, *args, **kwargs): if not is_optimum_quanto_available(): raise ImportError( "Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)" ) if not is_optimum_quanto_version(">=", "0.2.6"): raise ImportError( "Loading an optimum-quanto quantized model requires `optimum-quanto>=0.2.6`. " "Please upgrade your installation with `pip install --upgrade optimum-quanto" ) if not is_accelerate_available(): raise ImportError( "Loading an optimum-quanto quantized model requires accelerate library (`pip install accelerate`)" ) device_map = kwargs.get("device_map", None) if isinstance(device_map, dict) and len(device_map.keys()) > 1: raise ValueError( "`device_map` for multi-GPU inference or CPU/disk offload is currently not supported with Diffusers and the Quanto backend" ) def check_if_quantized_param( self, model: "ModelMixin", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ): # Quanto imports diffusers internally. This is here to prevent circular imports from optimum.quanto import QModuleMixin, QTensor from optimum.quanto.tensor.packed import PackedTensor module, tensor_name = get_module_from_name(model, param_name) if self.pre_quantized and any(isinstance(module, t) for t in [QTensor, PackedTensor]): return True elif isinstance(module, QModuleMixin) and "weight" in tensor_name: return not module.frozen return False def create_quantized_param( self, model: "ModelMixin", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", *args, **kwargs, ): """ Create the quantized parameter by calling .freeze() after setting it to the module. 
""" dtype = kwargs.get("dtype", torch.float32) module, tensor_name = get_module_from_name(model, param_name) if self.pre_quantized: setattr(module, tensor_name, param_value) else: set_module_tensor_to_device(model, param_name, target_device, param_value, dtype) module.freeze() module.weight.requires_grad = False def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if is_accelerate_version(">=", "0.27.0"): mapping = { "int8": torch.int8, "float8": CustomDtype.FP8, "int4": CustomDtype.INT4, "int2": CustomDtype.INT2, } target_dtype = mapping[self.quantization_config.weights_dtype] return target_dtype def update_torch_dtype(self, torch_dtype: "torch.dtype" = None) -> "torch.dtype": if torch_dtype is None: logger.info("You did not specify `torch_dtype` in `from_pretrained`. Setting it to `torch.float32`.") torch_dtype = torch.float32 return torch_dtype def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]: # Quanto imports diffusers internally. This is here to prevent circular imports from optimum.quanto import QModuleMixin not_missing_keys = [] for name, module in model.named_modules(): if isinstance(module, QModuleMixin): for missing in missing_keys: if ( (name in missing or name in f"{prefix}.{missing}") and not missing.endswith(".weight") and not missing.endswith(".bias") ): not_missing_keys.append(missing) return [k for k in missing_keys if k not in not_missing_keys] def _process_model_before_weight_loading( self, model: "ModelMixin", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): self.modules_to_not_convert = self.quantization_config.modules_to_not_convert if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) model = _replace_with_quanto_layers( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, pre_quantized=self.pre_quantized, ) model.config.quantization_config = self.quantization_config def _process_model_after_weight_loading(self, model, **kwargs): return model @property def is_trainable(self): return True @property def is_serializable(self): return True @property def is_compileable(self) -> bool: return True
diffusers/src/diffusers/quantizers/quanto/quanto_quantizer.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/quanto/quanto_quantizer.py", "repo_id": "diffusers", "token_count": 2803 }
179
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class DDIMSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr def rescale_zero_terminal_snr(betas): """ Rescales betas to have zero terminal SNR Based on https://huggingface.co/papers/2305.08891 (Algorithm 1) Args: betas (`torch.Tensor`): the betas that the scheduler is being initialized with. 
Returns: `torch.Tensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDIMInverseScheduler(SchedulerMixin, ConfigMixin): """ `DDIMInverseScheduler` is the reverse scheduler of [`DDIMScheduler`]. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. clip_sample (`bool`, defaults to `True`): Clip the predicted sample for numerical stability. clip_sample_range (`float`, defaults to 1.0): The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. set_alpha_to_one (`bool`, defaults to `True`): Each diffusion step uses the alphas product value at that step and at the previous one. For the final step there is no previous alpha. When this option is `True` the previous alpha product is fixed to 0, otherwise it uses the alpha value at step `num_train_timesteps - 1`. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). timestep_spacing (`str`, defaults to `"leading"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
""" order = 1 ignore_for_config = ["kwargs"] _deprecated_kwargs = ["set_alpha_to_zero"] @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_one: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, timestep_spacing: str = "leading", rescale_betas_zero_snr: bool = False, **kwargs, ): if kwargs.get("set_alpha_to_zero", None) is not None: deprecation_message = ( "The `set_alpha_to_zero` argument is deprecated. Please use `set_alpha_to_one` instead." ) deprecate("set_alpha_to_zero", "1.0.0", deprecation_message, standard_warn=False) set_alpha_to_one = kwargs["set_alpha_to_zero"] if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") # Rescale for zero SNR if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the initial step, there is no current alphas_cumprod, and the index is out of bounds # `set_alpha_to_one` decides whether we set this parameter simply to one # in this case, self.step() just output the predicted noise # or whether we use the initial alpha used in training the diffusion model. self.initial_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64)) # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. """ if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." 
) self.num_inference_steps = num_inference_steps # "leading" and "trailing" corresponds to annotation of Table 2. of https://huggingface.co/papers/2305.08891 if self.config.timestep_spacing == "leading": step_ratio = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)[::-1]).astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." ) self.timesteps = torch.from_numpy(timesteps).to(device) def step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, return_dict: bool = True, ) -> Union[DDIMSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. timestep (`float`): The current discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. eta (`float`): The weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`, defaults to `False`): If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` has no effect. variance_noise (`torch.Tensor`): Alternative to generating noise with `generator` by directly providing the noise for the variance itself. Useful for methods such as [`CycleDiffusion`]. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ # 1. get previous step value (=t+1) prev_timestep = timestep timestep = min( timestep - self.config.num_train_timesteps // self.num_inference_steps, self.config.num_train_timesteps - 1 ) # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process alpha_prod_t = self.alphas_cumprod[timestep] if timestep >= 0 else self.initial_alpha_cumprod alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] beta_prod_t = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502 if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) pred_epsilon = model_output elif self.config.prediction_type == "sample": pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://huggingface.co/papers/2010.02502 pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://huggingface.co/papers/2010.02502 prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def __len__(self): return self.config.num_train_timesteps
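A minimal sketch of driving `DDIMInverseScheduler` in an inversion loop. The beta settings are the usual Stable Diffusion values and the zero noise prediction is a placeholder for a real UNet call; both are assumptions for illustration only:

```py
import torch
from diffusers import DDIMInverseScheduler

inverse_scheduler = DDIMInverseScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
)
inverse_scheduler.set_timesteps(50)

# Stand-in for VAE-encoded image latents; in practice these come from encoding a real image.
latents = torch.randn(1, 4, 64, 64)

for t in inverse_scheduler.timesteps:  # timesteps run forward (0 -> T) for inversion
    # Placeholder for `unet(latents, t, encoder_hidden_states=...).sample`.
    noise_pred = torch.zeros_like(latents)
    latents = inverse_scheduler.step(noise_pred, t, latents).prev_sample

# `latents` now approximates the noise that DDIM sampling would map back to the input.
```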
diffusers/src/diffusers/schedulers/scheduling_ddim_inverse.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ddim_inverse.py", "repo_id": "diffusers", "token_count": 7304 }
180
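The `rescale_zero_terminal_snr` helper in the scheduler file above is easy to sanity-check numerically: after the shift-and-scale, the square root of the final cumulative alpha product is exactly zero (pure noise at the terminal step) while the first step is unchanged. A self-contained sketch that inlines the same algebra on a plain linear beta schedule:

```py
import torch

betas = torch.linspace(0.0001, 0.02, 1000)

alphas_bar_sqrt = torch.cumprod(1.0 - betas, dim=0).sqrt()
first, last = alphas_bar_sqrt[0].clone(), alphas_bar_sqrt[-1].clone()

# Shift so the last step hits zero SNR, then rescale so the first step keeps its old value.
alphas_bar_sqrt = (alphas_bar_sqrt - last) * first / (first - last)

assert torch.isclose(alphas_bar_sqrt[-1], torch.tensor(0.0))
assert torch.isclose(alphas_bar_sqrt[0], first)
```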
# Copyright 2025 Shuchen Xue, etc. in University of Chinese Academy of Sciences Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: check https://huggingface.co/papers/2309.05019 # The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py import math from typing import Callable, List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate, is_scipy_available from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput if is_scipy_available(): import scipy.stats # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class SASolverScheduler(SchedulerMixin, ConfigMixin): """ `SASolverScheduler` is a fast dedicated high-order solver for diffusion SDEs. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. 
trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. predictor_order (`int`, defaults to 2): The predictor order which can be `1` or `2` or `3` or '4'. It is recommended to use `predictor_order=2` for guided sampling, and `predictor_order=3` for unconditional sampling. corrector_order (`int`, defaults to 2): The corrector order which can be `1` or `2` or `3` or '4'. It is recommended to use `corrector_order=2` for guided sampling, and `corrector_order=3` for unconditional sampling. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). tau_func (`Callable`, *optional*): Stochasticity during the sampling. Default in init is `lambda t: 1 if t >= 200 and t <= 800 else 0`. SA-Solver will sample from vanilla diffusion ODE if tau_func is set to `lambda t: 0`. SA-Solver will sample from vanilla diffusion SDE if tau_func is set to `lambda t: 1`. For more details, please check https://huggingface.co/papers/2309.05019 thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++"`. algorithm_type (`str`, defaults to `data_prediction`): Algorithm type for the solver; can be `data_prediction` or `noise_prediction`. It is recommended to use `data_prediction` with `solver_order=2` for guided sampling like in Stable Diffusion. lower_order_final (`bool`, defaults to `True`): Whether to use lower-order solvers in the final steps. Default = True. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. use_exponential_sigmas (`bool`, *optional*, defaults to `False`): Whether to use exponential sigmas for step sizes in the noise schedule during the sampling process. use_beta_sigmas (`bool`, *optional*, defaults to `False`): Whether to use beta sigmas for step sizes in the noise schedule during the sampling process. Refer to [Beta Sampling is All You Need](https://huggingface.co/papers/2407.12173) for more information. lambda_min_clipped (`float`, defaults to `-inf`): Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the cosine (`squaredcos_cap_v2`) noise schedule. variance_type (`str`, *optional*): Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output contains the predicted Gaussian variance. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. 
""" _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, predictor_order: int = 2, corrector_order: int = 2, prediction_type: str = "epsilon", tau_func: Optional[Callable] = None, thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "data_prediction", lower_order_final: bool = True, use_karras_sigmas: Optional[bool] = False, use_exponential_sigmas: Optional[bool] = False, use_beta_sigmas: Optional[bool] = False, use_flow_sigmas: Optional[bool] = False, flow_shift: Optional[float] = 1.0, lambda_min_clipped: float = -float("inf"), variance_type: Optional[str] = None, timestep_spacing: str = "linspace", steps_offset: int = 0, ): if self.config.use_beta_sigmas and not is_scipy_available(): raise ImportError("Make sure to install scipy if you want to use beta sigmas.") if sum([self.config.use_beta_sigmas, self.config.use_exponential_sigmas, self.config.use_karras_sigmas]) > 1: raise ValueError( "Only one of `config.use_beta_sigmas`, `config.use_exponential_sigmas`, `config.use_karras_sigmas` can be used." ) if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = ( torch.linspace( beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32, ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # Currently we only support VP-type noise schedule self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 if algorithm_type not in ["data_prediction", "noise_prediction"]: raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") # setable values self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.timestep_list = [None] * max(predictor_order, corrector_order - 1) self.model_outputs = [None] * max(predictor_order, corrector_order - 1) if tau_func is None: self.tau_func = lambda t: 1 if t >= 200 and t <= 800 else 0 else: self.tau_func = tau_func self.predict_x0 = algorithm_type == "data_prediction" self.lower_order_nums = 0 self.last_sample = None self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication @property def step_index(self): """ The index counter for current timestep. It will increase 1 after each scheduler step. """ return self._step_index @property def begin_index(self): """ The index for the first timestep. It should be set from pipeline with `set_begin_index` method. 
""" return self._begin_index # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index def set_begin_index(self, begin_index: int = 0): """ Sets the begin index for the scheduler. This function should be run from pipeline before the inference. Args: begin_index (`int`): The begin index for the scheduler. """ self._begin_index = begin_index def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ # Clipping the minimum of all lambda(t) for numerical stability. # This is critical for cosine (squaredcos_cap_v2) noise schedule. clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://huggingface.co/papers/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = last_timestep // (num_inference_steps + 1) # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) if self.config.use_karras_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) elif self.config.use_exponential_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) elif self.config.use_beta_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) elif self.config.use_flow_sigmas: alphas = np.linspace(1, 1 / self.config.num_train_timesteps, num_inference_steps + 1) sigmas = 1.0 - alphas sigmas = np.flip(self.config.flow_shift * sigmas / (1 + (self.config.flow_shift - 1) * sigmas))[:-1].copy() timesteps = (sigmas * self.config.num_train_timesteps).copy() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [ None, ] * max(self.config.predictor_order, self.config.corrector_order - 1) self.lower_order_nums = 0 self.last_sample = None # add an index counter for schedulers that allow duplicated timesteps self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://huggingface.co/papers/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(np.maximum(sigma, 1e-10)) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t def _sigma_to_alpha_sigma_t(self, sigma): if self.config.use_flow_sigmas: alpha_t = 1 - sigma sigma_t = sigma else: alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: """Constructs the noise schedule of Karras et al. 
(2022).""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor: """Constructs an exponential noise schedule.""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps)) return sigmas # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta def _convert_to_beta( self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6 ) -> torch.Tensor: """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() sigmas = np.array( [ sigma_min + (ppf * (sigma_max - sigma_min)) for ppf in [ scipy.stats.beta.ppf(timestep, alpha, beta) for timestep in 1 - np.linspace(0, 1, num_inference_steps) ] ] ) return sigmas def convert_model_output( self, model_output: torch.Tensor, *args, sample: torch.Tensor = None, **kwargs, ) -> torch.Tensor: """ Convert the model output to the corresponding type the data_prediction/noise_prediction algorithm needs. Noise_prediction is designed to discretize an integral of the noise prediction model, and data_prediction is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either data_prediction or noise_prediction for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. Returns: `torch.Tensor`: The converted model output. 
""" timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyword argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) # SA-Solver_data_prediction needs to solve an integral of the data prediction model. if self.config.algorithm_type in ["data_prediction"]: if self.config.prediction_type == "epsilon": # SA-Solver only needs the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: model_output = model_output[:, :3] x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == "sample": x0_pred = model_output elif self.config.prediction_type == "v_prediction": x0_pred = alpha_t * sample - sigma_t * model_output elif self.config.prediction_type == "flow_prediction": sigma_t = self.sigmas[self.step_index] x0_pred = sample - sigma_t * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " "`v_prediction`, or `flow_prediction` for the SASolverScheduler." ) if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred # SA-Solver_noise_prediction needs to solve an integral of the noise prediction model. elif self.config.algorithm_type in ["noise_prediction"]: if self.config.prediction_type == "epsilon": # SA-Solver only needs the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == "sample": epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == "v_prediction": epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the SASolverScheduler." 
) if self.config.thresholding: alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep] x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def get_coefficients_exponential_negative(self, order, interval_start, interval_end): """ Calculate the integral of exp(-x) * x^order dx from interval_start to interval_end """ assert order in [0, 1, 2, 3], "order is only supported for 0, 1, 2 and 3" if order == 0: return torch.exp(-interval_end) * (torch.exp(interval_end - interval_start) - 1) elif order == 1: return torch.exp(-interval_end) * ( (interval_start + 1) * torch.exp(interval_end - interval_start) - (interval_end + 1) ) elif order == 2: return torch.exp(-interval_end) * ( (interval_start**2 + 2 * interval_start + 2) * torch.exp(interval_end - interval_start) - (interval_end**2 + 2 * interval_end + 2) ) elif order == 3: return torch.exp(-interval_end) * ( (interval_start**3 + 3 * interval_start**2 + 6 * interval_start + 6) * torch.exp(interval_end - interval_start) - (interval_end**3 + 3 * interval_end**2 + 6 * interval_end + 6) ) def get_coefficients_exponential_positive(self, order, interval_start, interval_end, tau): """ Calculate the integral of exp(x(1+tau^2)) * x^order dx from interval_start to interval_end """ assert order in [0, 1, 2, 3], "order is only supported for 0, 1, 2 and 3" # after change of variable(cov) interval_end_cov = (1 + tau**2) * interval_end interval_start_cov = (1 + tau**2) * interval_start if order == 0: return ( torch.exp(interval_end_cov) * (1 - torch.exp(-(interval_end_cov - interval_start_cov))) / (1 + tau**2) ) elif order == 1: return ( torch.exp(interval_end_cov) * ( (interval_end_cov - 1) - (interval_start_cov - 1) * torch.exp(-(interval_end_cov - interval_start_cov)) ) / ((1 + tau**2) ** 2) ) elif order == 2: return ( torch.exp(interval_end_cov) * ( (interval_end_cov**2 - 2 * interval_end_cov + 2) - (interval_start_cov**2 - 2 * interval_start_cov + 2) * torch.exp(-(interval_end_cov - interval_start_cov)) ) / ((1 + tau**2) ** 3) ) elif order == 3: return ( torch.exp(interval_end_cov) * ( (interval_end_cov**3 - 3 * interval_end_cov**2 + 6 * interval_end_cov - 6) - (interval_start_cov**3 - 3 * interval_start_cov**2 + 6 * interval_start_cov - 6) * torch.exp(-(interval_end_cov - interval_start_cov)) ) / ((1 + tau**2) ** 4) ) def lagrange_polynomial_coefficient(self, order, lambda_list): """ Calculate the coefficient of lagrange polynomial """ assert order in [0, 1, 2, 3] assert order == len(lambda_list) - 1 if order == 0: return [[1]] elif order == 1: return [ [ 1 / (lambda_list[0] - lambda_list[1]), -lambda_list[1] / (lambda_list[0] - lambda_list[1]), ], [ 1 / (lambda_list[1] - lambda_list[0]), -lambda_list[0] / (lambda_list[1] - lambda_list[0]), ], ] elif order == 2: denominator1 = (lambda_list[0] - lambda_list[1]) * (lambda_list[0] - lambda_list[2]) denominator2 = (lambda_list[1] - lambda_list[0]) * (lambda_list[1] - lambda_list[2]) denominator3 = (lambda_list[2] - lambda_list[0]) * (lambda_list[2] - lambda_list[1]) return [ [ 1 / denominator1, (-lambda_list[1] - lambda_list[2]) / denominator1, lambda_list[1] * lambda_list[2] / denominator1, ], [ 1 / denominator2, (-lambda_list[0] - lambda_list[2]) / denominator2, lambda_list[0] * lambda_list[2] / denominator2, ], [ 1 / denominator3, (-lambda_list[0] - lambda_list[1]) / denominator3, lambda_list[0] * lambda_list[1] / denominator3, ], ] elif order == 3: denominator1 = ( (lambda_list[0] - 
lambda_list[1]) * (lambda_list[0] - lambda_list[2]) * (lambda_list[0] - lambda_list[3]) ) denominator2 = ( (lambda_list[1] - lambda_list[0]) * (lambda_list[1] - lambda_list[2]) * (lambda_list[1] - lambda_list[3]) ) denominator3 = ( (lambda_list[2] - lambda_list[0]) * (lambda_list[2] - lambda_list[1]) * (lambda_list[2] - lambda_list[3]) ) denominator4 = ( (lambda_list[3] - lambda_list[0]) * (lambda_list[3] - lambda_list[1]) * (lambda_list[3] - lambda_list[2]) ) return [ [ 1 / denominator1, (-lambda_list[1] - lambda_list[2] - lambda_list[3]) / denominator1, ( lambda_list[1] * lambda_list[2] + lambda_list[1] * lambda_list[3] + lambda_list[2] * lambda_list[3] ) / denominator1, (-lambda_list[1] * lambda_list[2] * lambda_list[3]) / denominator1, ], [ 1 / denominator2, (-lambda_list[0] - lambda_list[2] - lambda_list[3]) / denominator2, ( lambda_list[0] * lambda_list[2] + lambda_list[0] * lambda_list[3] + lambda_list[2] * lambda_list[3] ) / denominator2, (-lambda_list[0] * lambda_list[2] * lambda_list[3]) / denominator2, ], [ 1 / denominator3, (-lambda_list[0] - lambda_list[1] - lambda_list[3]) / denominator3, ( lambda_list[0] * lambda_list[1] + lambda_list[0] * lambda_list[3] + lambda_list[1] * lambda_list[3] ) / denominator3, (-lambda_list[0] * lambda_list[1] * lambda_list[3]) / denominator3, ], [ 1 / denominator4, (-lambda_list[0] - lambda_list[1] - lambda_list[2]) / denominator4, ( lambda_list[0] * lambda_list[1] + lambda_list[0] * lambda_list[2] + lambda_list[1] * lambda_list[2] ) / denominator4, (-lambda_list[0] * lambda_list[1] * lambda_list[2]) / denominator4, ], ] def get_coefficients_fn(self, order, interval_start, interval_end, lambda_list, tau): assert order in [1, 2, 3, 4] assert order == len(lambda_list), "the length of lambda list must be equal to the order" coefficients = [] lagrange_coefficient = self.lagrange_polynomial_coefficient(order - 1, lambda_list) for i in range(order): coefficient = 0 for j in range(order): if self.predict_x0: coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_positive( order - 1 - j, interval_start, interval_end, tau ) else: coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_negative( order - 1 - j, interval_start, interval_end ) coefficients.append(coefficient) assert len(coefficients) == order, "the length of coefficients does not match the order" return coefficients def stochastic_adams_bashforth_update( self, model_output: torch.Tensor, *args, sample: torch.Tensor, noise: torch.Tensor, order: int, tau: torch.Tensor, **kwargs, ) -> torch.Tensor: """ One step for the SA-Predictor. Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model at the current timestep. prev_timestep (`int`): The previous discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. order (`int`): The order of SA-Predictor at this timestep. Returns: `torch.Tensor`: The sample tensor at the previous timestep. 
""" prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyword argument") if noise is None: if len(args) > 2: noise = args[2] else: raise ValueError("missing `noise` as a required keyword argument") if order is None: if len(args) > 3: order = args[3] else: raise ValueError("missing `order` as a required keyword argument") if tau is None: if len(args) > 4: tau = args[4] else: raise ValueError("missing `tau` as a required keyword argument") if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) model_output_list = self.model_outputs sigma_t, sigma_s0 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) gradient_part = torch.zeros_like(sample) h = lambda_t - lambda_s0 lambda_list = [] for i in range(order): si = self.step_index - i alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) lambda_list.append(lambda_si) gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) x = sample if self.predict_x0: if ( order == 2 ): ## if order = 2 we do a modification that does not influence the convergence order similar to unipc. Note: This is used only for few steps sampling. # The added term is O(h^3). Empirically we find it will slightly improve the image quality. 
# ODE case # gradient_coefficients[0] += 1.0 * torch.exp(lambda_t) * (h ** 2 / 2 - (h - 1 + torch.exp(-h))) / (ns.marginal_lambda(t_prev_list[-1]) - ns.marginal_lambda(t_prev_list[-2])) # gradient_coefficients[1] -= 1.0 * torch.exp(lambda_t) * (h ** 2 / 2 - (h - 1 + torch.exp(-h))) / (ns.marginal_lambda(t_prev_list[-1]) - ns.marginal_lambda(t_prev_list[-2])) temp_sigma = self.sigmas[self.step_index - 1] temp_alpha_s, temp_sigma_s = self._sigma_to_alpha_sigma_t(temp_sigma) temp_lambda_s = torch.log(temp_alpha_s) - torch.log(temp_sigma_s) gradient_coefficients[0] += ( 1.0 * torch.exp((1 + tau**2) * lambda_t) * (h**2 / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2)) / (lambda_s0 - temp_lambda_s) ) gradient_coefficients[1] -= ( 1.0 * torch.exp((1 + tau**2) * lambda_t) * (h**2 / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2)) / (lambda_s0 - temp_lambda_s) ) for i in range(order): if self.predict_x0: gradient_part += ( (1 + tau**2) * sigma_t * torch.exp(-(tau**2) * lambda_t) * gradient_coefficients[i] * model_output_list[-(i + 1)] ) else: gradient_part += -(1 + tau**2) * alpha_t * gradient_coefficients[i] * model_output_list[-(i + 1)] if self.predict_x0: noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau**2 * h)) * noise else: noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * noise if self.predict_x0: x_t = torch.exp(-(tau**2) * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part else: x_t = (alpha_t / alpha_s0) * x + gradient_part + noise_part x_t = x_t.to(x.dtype) return x_t def stochastic_adams_moulton_update( self, this_model_output: torch.Tensor, *args, last_sample: torch.Tensor, last_noise: torch.Tensor, this_sample: torch.Tensor, order: int, tau: torch.Tensor, **kwargs, ) -> torch.Tensor: """ One step for the SA-Corrector. Args: this_model_output (`torch.Tensor`): The model outputs at `x_t`. this_timestep (`int`): The current timestep `t`. last_sample (`torch.Tensor`): The generated sample before the last predictor `x_{t-1}`. this_sample (`torch.Tensor`): The generated sample after the last predictor `x_{t}`. order (`int`): The order of SA-Corrector at this step. Returns: `torch.Tensor`: The corrected sample tensor at the current timestep. 
""" this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) if last_sample is None: if len(args) > 1: last_sample = args[1] else: raise ValueError("missing `last_sample` as a required keyword argument") if last_noise is None: if len(args) > 2: last_noise = args[2] else: raise ValueError("missing `last_noise` as a required keyword argument") if this_sample is None: if len(args) > 3: this_sample = args[3] else: raise ValueError("missing `this_sample` as a required keyword argument") if order is None: if len(args) > 4: order = args[4] else: raise ValueError("missing `order` as a required keyword argument") if tau is None: if len(args) > 5: tau = args[5] else: raise ValueError("missing `tau` as a required keyword argument") if this_timestep is not None: deprecate( "this_timestep", "1.0.0", "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) model_output_list = self.model_outputs sigma_t, sigma_s0 = ( self.sigmas[self.step_index], self.sigmas[self.step_index - 1], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) gradient_part = torch.zeros_like(this_sample) h = lambda_t - lambda_s0 lambda_list = [] for i in range(order): si = self.step_index - i alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) lambda_si = torch.log(alpha_si) - torch.log(sigma_si) lambda_list.append(lambda_si) model_prev_list = model_output_list + [this_model_output] gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) x = last_sample if self.predict_x0: if ( order == 2 ): ## if order = 2 we do a modification that does not influence the convergence order similar to UniPC. Note: This is used only for few steps sampling. # The added term is O(h^3). Empirically we find it will slightly improve the image quality. 
# ODE case # gradient_coefficients[0] += 1.0 * torch.exp(lambda_t) * (h / 2 - (h - 1 + torch.exp(-h)) / h) # gradient_coefficients[1] -= 1.0 * torch.exp(lambda_t) * (h / 2 - (h - 1 + torch.exp(-h)) / h) gradient_coefficients[0] += ( 1.0 * torch.exp((1 + tau**2) * lambda_t) * (h / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2 * h)) ) gradient_coefficients[1] -= ( 1.0 * torch.exp((1 + tau**2) * lambda_t) * (h / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2 * h)) ) for i in range(order): if self.predict_x0: gradient_part += ( (1 + tau**2) * sigma_t * torch.exp(-(tau**2) * lambda_t) * gradient_coefficients[i] * model_prev_list[-(i + 1)] ) else: gradient_part += -(1 + tau**2) * alpha_t * gradient_coefficients[i] * model_prev_list[-(i + 1)] if self.predict_x0: noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau**2 * h)) * last_noise else: noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * last_noise if self.predict_x0: x_t = torch.exp(-(tau**2) * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part else: x_t = (alpha_t / alpha_s0) * x + gradient_part + noise_part x_t = x_t.to(x.dtype) return x_t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index def _init_step_index(self, timestep): """ Initialize the step_index counter for the scheduler. """ if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator=None, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the SA-Solver. Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.step_index is None: self._init_step_index(timestep) use_corrector = self.step_index > 0 and self.last_sample is not None model_output_convert = self.convert_model_output(model_output, sample=sample) if use_corrector: current_tau = self.tau_func(self.timestep_list[-1]) sample = self.stochastic_adams_moulton_update( this_model_output=model_output_convert, last_sample=self.last_sample, last_noise=self.last_noise, this_sample=sample, order=self.this_corrector_order, tau=current_tau, ) for i in range(max(self.config.predictor_order, self.config.corrector_order - 1) - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.timestep_list[i] = self.timestep_list[i + 1] self.model_outputs[-1] = model_output_convert self.timestep_list[-1] = timestep noise = randn_tensor( model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype, ) if self.config.lower_order_final: this_predictor_order = min(self.config.predictor_order, len(self.timesteps) - self.step_index) this_corrector_order = min(self.config.corrector_order, len(self.timesteps) - self.step_index + 1) else: this_predictor_order = self.config.predictor_order this_corrector_order = self.config.corrector_order self.this_predictor_order = min(this_predictor_order, self.lower_order_nums + 1) # warmup for multistep self.this_corrector_order = min(this_corrector_order, self.lower_order_nums + 2) # warmup for multistep assert self.this_predictor_order > 0 assert self.this_corrector_order > 0 self.last_sample = sample self.last_noise = noise current_tau = self.tau_func(self.timestep_list[-1]) prev_sample = self.stochastic_adams_bashforth_update( model_output=model_output_convert, sample=sample, noise=noise, order=self.this_predictor_order, tau=current_tau, ) if self.lower_order_nums < max(self.config.predictor_order, self.config.corrector_order - 1): self.lower_order_nums += 1 # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. Returns: `torch.Tensor`: A scaled input sample. 
""" return sample # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor, ) -> torch.Tensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement # for the subsequent add_noise calls self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_sasolver.py/0
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class BitsAndBytesConfig(metaclass=DummyObject): _backends = ["bitsandbytes"] def __init__(self, *args, **kwargs): requires_backends(self, ["bitsandbytes"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["bitsandbytes"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["bitsandbytes"])
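These dummy classes exist so that `diffusers` can always expose the quantization config names at import time; any attempt to actually use them without the optional backend fails lazily with a descriptive `ImportError` raised by `requires_backends`. A small sketch of that behaviour (illustrative only; the exact message text comes from `requires_backends`):

```py
# In an environment where bitsandbytes is NOT installed, diffusers exposes the
# placeholder above instead of the real class, so the failure happens at use time.
try:
    from diffusers import BitsAndBytesConfig

    BitsAndBytesConfig(load_in_8bit=True)
except ImportError as err:
    print(err)  # points the user at installing the missing bitsandbytes backend
```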
diffusers/src/diffusers/utils/dummy_bitsandbytes_objects.py/0
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class TorchAoConfig(metaclass=DummyObject): _backends = ["torchao"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchao"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torchao"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torchao"])
diffusers/src/diffusers/utils/dummy_torchao_objects.py/0
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch utilities: Utilities related to PyTorch """ import functools from typing import List, Optional, Tuple, Union from . import logging from .import_utils import is_torch_available, is_torch_npu_available, is_torch_version if is_torch_available(): import torch from torch.fft import fftn, fftshift, ifftn, ifftshift logger = logging.get_logger(__name__) # pylint: disable=invalid-name try: from torch._dynamo import allow_in_graph as maybe_allow_in_graph except (ImportError, ModuleNotFoundError): def maybe_allow_in_graph(cls): return cls def randn_tensor( shape: Union[Tuple, List], generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None, device: Optional[Union[str, "torch.device"]] = None, dtype: Optional["torch.dtype"] = None, layout: Optional["torch.layout"] = None, ): """A helper function to create random tensors on the desired `device` with the desired `dtype`. When passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor is always created on the CPU. """ # device on which tensor is created defaults to device if isinstance(device, str): device = torch.device(device) rand_device = device batch_size = shape[0] layout = layout or torch.strided device = device or torch.device("cpu") if generator is not None: gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type if gen_device_type != device.type and gen_device_type == "cpu": rand_device = "cpu" if device != "mps": logger.info( f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" f" slightly speed up this function by passing a generator that was created on the {device} device." 
) elif gen_device_type != device.type and gen_device_type == "cuda": raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") # make sure generator list of length 1 is treated like a non-list if isinstance(generator, list) and len(generator) == 1: generator = generator[0] if isinstance(generator, list): shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) return latents def is_compiled_module(module) -> bool: """Check whether the module was compiled with torch.compile()""" if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): return False return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) def unwrap_module(module): """Unwraps a module if it was compiled with torch.compile()""" return module._orig_mod if is_compiled_module(module) else module def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor": """Fourier filter as introduced in FreeU (https://huggingface.co/papers/2309.11497). This version of the method comes from here: https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706 """ x = x_in B, C, H, W = x.shape # Non-power of 2 images must be float32 if (W & (W - 1)) != 0 or (H & (H - 1)) != 0: x = x.to(dtype=torch.float32) # fftn does not support bfloat16 elif x.dtype == torch.bfloat16: x = x.to(dtype=torch.float32) # FFT x_freq = fftn(x, dim=(-2, -1)) x_freq = fftshift(x_freq, dim=(-2, -1)) B, C, H, W = x_freq.shape mask = torch.ones((B, C, H, W), device=x.device) crow, ccol = H // 2, W // 2 mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale x_freq = x_freq * mask # IFFT x_freq = ifftshift(x_freq, dim=(-2, -1)) x_filtered = ifftn(x_freq, dim=(-2, -1)).real return x_filtered.to(dtype=x_in.dtype) def apply_freeu( resolution_idx: int, hidden_states: "torch.Tensor", res_hidden_states: "torch.Tensor", **freeu_kwargs ) -> Tuple["torch.Tensor", "torch.Tensor"]: """Applies the FreeU mechanism as introduced in https: //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU. Args: resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied. hidden_states (`torch.Tensor`): Inputs to the underlying block. res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block. s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
""" if resolution_idx == 0: num_half_channels = hidden_states.shape[1] // 2 hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"] res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"]) if resolution_idx == 1: num_half_channels = hidden_states.shape[1] // 2 hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"] res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"]) return hidden_states, res_hidden_states def get_torch_cuda_device_capability(): if torch.cuda.is_available(): device = torch.device("cuda") compute_capability = torch.cuda.get_device_capability(device) compute_capability = f"{compute_capability[0]}.{compute_capability[1]}" return float(compute_capability) else: return None @functools.lru_cache def get_device(): if torch.cuda.is_available(): return "cuda" elif is_torch_npu_available(): return "npu" elif hasattr(torch, "xpu") and torch.xpu.is_available(): return "xpu" elif torch.backends.mps.is_available(): return "mps" else: return "cpu" def empty_device_cache(device_type: Optional[str] = None): if device_type is None: device_type = get_device() if device_type in ["cpu"]: return device_mod = getattr(torch, device_type, torch.cuda) device_mod.empty_cache() def device_synchronize(device_type: Optional[str] = None): if device_type is None: device_type = get_device() device_mod = getattr(torch, device_type, torch.cuda) device_mod.synchronize()
diffusers/src/diffusers/utils/torch_utils.py/0
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXPipeline, LTXVideoTransformer3DModel, ) from diffusers.utils.testing_utils import floats_tensor, require_peft_backend sys.path.append(".") from utils import PeftLoraLoaderMixinTests # noqa: E402 @require_peft_backend class LTXVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = LTXPipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_classes = [FlowMatchEulerDiscreteScheduler] scheduler_kwargs = {} transformer_kwargs = { "in_channels": 8, "out_channels": 8, "patch_size": 1, "patch_size_t": 1, "num_attention_heads": 4, "attention_head_dim": 8, "cross_attention_dim": 32, "num_layers": 1, "caption_channels": 32, } transformer_cls = LTXVideoTransformer3DModel vae_kwargs = { "in_channels": 3, "out_channels": 3, "latent_channels": 8, "block_out_channels": (8, 8, 8, 8), "decoder_block_out_channels": (8, 8, 8, 8), "layers_per_block": (1, 1, 1, 1, 1), "decoder_layers_per_block": (1, 1, 1, 1, 1), "spatio_temporal_scaling": (True, True, False, False), "decoder_spatio_temporal_scaling": (True, True, False, False), "decoder_inject_noise": (False, False, False, False, False), "upsample_residual": (False, False, False, False), "upsample_factor": (1, 1, 1, 1), "timestep_conditioning": False, "patch_size": 1, "patch_size_t": 1, "encoder_causal": True, "decoder_causal": False, } vae_cls = AutoencoderKLLTXVideo tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" text_encoder_target_modules = ["q", "k", "v", "o"] @property def output_shape(self): return (1, 9, 32, 32, 3) def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 16 num_channels = 8 num_frames = 9 num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 latent_height = 8 latent_width = 8 generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_latent_frames, num_channels, latent_height, latent_width)) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "dance monkey", "num_frames": num_frames, "num_inference_steps": 4, "guidance_scale": 6.0, "height": 32, "width": 32, "max_sequence_length": sequence_length, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs def test_simple_inference_with_text_lora_denoiser_fused_multi(self): super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) @unittest.skip("Not supported in LTXVideo.") def 
test_simple_inference_with_text_denoiser_block_scale(self): pass @unittest.skip("Not supported in LTXVideo.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @unittest.skip("Not supported in LTXVideo.") def test_modify_padding_mode(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_partial_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_and_scale(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_fused(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_save_load(self): pass
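The hard-coded `num_latent_frames = 3` in `get_dummy_inputs` above follows directly from the VAE's temporal compression (two temporal downsampling stages in `spatio_temporal_scaling`, i.e. an assumed ratio of 4). A quick worked check of that arithmetic:

```py
# (num_frames - 1) // temporal_compression_ratio + 1, as noted in get_dummy_inputs
num_frames = 9
temporal_compression_ratio = 4  # 2 ** number of temporal downsampling stages (assumed)
num_latent_frames = (num_frames - 1) // temporal_compression_ratio + 1
assert num_latent_frames == 3  # matches the value used by the test
```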
diffusers/tests/lora/test_lora_layers_ltx_video.py/0
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import AutoencoderKLHunyuanVideo from diffusers.models.autoencoders.autoencoder_kl_hunyuan_video import prepare_causal_attention_mask from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderKLHunyuanVideoTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderKLHunyuanVideo main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_kl_hunyuan_video_config(self): return { "in_channels": 3, "out_channels": 3, "latent_channels": 4, "down_block_types": ( "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", ), "up_block_types": ( "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", ), "block_out_channels": (8, 8, 8, 8), "layers_per_block": 1, "act_fn": "silu", "norm_num_groups": 4, "scaling_factor": 0.476986, "spatial_compression_ratio": 8, "temporal_compression_ratio": 4, "mid_block_add_attention": True, } @property def dummy_input(self): batch_size = 2 num_frames = 9 num_channels = 3 sizes = (16, 16) image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 9, 16, 16) @property def output_shape(self): return (3, 9, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_kl_hunyuan_video_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_enable_disable_tiling(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_tiling() output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), 0.5, "VAE tiling should not affect the inference results", ) torch.manual_seed(0) model.disable_tiling() output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_tiling.detach().cpu().numpy().all(), output_without_tiling_2.detach().cpu().numpy().all(), "Without tiling outputs should match with the outputs when tiling is manually disabled.", ) def test_enable_disable_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] 
torch.manual_seed(0) model.enable_slicing() output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), 0.5, "VAE slicing should not affect the inference results", ) torch.manual_seed(0) model.disable_slicing() output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_slicing.detach().cpu().numpy().all(), output_without_slicing_2.detach().cpu().numpy().all(), "Without slicing outputs should match with the outputs when slicing is manually disabled.", ) def test_gradient_checkpointing_is_applied(self): expected_set = { "HunyuanVideoDecoder3D", "HunyuanVideoDownBlock3D", "HunyuanVideoEncoder3D", "HunyuanVideoMidBlock3D", "HunyuanVideoUpBlock3D", } super().test_gradient_checkpointing_is_applied(expected_set=expected_set) # We need to overwrite this test because the base test does not account length of down_block_types def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 16, 16, 16) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") @unittest.skip("Unsupported test.") def test_outputs_equivalence(self): pass def test_prepare_causal_attention_mask(self): def prepare_causal_attention_mask_orig( num_frames: int, height_width: int, dtype: torch.dtype, device: torch.device, batch_size: int = None ) -> torch.Tensor: seq_len = num_frames * height_width mask = torch.full((seq_len, seq_len), float("-inf"), dtype=dtype, device=device) for i in range(seq_len): i_frame = i // height_width mask[i, : (i_frame + 1) * height_width] = 0 if batch_size is not None: mask = mask.unsqueeze(0).expand(batch_size, -1, -1) return mask # test with some odd shapes original_mask = prepare_causal_attention_mask_orig( num_frames=31, height_width=111, dtype=torch.float32, device=torch_device ) new_mask = prepare_causal_attention_mask( num_frames=31, height_width=111, dtype=torch.float32, device=torch_device ) self.assertTrue( torch.allclose(original_mask, new_mask), "Causal attention mask should be the same", )
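The reference helper in `test_prepare_causal_attention_mask` builds a block-causal mask: every token attends to all tokens of its own frame and of earlier frames, and is masked with `-inf` for later frames. A tiny worked example (frame and token counts chosen only for illustration) makes the block structure visible:

```py
import torch


def reference_causal_mask(num_frames: int, height_width: int) -> torch.Tensor:
    # Same construction as the reference helper in the test above.
    seq_len = num_frames * height_width
    mask = torch.full((seq_len, seq_len), float("-inf"))
    for i in range(seq_len):
        i_frame = i // height_width
        mask[i, : (i_frame + 1) * height_width] = 0
    return mask


# Two frames of two spatial tokens each -> a 4x4 block lower-triangular mask:
# rows of frame 0 can only see frame 0, rows of frame 1 see frames 0 and 1.
print(reference_causal_mask(num_frames=2, height_width=2))
# tensor([[0., 0., -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., 0.],
#         [0., 0., 0., 0.]])
```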
diffusers/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py/0
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from torch import nn from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU from diffusers.models.embeddings import get_timestep_embedding from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from diffusers.models.transformers.transformer_2d import Transformer2DModel from diffusers.utils.testing_utils import ( backend_manual_seed, require_torch_accelerator_with_fp64, require_torch_version_greater_equal, torch_device, ) class EmbeddingsTests(unittest.TestCase): def test_timestep_embeddings(self): embedding_dim = 256 timesteps = torch.arange(16) t1 = get_timestep_embedding(timesteps, embedding_dim) # first vector should always be composed only of 0's and 1's assert (t1[0, : embedding_dim // 2] - 0).abs().sum() < 1e-5 assert (t1[0, embedding_dim // 2 :] - 1).abs().sum() < 1e-5 # last element of each vector should be one assert (t1[:, -1] - 1).abs().sum() < 1e-5 # For large embeddings (e.g. 128) the frequency of every vector is higher # than the previous one which means that the gradients of later vectors are # ALWAYS higher than the previous ones grad_mean = np.abs(np.gradient(t1, axis=-1)).mean(axis=1) prev_grad = 0.0 for grad in grad_mean: assert grad > prev_grad prev_grad = grad def test_timestep_flip_sin_cos(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=True) t1 = torch.cat([t1[:, embedding_dim // 2 :], t1[:, : embedding_dim // 2]], dim=-1) t2 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=False) assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3) def test_timestep_downscale_freq_shift(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0) t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1) # get cosine half (vectors that are wrapped into cosine) cosine_half = (t1 - t2)[:, embedding_dim // 2 :] # cosine needs to be negative assert (np.abs((cosine_half <= 0).numpy()) - 1).sum() < 1e-5 def test_sinoid_embeddings_hardcoded(self): embedding_dim = 64 timesteps = torch.arange(128) # standard unet, score_vde t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1, flip_sin_to_cos=False) # glide, ldm t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0, flip_sin_to_cos=True) # grad-tts t3 = get_timestep_embedding(timesteps, embedding_dim, scale=1000) assert torch.allclose( t1[23:26, 47:50].flatten().cpu(), torch.tensor([0.9646, 0.9804, 0.9892, 0.9615, 0.9787, 0.9882, 0.9582, 0.9769, 0.9872]), 1e-3, ) assert torch.allclose( t2[23:26, 47:50].flatten().cpu(), torch.tensor([0.3019, 0.2280, 0.1716, 0.3146, 0.2377, 0.1790, 0.3272, 0.2474, 0.1864]), 1e-3, ) assert torch.allclose( t3[23:26, 47:50].flatten().cpu(), torch.tensor([-0.9801, -0.9464, -0.9349, -0.3952, 0.8887, -0.9709, 0.5299, 
-0.2853, -0.9927]), 1e-3, ) class Upsample2DBlockTests(unittest.TestCase): def test_upsample_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=False) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) @require_torch_version_greater_equal("2.1") def test_upsample_bfloat16(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32).to(torch.bfloat16) upsample = Upsample2D(channels=32, use_conv=False) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254], dtype=torch.bfloat16 ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_conv(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=True) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.7145, 1.3773, 0.3492, 0.8448, 1.0839, -0.3341, 0.5956, 0.1250, -0.4841]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_conv_out_dim(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=True, out_channels=64) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 64, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.2703, 0.1656, -0.2538, -0.0553, -0.2984, 0.1044, 0.1155, 0.2579, 0.7755]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_transpose(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=False, use_conv_transpose=True) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.3028, -0.1582, 0.0071, 0.0350, -0.4799, -0.1139, 0.1056, -0.1153, -0.1046]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class Downsample2DBlockTests(unittest.TestCase): def test_downsample_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=False) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.0513, -0.3889, 0.0640, 0.0836, -0.5460, -0.0341, -0.0169, -0.6967, 0.1179]) max_diff = (output_slice.flatten() - expected_slice).abs().sum().item() assert max_diff <= 1e-3 # assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1) def test_downsample_with_conv(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913], ) assert 
torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_downsample_with_conv_pad1(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True, padding=1) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_downsample_with_conv_out_dim(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True, out_channels=16) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 16, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class ResnetBlock2DTests(unittest.TestCase): def test_resnet_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 64, 64) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9010, -0.2974, -0.8245, -1.3533, 0.8742, -0.9645, -2.0584, 1.3387, -0.4746], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_use_in_shortcut(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, use_in_shortcut=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 64, 64) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.2226, -1.0791, -0.1629, 0.3659, -0.2889, -1.2376, 0.0582, 0.9206, 0.0044], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_resnet_up(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, up=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 128, 128) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [1.2130, -0.8753, -0.9027, 1.5783, -0.5362, -0.5001, 1.0726, -0.7732, -0.4182], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_resnet_down(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, down=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def 
test_restnet_with_kernel_fir(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="fir", down=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.0934, -0.5729, 0.0909, -0.2710, -0.5044, 0.0243, -0.0665, -0.5267, -0.3136], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_kernel_sde_vp(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="sde_vp", down=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class Transformer2DModelTests(unittest.TestCase): def test_spatial_transformer_default(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 32, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=32, num_attention_heads=1, attention_head_dim=32, dropout=0.0, cross_attention_dim=None, ).to(torch_device) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, 32, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9455, -0.0066, -1.3933, -1.5878, 0.5325, -0.6486, -1.8648, 0.7515, -0.9689], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_cross_attention_dim(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 64, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=64, num_attention_heads=2, attention_head_dim=32, dropout=0.0, cross_attention_dim=64, ).to(torch_device) with torch.no_grad(): context = torch.randn(1, 4, 64).to(torch_device) attention_scores = spatial_transformer_block(sample, context).sample assert attention_scores.shape == (1, 64, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.0143, -0.6909, -2.1547, -1.8893, 1.4097, 0.1359, -0.2521, -1.3359, 0.2598], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_timestep(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_embeds_ada_norm = 5 sample = torch.randn(1, 64, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=64, num_attention_heads=2, attention_head_dim=32, dropout=0.0, cross_attention_dim=64, num_embeds_ada_norm=num_embeds_ada_norm, ).to(torch_device) with torch.no_grad(): timestep_1 = torch.tensor(1, dtype=torch.long).to(torch_device) timestep_2 = torch.tensor(2, dtype=torch.long).to(torch_device) attention_scores_1 = spatial_transformer_block(sample, timestep=timestep_1).sample attention_scores_2 = spatial_transformer_block(sample, timestep=timestep_2).sample assert attention_scores_1.shape == (1, 64, 64, 64) assert 
attention_scores_2.shape == (1, 64, 64, 64) output_slice_1 = attention_scores_1[0, -1, -3:, -3:] output_slice_2 = attention_scores_2[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3923, -1.0923, -1.7144, -1.5570, 1.4154, 0.1738, -0.1157, -1.2998, -0.1703], device=torch_device ) expected_slice_2 = torch.tensor( [-0.4311, -1.1376, -1.7732, -1.5997, 1.3450, 0.0964, -0.1569, -1.3590, -0.2348], device=torch_device ) assert torch.allclose(output_slice_1.flatten(), expected_slice, atol=1e-3) assert torch.allclose(output_slice_2.flatten(), expected_slice_2, atol=1e-3) def test_spatial_transformer_dropout(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 32, 64, 64).to(torch_device) spatial_transformer_block = ( Transformer2DModel( in_channels=32, num_attention_heads=2, attention_head_dim=16, dropout=0.3, cross_attention_dim=None, ) .to(torch_device) .eval() ) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, 32, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9380, -0.0083, -1.3771, -1.5819, 0.5209, -0.6441, -1.8545, 0.7563, -0.9615], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) @require_torch_accelerator_with_fp64 def test_spatial_transformer_discrete(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_embed = 5 sample = torch.randint(0, num_embed, (1, 32)).to(torch_device) spatial_transformer_block = ( Transformer2DModel( num_attention_heads=1, attention_head_dim=32, num_vector_embeds=num_embed, sample_size=16, ) .to(torch_device) .eval() ) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, num_embed - 1, 32) output_slice = attention_scores[0, -2:, -3:] expected_slice = torch.tensor([-1.7648, -1.0241, -2.0985, -1.8035, -1.6404, -1.2098], device=torch_device) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_default_norm_layers(self): spatial_transformer_block = Transformer2DModel(num_attention_heads=1, attention_head_dim=32, in_channels=32) assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == nn.LayerNorm assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm def test_spatial_transformer_ada_norm_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, num_embeds_ada_norm=5, ) assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == AdaLayerNorm assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm def test_spatial_transformer_default_ff_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, ) assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == GEGLU assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == nn.Linear dim = 32 inner_dim = 128 # First dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim # NOTE: inner_dim * 2 because GEGLU assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim * 2 # Second dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == 
inner_dim assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim def test_spatial_transformer_geglu_approx_ff_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, activation_fn="geglu-approximate", ) assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == ApproximateGELU assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == nn.Linear dim = 32 inner_dim = 128 # First dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim # Second dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim def test_spatial_transformer_attention_bias(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, attention_bias=True ) assert spatial_transformer_block.transformer_blocks[0].attn1.to_q.bias is not None assert spatial_transformer_block.transformer_blocks[0].attn1.to_k.bias is not None assert spatial_transformer_block.transformer_blocks[0].attn1.to_v.bias is not None
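The embedding tests above rely on two properties of `get_timestep_embedding`: the output has shape `(num_timesteps, embedding_dim)` with a sine half followed by a cosine half, and `flip_sin_to_cos=True` merely swaps those halves. A short sketch of both:

```py
import torch

from diffusers.models.embeddings import get_timestep_embedding

timesteps = torch.arange(10)
emb = get_timestep_embedding(timesteps, embedding_dim=16)
print(emb.shape)  # torch.Size([10, 16]) -> first 8 dims are sin, last 8 are cos

# flip_sin_to_cos=True only swaps the sin and cos halves, as the test verifies.
flipped = get_timestep_embedding(timesteps, embedding_dim=16, flip_sin_to_cos=True)
reordered = torch.cat([flipped[:, 8:], flipped[:, :8]], dim=-1)
assert torch.allclose(emb, reordered, atol=1e-3)
```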
diffusers/tests/models/test_layers_utils.py/0
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import CosmosTransformer3DModel from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class CosmosTransformer3DModelTests(ModelTesterMixin, unittest.TestCase): model_class = CosmosTransformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 1 num_channels = 4 num_frames = 1 height = 16 width = 16 text_embed_dim = 16 sequence_length = 12 fps = 30 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, text_embed_dim)).to(torch_device) attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) padding_mask = torch.zeros(batch_size, 1, height, width).to(torch_device) return { "hidden_states": hidden_states, "timestep": timestep, "encoder_hidden_states": encoder_hidden_states, "attention_mask": attention_mask, "fps": fps, "padding_mask": padding_mask, } @property def input_shape(self): return (4, 1, 16, 16) @property def output_shape(self): return (4, 1, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 4, "out_channels": 4, "num_attention_heads": 2, "attention_head_dim": 12, "num_layers": 2, "mlp_ratio": 2, "text_embed_dim": 16, "adaln_lora_dim": 4, "max_size": (4, 32, 32), "patch_size": (1, 2, 2), "rope_scale": (2.0, 1.0, 1.0), "concat_padding_mask": True, "extra_pos_embed_type": "learnable", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"CosmosTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) class CosmosTransformer3DModelVideoToWorldTests(ModelTesterMixin, unittest.TestCase): model_class = CosmosTransformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 1 num_channels = 4 num_frames = 1 height = 16 width = 16 text_embed_dim = 16 sequence_length = 12 fps = 30 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, text_embed_dim)).to(torch_device) attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) condition_mask = torch.ones(batch_size, 1, num_frames, height, width).to(torch_device) padding_mask = torch.zeros(batch_size, 1, height, width).to(torch_device) return { "hidden_states": hidden_states, "timestep": timestep, "encoder_hidden_states": encoder_hidden_states, "attention_mask": attention_mask, "fps": fps, "condition_mask": condition_mask, "padding_mask": padding_mask, } 
@property def input_shape(self): return (4, 1, 16, 16) @property def output_shape(self): return (4, 1, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 4 + 1, "out_channels": 4, "num_attention_heads": 2, "attention_head_dim": 12, "num_layers": 2, "mlp_ratio": 2, "text_embed_dim": 16, "adaln_lora_dim": 4, "max_size": (4, 32, 32), "patch_size": (1, 2, 2), "rope_scale": (2.0, 1.0, 1.0), "concat_padding_mask": True, "extra_pos_embed_type": "learnable", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"CosmosTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
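Both test classes lean on the shared `ModelTesterMixin` flow: `prepare_init_args_and_inputs_for_common` returns a configuration small enough to instantiate quickly, and the mixin builds the model and feeds it `dummy_input`. Roughly, that boils down to the sketch below (CPU, tiny text-to-world config copied from above; treat it as illustrative, since the exact forward signature may evolve):

```py
import torch

from diffusers import CosmosTransformer3DModel

# Tiny config mirroring prepare_init_args_and_inputs_for_common above.
model = CosmosTransformer3DModel(
    in_channels=4, out_channels=4, num_attention_heads=2, attention_head_dim=12,
    num_layers=2, mlp_ratio=2, text_embed_dim=16, adaln_lora_dim=4,
    max_size=(4, 32, 32), patch_size=(1, 2, 2), rope_scale=(2.0, 1.0, 1.0),
    concat_padding_mask=True, extra_pos_embed_type="learnable",
)
output = model(
    hidden_states=torch.randn(1, 4, 1, 16, 16),
    timestep=torch.randint(0, 1000, (1,)),
    encoder_hidden_states=torch.randn(1, 12, 16),
    attention_mask=torch.ones(1, 12),
    fps=30,
    padding_mask=torch.zeros(1, 1, 16, 16),
    return_dict=False,
)[0]
print(output.shape)  # expected (1, 4, 1, 16, 16), matching output_shape above
```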
diffusers/tests/models/transformers/test_models_transformer_cosmos.py/0
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import SkyReelsV2Transformer3DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin enable_full_determinism() class SkyReelsV2Transformer3DTests(ModelTesterMixin, TorchCompileTesterMixin, unittest.TestCase): model_class = SkyReelsV2Transformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 1 num_channels = 4 num_frames = 2 height = 16 width = 16 text_encoder_embedding_dim = 16 sequence_length = 12 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, } @property def input_shape(self): return (4, 1, 16, 16) @property def output_shape(self): return (4, 1, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "patch_size": (1, 2, 2), "num_attention_heads": 2, "attention_head_dim": 12, "in_channels": 4, "out_channels": 4, "text_dim": 16, "freq_dim": 256, "ffn_dim": 32, "num_layers": 2, "cross_attn_norm": True, "qk_norm": "rms_norm_across_heads", "rope_max_seq_len": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"SkyReelsV2Transformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
diffusers/tests/models/transformers/test_models_transformer_skyreels_v2.py/0
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest from typing import Any, Dict import numpy as np import torch from PIL import Image from diffusers import ( ClassifierFreeGuidance, StableDiffusionXLAutoBlocks, StableDiffusionXLModularPipeline, ) from diffusers.loaders import ModularIPAdapterMixin from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ...models.unets.test_models_unet_2d_condition import ( create_ip_adapter_state_dict, ) from ..test_modular_pipelines_common import ( ModularPipelineTesterMixin, ) enable_full_determinism() class SDXLModularTests: """ This mixin defines method to create pipeline, base input and base test across all SDXL modular tests. """ pipeline_class = StableDiffusionXLModularPipeline pipeline_blocks_class = StableDiffusionXLAutoBlocks repo = "hf-internal-testing/tiny-sdxl-modular" params = frozenset( [ "prompt", "height", "width", "negative_prompt", "cross_attention_kwargs", "image", "mask_image", ] ) batch_params = frozenset(["prompt", "negative_prompt", "image", "mask_image"]) def get_pipeline(self, components_manager=None, torch_dtype=torch.float32): pipeline = self.pipeline_blocks_class().init_pipeline(self.repo, components_manager=components_manager) pipeline.load_default_components(torch_dtype=torch_dtype) return pipeline def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", } return inputs def _test_stable_diffusion_xl_euler(self, expected_image_shape, expected_slice, expected_max_diff=1e-2): device = "cpu" # ensure determinism for the device-dependent torch.Generator sd_pipe = self.get_pipeline() sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs, output="images") image_slice = image[0, -3:, -3:, -1] assert image.shape == expected_image_shape assert np.abs(image_slice.flatten() - expected_slice).max() < expected_max_diff, ( "Image Slice does not match expected slice" ) class SDXLModularIPAdapterTests: """ This mixin is designed to test IP Adapter. 
""" def test_pipeline_inputs_and_blocks(self): blocks = self.pipeline_blocks_class() parameters = blocks.input_names assert issubclass(self.pipeline_class, ModularIPAdapterMixin) assert "ip_adapter_image" in parameters, ( "`ip_adapter_image` argument must be supported by the `__call__` method" ) assert "ip_adapter" in blocks.sub_blocks, "pipeline must contain an IPAdapter block" _ = blocks.sub_blocks.pop("ip_adapter") parameters = blocks.input_names assert "ip_adapter_image" not in parameters, ( "`ip_adapter_image` argument must be removed from the `__call__` method" ) def _get_dummy_image_embeds(self, cross_attention_dim: int = 32): return torch.randn((1, 1, cross_attention_dim), device=torch_device) def _get_dummy_faceid_image_embeds(self, cross_attention_dim: int = 32): return torch.randn((1, 1, 1, cross_attention_dim), device=torch_device) def _get_dummy_masks(self, input_size: int = 64): _masks = torch.zeros((1, 1, input_size, input_size), device=torch_device) _masks[0, :, :, : int(input_size / 2)] = 1 return _masks def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): blocks = self.pipeline_blocks_class() _ = blocks.sub_blocks.pop("ip_adapter") parameters = blocks.input_names if "image" in parameters and "strength" in parameters: inputs["num_inference_steps"] = 4 inputs["output_type"] = "np" return inputs def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): r"""Tests for IP-Adapter. The following scenarios are tested: - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Multi IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. - Multi IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. """ # Raising the tolerance for this test when it's run on a CPU because we # compare against static slices and that can be shaky (with a VVVV low probability). expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff blocks = self.pipeline_blocks_class() _ = blocks.sub_blocks.pop("ip_adapter") pipe = blocks.init_pipeline(self.repo) pipe.load_default_components(torch_dtype=torch.float32) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim") # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) if expected_pipe_slice is None: output_without_adapter = pipe(**inputs, output="images") else: output_without_adapter = expected_pipe_slice # 1. 
Single IP-Adapter test cases adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs, output="images") if expected_pipe_slice is not None: output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs, output="images") if expected_pipe_slice is not None: output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() assert max_diff_without_adapter_scale < expected_max_diff, ( "Output without ip-adapter must be same as normal inference" ) assert max_diff_with_adapter_scale > 1e-2, "Output with ip-adapter must be different from normal inference" # 2. Multi IP-Adapter test cases adapter_state_dict_1 = create_ip_adapter_state_dict(pipe.unet) adapter_state_dict_2 = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights([adapter_state_dict_1, adapter_state_dict_2]) # forward pass with multi ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 pipe.set_ip_adapter_scale([0.0, 0.0]) output_without_multi_adapter_scale = pipe(**inputs, output="images") if expected_pipe_slice is not None: output_without_multi_adapter_scale = output_without_multi_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with multi ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 pipe.set_ip_adapter_scale([42.0, 42.0]) output_with_multi_adapter_scale = pipe(**inputs, output="images") if expected_pipe_slice is not None: output_with_multi_adapter_scale = output_with_multi_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_multi_adapter_scale = np.abs( output_without_multi_adapter_scale - output_without_adapter ).max() max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max() assert max_diff_without_multi_adapter_scale < expected_max_diff, ( "Output without multi-ip-adapter must be same as normal inference" ) assert max_diff_with_multi_adapter_scale > 1e-2, ( "Output with multi-ip-adapter scale must be different from normal 
inference" ) class SDXLModularControlNetTests: """ This mixin is designed to test ControlNet. """ def test_pipeline_inputs(self): blocks = self.pipeline_blocks_class() parameters = blocks.input_names assert "control_image" in parameters, "`control_image` argument must be supported by the `__call__` method" assert "controlnet_conditioning_scale" in parameters, ( "`controlnet_conditioning_scale` argument must be supported by the `__call__` method" ) def _modify_inputs_for_controlnet_test(self, inputs: Dict[str, Any]): controlnet_embedder_scale_factor = 2 image = torch.randn( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), device=torch_device, ) inputs["control_image"] = image return inputs def test_controlnet(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): r"""Tests for ControlNet. The following scenarios are tested: - Single ControlNet with scale=0 should produce same output as no ControlNet. - Single ControlNet with scale!=0 should produce different output compared to no ControlNet. """ # Raising the tolerance for this test when it's run on a CPU because we # compare against static slices and that can be shaky (with a VVVV low probability). expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff pipe = self.get_pipeline() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # forward pass without controlnet inputs = self.get_dummy_inputs(torch_device) output_without_controlnet = pipe(**inputs, output="images") output_without_controlnet = output_without_controlnet[0, -3:, -3:, -1].flatten() # forward pass with single controlnet, but scale=0 which should have no effect inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) inputs["controlnet_conditioning_scale"] = 0.0 output_without_controlnet_scale = pipe(**inputs, output="images") output_without_controlnet_scale = output_without_controlnet_scale[0, -3:, -3:, -1].flatten() # forward pass with single controlnet, but with scale of adapter weights inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) inputs["controlnet_conditioning_scale"] = 42.0 output_with_controlnet_scale = pipe(**inputs, output="images") output_with_controlnet_scale = output_with_controlnet_scale[0, -3:, -3:, -1].flatten() max_diff_without_controlnet_scale = np.abs(output_without_controlnet_scale - output_without_controlnet).max() max_diff_with_controlnet_scale = np.abs(output_with_controlnet_scale - output_without_controlnet).max() assert max_diff_without_controlnet_scale < expected_max_diff, ( "Output without controlnet must be same as normal inference" ) assert max_diff_with_controlnet_scale > 1e-2, "Output with controlnet must be different from normal inference" def test_controlnet_cfg(self): pipe = self.get_pipeline() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # forward pass with CFG not applied guider = ClassifierFreeGuidance(guidance_scale=1.0) pipe.update_components(guider=guider) inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) out_no_cfg = pipe(**inputs, output="images") # forward pass with CFG applied guider = ClassifierFreeGuidance(guidance_scale=7.5) pipe.update_components(guider=guider) inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) out_cfg = pipe(**inputs, output="images") assert out_cfg.shape == out_no_cfg.shape max_diff = np.abs(out_cfg - out_no_cfg).max() assert max_diff > 1e-2, "Output with CFG must 
be different from normal inference" class SDXLModularGuiderTests: def test_guider_cfg(self): pipe = self.get_pipeline() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # forward pass with CFG not applied guider = ClassifierFreeGuidance(guidance_scale=1.0) pipe.update_components(guider=guider) inputs = self.get_dummy_inputs(torch_device) out_no_cfg = pipe(**inputs, output="images") # forward pass with CFG applied guider = ClassifierFreeGuidance(guidance_scale=7.5) pipe.update_components(guider=guider) inputs = self.get_dummy_inputs(torch_device) out_cfg = pipe(**inputs, output="images") assert out_cfg.shape == out_no_cfg.shape max_diff = np.abs(out_cfg - out_no_cfg).max() assert max_diff > 1e-2, "Output with CFG must be different from normal inference" class SDXLModularPipelineFastTests( SDXLModularTests, SDXLModularIPAdapterTests, SDXLModularControlNetTests, SDXLModularGuiderTests, ModularPipelineTesterMixin, unittest.TestCase, ): """Test cases for Stable Diffusion XL modular pipeline fast tests.""" def test_stable_diffusion_xl_euler(self): self._test_stable_diffusion_xl_euler( expected_image_shape=(1, 64, 64, 3), expected_slice=[ 0.5966781, 0.62939394, 0.48465094, 0.51573336, 0.57593524, 0.47035995, 0.53410417, 0.51436996, 0.47313565, ], expected_max_diff=1e-2, ) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) class SDXLImg2ImgModularPipelineFastTests( SDXLModularTests, SDXLModularIPAdapterTests, SDXLModularControlNetTests, SDXLModularGuiderTests, ModularPipelineTesterMixin, unittest.TestCase, ): """Test cases for Stable Diffusion XL image-to-image modular pipeline fast tests.""" def get_dummy_inputs(self, device, seed=0): inputs = super().get_dummy_inputs(device, seed) image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 inputs["image"] = image inputs["strength"] = 0.8 return inputs def test_stable_diffusion_xl_euler(self): self._test_stable_diffusion_xl_euler( expected_image_shape=(1, 64, 64, 3), expected_slice=[ 0.56943184, 0.4702148, 0.48048905, 0.6235963, 0.551138, 0.49629188, 0.60031277, 0.5688907, 0.43996853, ], expected_max_diff=1e-2, ) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) class SDXLInpaintingModularPipelineFastTests( SDXLModularTests, SDXLModularIPAdapterTests, SDXLModularControlNetTests, SDXLModularGuiderTests, ModularPipelineTesterMixin, unittest.TestCase, ): """Test cases for Stable Diffusion XL inpainting modular pipeline fast tests.""" def get_dummy_inputs(self, device, seed=0): inputs = super().get_dummy_inputs(device, seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # create mask image[8:, 8:, :] = 255 mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) inputs["image"] = init_image inputs["mask_image"] = mask_image inputs["strength"] = 1.0 return inputs def test_stable_diffusion_xl_euler(self): self._test_stable_diffusion_xl_euler( expected_image_shape=(1, 64, 64, 3), expected_slice=[ 0.40872607, 0.38842705, 0.34893104, 0.47837183, 0.43792963, 0.5332134, 0.3716843, 0.47274873, 0.45000193, ], expected_max_diff=1e-2, ) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3)
diffusers/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py/0
{ "file_path": "diffusers/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 8154 }
190
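For readers unfamiliar with the modular API these tests exercise, the sketch below condenses what `get_pipeline`, `get_dummy_inputs`, and `_test_stable_diffusion_xl_euler` do together: build the auto blocks, initialize a pipeline from the tiny testing checkpoint, load the default components, and ask for the `images` output. It is a sketch under the assumption that the modular pipeline behaves the same outside the tester mixins as it does inside them.

```py
import torch

from diffusers import StableDiffusionXLAutoBlocks

repo = "hf-internal-testing/tiny-sdxl-modular"  # the checkpoint the fast tests load

# Assemble the block graph and turn it into a runnable pipeline.
pipe = StableDiffusionXLAutoBlocks().init_pipeline(repo)
pipe.load_default_components(torch_dtype=torch.float32)
pipe = pipe.to("cpu")

images = pipe(
    prompt="A painting of a squirrel eating a burger",
    generator=torch.Generator(device="cpu").manual_seed(0),
    num_inference_steps=2,
    output_type="np",
    output="images",  # modular pipelines return the named output requested here
)
print(images.shape)  # the fast tests expect (1, 64, 64, 3)
```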
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_numpy, require_torch_accelerator, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetImg2ImgPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.7096, 0.5149, 0.3571, 0.5897, 0.4715, 0.4052, 0.6098, 0.6886, 0.4213]) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) class StableDiffusionMultiControlNetPipelineFastTests( IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal_(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, 
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) 
@unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5293, 0.7339, 0.6642, 0.3950, 0.5212, 0.5175, 0.7002, 0.5907, 0.5182]) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) @slow @require_torch_accelerator class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "evil space-punk bird" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) output = pipe( prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" ) assert np.abs(expected_image - image).max() < 9e-2
diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py", "repo_id": "diffusers", "token_count": 7949 }
191
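The slow `test_canny` case above doubles as a usage recipe. The sketch below restates it as a standalone script; the checkpoints, image URLs, and call arguments are exactly the ones the test uses, and only running it outside the test harness is an assumption.

```py
import torch

from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

# Canny-conditioned ControlNet paired with Stable Diffusion 1.5 for img2img.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()

control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
init_image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))

output = pipe(
    "evil space-punk bird",
    init_image,
    control_image=control_image,
    generator=torch.Generator(device="cpu").manual_seed(0),
    num_inference_steps=50,
    strength=0.6,
    output_type="np",
)
print(output.images[0].shape)  # (512, 512, 3)
```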
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# ===== This file is an implementation of a dummy guardrail for the fast tests =====

from typing import Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin
from diffusers.models.modeling_utils import ModelMixin


class DummyCosmosSafetyChecker(ModelMixin, ConfigMixin):
    def __init__(self) -> None:
        super().__init__()
        self._dtype = torch.float32

    def check_text_safety(self, prompt: str) -> bool:
        return True

    def check_video_safety(self, frames: np.ndarray) -> np.ndarray:
        return frames

    def to(self, device: Union[str, torch.device] = None, dtype: torch.dtype = None) -> None:
        self._dtype = dtype

    @property
    def device(self) -> torch.device:
        return None

    @property
    def dtype(self) -> torch.dtype:
        return self._dtype
diffusers/tests/pipelines/cosmos/cosmos_guardrail.py/0
{ "file_path": "diffusers/tests/pipelines/cosmos/cosmos_guardrail.py", "repo_id": "diffusers", "token_count": 461 }
192
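The dummy guardrail above exists purely so the fast Cosmos tests do not have to load the real safety models: it approves every prompt and passes video frames through unchanged. The sketch below exercises that interface; the import line assumes the script is run from the `tests/pipelines/cosmos` directory where the module lives.

```py
import numpy as np
import torch

from cosmos_guardrail import DummyCosmosSafetyChecker  # assumed importable from tests/pipelines/cosmos

checker = DummyCosmosSafetyChecker()
checker.to(device="cpu", dtype=torch.float16)  # only the dtype is recorded; the device argument is ignored

# Every prompt is considered safe, and frames are returned untouched.
assert checker.check_text_safety("a prompt that is always considered safe")
frames = np.zeros((2, 32, 32, 3), dtype=np.uint8)
assert checker.check_video_safety(frames) is frames

print(checker.dtype)  # torch.float16
```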
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import ( AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, LlamaForCausalLM, T5EncoderModel, ) from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, HiDreamImagePipeline, HiDreamImageTransformer2DModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class HiDreamImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = HiDreamImagePipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "prompt_embeds", "negative_prompt_embeds"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params test_layerwise_casting = True supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) transformer = HiDreamImageTransformer2DModel( patch_size=2, in_channels=4, out_channels=4, num_layers=1, num_single_layers=1, attention_head_dim=8, num_attention_heads=4, caption_channels=[32, 16], text_emb_dim=64, num_routed_experts=4, num_activated_experts=2, axes_dims_rope=(4, 2, 2), max_resolution=(32, 32), llama_layers=(0, 1), ).eval() torch.manual_seed(0) vae = AutoencoderKL(scaling_factor=0.3611, shift_factor=0.1159) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, max_position_embeddings=128, ) torch.manual_seed(0) text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) torch.manual_seed(0) text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) text_encoder_4 = LlamaForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM") text_encoder_4.generation_config.pad_token_id = 1 tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer_4 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM") scheduler = FlowMatchEulerDiscreteScheduler() components = { "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "text_encoder_3": text_encoder_3, "tokenizer_3": tokenizer_3, "text_encoder_4": text_encoder_4, "tokenizer_4": tokenizer_4, "transformer": 
transformer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs)[0] generated_image = image[0] self.assertEqual(generated_image.shape, (128, 128, 3)) # fmt: off expected_slice = np.array([0.4507, 0.5256, 0.4205, 0.5791, 0.4848, 0.4831, 0.4443, 0.5107, 0.6586, 0.3163, 0.7318, 0.5933, 0.6252, 0.5512, 0.5357, 0.5983]) # fmt: on generated_slice = generated_image.flatten() generated_slice = np.concatenate([generated_slice[:8], generated_slice[-8:]]) self.assertTrue(np.allclose(generated_slice, expected_slice, atol=1e-3)) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-4)
diffusers/tests/pipelines/hidream_image/test_pipeline_hidream.py/0
{ "file_path": "diffusers/tests/pipelines/hidream_image/test_pipeline_hidream.py", "repo_id": "diffusers", "token_count": 2632 }
193
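One pattern worth calling out from `test_inference` above is how the expected output is pinned: instead of a 3x3 corner slice, the HiDream test keeps the first and last eight values of the flattened image and compares them to a stored reference. The helper below is a minimal sketch of that check, using a random array in place of the pipeline output.

```py
import numpy as np


def check_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-3) -> bool:
    # Flatten the image and keep the first and last eight values, as the test does.
    flat = image.flatten()
    generated_slice = np.concatenate([flat[:8], flat[-8:]])
    return np.allclose(generated_slice, expected_slice, atol=atol)


# Example with a dummy array; the real test uses the (128, 128, 3) output of the tiny pipeline.
rng = np.random.default_rng(0)
image = rng.random((128, 128, 3)).astype(np.float32)
reference = np.concatenate([image.flatten()[:8], image.flatten()[-8:]])
assert check_slice(image, reference)
```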
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, 
"generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Pipeline params = [ "image_embeds", "negative_image_embeds", ] batch_params = ["image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] callback_cfg_params = ["image_embds"] test_xformers_attention = False def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( f" expected_slice {expected_slice}, but got {image_slice.flatten()}" ) assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" ) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) @slow @require_torch_accelerator class KandinskyV22PipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_kandinsky_text2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.enable_model_cpu_offload(device=torch_device) pipeline = KandinskyV22Pipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=3, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=3, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py", "repo_id": "diffusers", "token_count": 4112 }
194
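The integration test above encodes the standard two-stage Kandinsky 2.2 flow: the prior turns the prompt into image embeddings, and the decoder turns those embeddings into pixels. The sketch below restates it without the test scaffolding; the checkpoints, prompt, seeds, step counts, fp16 dtype, and CPU offload are all taken from the test.

```py
import torch

from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.enable_model_cpu_offload()

pipeline = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()

# Stage 1: prompt -> image embeddings (plus the unconditional embeddings).
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
    "red cat, 4k photo", generator=generator, num_inference_steps=3, negative_prompt=""
).to_tuple()

# Stage 2: embeddings -> image, reseeding as the test does.
generator = torch.Generator(device="cpu").manual_seed(0)
image = pipeline(
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    generator=generator,
    num_inference_steps=3,
    output_type="np",
).images[0]
print(image.shape)  # the integration test expects (512, 512, 3)
```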
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import unittest import numpy as np import torch from transformers import AutoTokenizer, BertModel, T5EncoderModel from diffusers import ( AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPAGPipeline, HunyuanDiTPipeline, ) from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class HunyuanDiTPAGPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = HunyuanDiTPAGPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params def get_dummy_components(self): torch.manual_seed(0) transformer = HunyuanDiT2DModel( sample_size=16, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3, in_channels=4, cross_attention_dim=32, cross_attention_dim_t5=32, pooled_projection_dim=16, hidden_size=24, activation_fn="gelu-approximate", ) torch.manual_seed(0) vae = AutoencoderKL() scheduler = DDPMScheduler() text_encoder = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "use_resolution_binning": False, "pag_scale": 0.0, } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 16, 16, 3)) expected_slice = np.array( [0.56939435, 0.34541583, 0.35915792, 0.46489206, 0.38775963, 0.45004836, 0.5957267, 0.59481275, 0.33287364] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @unittest.skip("Not supported.") def 
test_sequential_cpu_offload_forward_pass(self): # TODO(YiYi) need to fix later pass @unittest.skip("Not supported.") def test_sequential_offload_forward_pass_twice(self): # TODO(YiYi) need to fix later pass def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-3, ) def test_feed_forward_chunking(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_no_chunking = image[0, -3:, -3:, -1] pipe.transformer.enable_forward_chunking(chunk_size=1, dim=0) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_chunking = image[0, -3:, -3:, -1] max_diff = np.abs(to_np(image_slice_no_chunking) - to_np(image_slice_chunking)).max() self.assertLess(max_diff, 1e-4) def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image = pipe(**inputs)[0] original_image_slice = image[0, -3:, -3:, -1] pipe.transformer.fuse_qkv_projections() inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_fused = pipe(**inputs)[0] image_slice_fused = image_fused[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( "Fusion of QKV projections shouldn't affect the outputs." ) assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." ) assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Original outputs should match when fused QKV projections are disabled." ) def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # base pipeline (expect same output when pag is disabled) pipe_sd = HunyuanDiTPipeline(**components) pipe_sd = pipe_sd.to(device) pipe_sd.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] components = self.get_dummy_components() # pag disabled with pag_scale=0.0 pipe_pag = self.pipeline_class(**components) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["pag_scale"] = 0.0 out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] # pag enabled pipe_pag = self.pipeline_class(**components) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["pag_scale"] = 3.0 out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 def test_pag_applied_layers(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # base pipeline pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) all_self_attn_layers = [k for k in pipe.transformer.attn_processors.keys() if "attn1" in k] original_attn_procs = pipe.transformer.attn_processors pag_layers = ["blocks.0", "blocks.1"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) # blocks.0 block_0_self_attn = ["blocks.0.attn1.processor"] pipe.transformer.set_attn_processor(original_attn_procs.copy()) pag_layers = ["blocks.0"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(block_0_self_attn) pipe.transformer.set_attn_processor(original_attn_procs.copy()) pag_layers = ["blocks.0.attn1"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(block_0_self_attn) pipe.transformer.set_attn_processor(original_attn_procs.copy()) pag_layers = ["blocks.(0|1)"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert (len(pipe.pag_attn_processors)) == 2 pipe.transformer.set_attn_processor(original_attn_procs.copy()) pag_layers = ["blocks.0", r"blocks\.1"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert len(pipe.pag_attn_processors) == 2 @unittest.skip( "Test not supported as `encode_prompt` is called two times separately which deivates from about 99% of the pipelines we have." 
) def test_encode_prompt_works_in_isolation(self): pass def test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0) ( prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, ) = pipe.encode_prompt( prompt, device=torch_device, dtype=torch.float32, text_encoder_index=1, ) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "prompt_embeds_2": prompt_embeds_2, "prompt_attention_mask_2": prompt_attention_mask_2, "negative_prompt_embeds_2": negative_prompt_embeds_2, "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "prompt_embeds_2": prompt_embeds_2, "prompt_attention_mask_2": prompt_attention_mask_2, "negative_prompt_embeds_2": negative_prompt_embeds_2, "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4)
diffusers/tests/pipelines/pag/test_pag_hunyuan_dit.py/0
{ "file_path": "diffusers/tests/pipelines/pag/test_pag_hunyuan_dit.py", "repo_id": "diffusers", "token_count": 6511 }
195
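To make the PAG on/off comparison in `test_pag_disable_enable` easier to follow outside the tester mixin, here is a condensed sketch built from the same dummy components the test constructs. It assumes the tiny `hf-internal-testing` checkpoints remain available and that the PAG pipeline accepts `pag_scale` per call, which is what the test relies on; it is a sketch, not a substitute for the test.

```py
import numpy as np
import torch
from transformers import AutoTokenizer, BertModel, T5EncoderModel

from diffusers import AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPAGPipeline

torch.manual_seed(0)
components = {
    "transformer": HunyuanDiT2DModel(
        sample_size=16, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3,
        in_channels=4, cross_attention_dim=32, cross_attention_dim_t5=32, pooled_projection_dim=16,
        hidden_size=24, activation_fn="gelu-approximate",
    ).eval(),
    "vae": AutoencoderKL().eval(),
    "scheduler": DDPMScheduler(),
    "text_encoder": BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
    "tokenizer": AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
    "text_encoder_2": T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5"),
    "tokenizer_2": AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5"),
    "safety_checker": None,
    "feature_extractor": None,
}
pipe = HunyuanDiTPAGPipeline(**components).to("cpu")


def render(pag_scale):
    # Same prompt, seed, and settings as the fast tests; only `pag_scale` varies.
    return pipe(
        prompt="A painting of a squirrel eating a burger",
        generator=torch.Generator(device="cpu").manual_seed(0),
        num_inference_steps=2,
        guidance_scale=5.0,
        output_type="np",
        use_resolution_binning=False,
        pag_scale=pag_scale,
    ).images[0, -3:, -3:, -1]


disabled, enabled = render(0.0), render(3.0)
# pag_scale=0.0 makes PAG a no-op, so a positive scale should visibly change the output.
assert np.abs(enabled - disabled).max() > 1e-3
```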
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, DDIMScheduler, PixArtSigmaPipeline, PixArtTransformer2DModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, check_qkv_fusion_matches_attn_procs_length, check_qkv_fusion_processors_exist, to_np, ) enable_full_determinism() class PixArtSigmaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = PixArtSigmaPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self): torch.manual_seed(0) transformer = PixArtTransformer2DModel( sample_size=8, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3, caption_channels=32, in_channels=4, cross_attention_dim=24, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single", norm_elementwise_affine=False, norm_eps=1e-6, ) torch.manual_seed(0) vae = AutoencoderKL() scheduler = DDIMScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "use_resolution_binning": False, "output_type": "np", } return inputs @unittest.skip("Not supported.") def test_sequential_cpu_offload_forward_pass(self): # TODO(PVP, Sayak) need to fix later return def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 8, 8, 3)) expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.4830, 0.2583, 0.5331, 0.4852]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() 
self.assertLessEqual(max_diff, 1e-3) def test_inference_non_square_images(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs, height=32, width=48).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 48, 3)) expected_slice = np.array([0.6493, 0.5370, 0.4081, 0.4762, 0.3695, 0.4711, 0.3026, 0.5218, 0.5263]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_with_embeddings_and_multiple_images(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] prompt_embeds, prompt_attn_mask, negative_prompt_embeds, neg_prompt_attn_mask = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) def test_inference_with_multiple_images_per_prompt(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_images_per_prompt"] = 2 image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (2, 8, 8, 3)) expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.4830, 0.2583, 0.5331, 0.4852]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @unittest.skip("Test is already covered through encode_prompt isolation.") def test_save_load_optional_components(self): pass def 
test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() assert check_qkv_fusion_processors_exist(pipe.transformer), ( "Something wrong with the fused attention processors. Expected all the attention processors to be fused." ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( "Fusion of QKV projections shouldn't affect the outputs." ) assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." ) assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Original outputs should match when fused QKV projections are disabled." ) @slow @require_torch_accelerator class PixArtSigmaPipelineIntegrationTests(unittest.TestCase): ckpt_id_1024 = "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS" ckpt_id_512 = "PixArt-alpha/PixArt-Sigma-XL-2-512-MS" prompt = "A small cactus with a happy face in the Sahara desert." 
def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_pixart_1024(self): generator = torch.Generator("cpu").manual_seed(0) pipe = PixArtSigmaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.4517, 0.4446, 0.4375, 0.449, 0.4399, 0.4365, 0.4583, 0.4629, 0.4473]) max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) self.assertLessEqual(max_diff, 1e-4) def test_pixart_512(self): generator = torch.Generator("cpu").manual_seed(0) transformer = PixArtTransformer2DModel.from_pretrained( self.ckpt_id_512, subfolder="transformer", torch_dtype=torch.float16 ) pipe = PixArtSigmaPipeline.from_pretrained( self.ckpt_id_1024, transformer=transformer, torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0479, 0.0378, 0.0217, 0.0942, 0.064, 0.0791, 0.2073, 0.1975, 0.2017]) max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) self.assertLessEqual(max_diff, 1e-4) def test_pixart_1024_without_resolution_binning(self): generator = torch.manual_seed(0) pipe = PixArtSigmaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt height, width = 1024, 768 num_inference_steps = 2 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) def test_pixart_512_without_resolution_binning(self): generator = torch.manual_seed(0) transformer = PixArtTransformer2DModel.from_pretrained( self.ckpt_id_512, subfolder="transformer", torch_dtype=torch.float16 ) pipe = PixArtSigmaPipeline.from_pretrained( self.ckpt_id_1024, transformer=transformer, torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt height, width = 512, 768 num_inference_steps = 2 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4)
diffusers/tests/pipelines/pixart_sigma/test_pixart.py/0
{ "file_path": "diffusers/tests/pipelines/pixart_sigma/test_pixart.py", "repo_id": "diffusers", "token_count": 6930 }
196
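The integration tests above gate on `numpy_cosine_similarity_distance` rather than a plain max-absolute difference. A minimal sketch of that metric, assuming it is essentially one minus the cosine similarity of the flattened slices (the exact helper in `diffusers.utils.testing_utils` may differ):

import numpy as np


def cosine_similarity_distance(a: np.ndarray, b: np.ndarray) -> float:
    # Flatten both slices and compute 1 - cos(theta); identical slices give ~0.0,
    # so asserting `distance <= 1e-4` tolerates only tiny numerical noise.
    a = a.flatten().astype(np.float64)
    b = b.flatten().astype(np.float64)
    return float(1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))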
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import ( floats_tensor, is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort # TODO: (Dhruv) Update hub_checkpoint repo_id @unittest.skip( "There is a potential backdoor vulnerability in the hub_checkpoint. Skip running this test until resolved" ) class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): # TODO: is there an appropriate internal test set? hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx" def get_dummy_inputs(self, seed=0): image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_pipeline_default_ddpm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.6957, 0.7002, 0.7186, 0.6881, 0.6693, 0.6910, 0.7445, 0.7274, 0.7056]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.7349, 0.7347, 0.7034, 0.7696, 0.7876, 0.7597, 0.7916, 0.8085, 0.8036]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler(self): pipe = 
OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_ddpm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) # using the PNDM scheduler by default pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) 
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py", "repo_id": "diffusers", "token_count": 3971 }
197
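The `gpu_provider` and `gpu_options` properties above are what the ONNX pipelines forward to ONNX Runtime when building their inference sessions. A rough equivalent using onnxruntime directly, with a placeholder model path (not a real checkpoint):

import onnxruntime as ort

sess_options = ort.SessionOptions()
sess_options.enable_mem_pattern = False  # mirrors `gpu_options` above

session = ort.InferenceSession(
    "model.onnx",  # placeholder path for illustration only
    sess_options=sess_options,
    providers=[
        (
            "CUDAExecutionProvider",
            {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"},
        ),
    ],
)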
import gc import random import unittest import numpy as np import torch from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, StableDiffusion3Img2ImgPipeline, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, floats_tensor, numpy_cosine_similarity_distance, require_big_accelerator, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin class StableDiffusion3Img2ImgPipelineFastTests(PipelineLatentTesterMixin, unittest.TestCase, PipelineTesterMixin): pipeline_class = StableDiffusion3Img2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) transformer = SD3Transformer2DModel( sample_size=32, patch_size=1, in_channels=4, num_layers=1, attention_head_dim=8, num_attention_heads=4, joint_attention_dim=32, caption_projection_dim=32, pooled_projection_dim=64, out_channels=4, ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=4, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "text_encoder_3": text_encoder_3, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "tokenizer_3": tokenizer_3, "transformer": transformer, "vae": vae, "image_encoder": None, "feature_extractor": None, } def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_inference(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images[0] generated_slice = 
image.flatten() generated_slice = np.concatenate([generated_slice[:8], generated_slice[-8:]]) # fmt: off expected_slice = np.array([0.4564, 0.5486, 0.4868, 0.5923, 0.3775, 0.5543, 0.4807, 0.4177, 0.3778, 0.5957, 0.5726, 0.4333, 0.6312, 0.5062, 0.4838, 0.5984]) # fmt: on self.assertTrue( np.allclose(generated_slice, expected_slice, atol=1e-3), "Output does not match expected slice." ) @unittest.skip("Skip for now.") def test_multi_vae(self): pass @slow @require_big_accelerator class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase): pipeline_class = StableDiffusion3Img2ImgPipeline repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, seed=0): init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) return { "prompt": "A photo of a cat", "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "generator": generator, "image": init_image, } def test_sd3_img2img_inference(self): torch.manual_seed(0) pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16) pipe.enable_model_cpu_offload(device=torch_device) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] image_slice = image[0, :10, :10] # fmt: off expected_slices = Expectations( { ("xpu", 3): np.array([0.5117, 0.4421, 0.3852, 0.5044, 0.4219, 0.3262, 0.5024, 0.4329, 0.3276, 0.4978, 0.4412, 0.3355, 0.4983, 0.4338, 0.3279, 0.4893, 0.4241, 0.3129, 0.4875, 0.4253, 0.3030, 0.4961, 0.4267, 0.2988, 0.5029, 0.4255, 0.3054, 0.5132, 0.4248, 0.3222]), ("cuda", 7): np.array([0.5435, 0.4673, 0.5732, 0.4438, 0.3557, 0.4912, 0.4331, 0.3491, 0.4915, 0.4287, 0.347, 0.4849, 0.4355, 0.3469, 0.4871, 0.4431, 0.3538, 0.4912, 0.4521, 0.3643, 0.5059, 0.4587, 0.373, 0.5166, 0.4685, 0.3845, 0.5264, 0.4746, 0.3914, 0.5342]), ("cuda", 8): np.array([0.5146, 0.4385, 0.3826, 0.5098, 0.4150, 0.3218, 0.5142, 0.4312, 0.3298, 0.5127, 0.4431, 0.3411, 0.5171, 0.4424, 0.3374, 0.5088, 0.4348, 0.3242, 0.5073, 0.4380, 0.3174, 0.5132, 0.4397, 0.3115, 0.5132, 0.4343, 0.3118, 0.5219, 0.4328, 0.3256]), } ) # fmt: on expected_slice = expected_slices.get_expectation() max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) assert max_diff < 1e-4, f"Outputs are not close enough, got {max_diff}"
diffusers/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py", "repo_id": "diffusers", "token_count": 3870 }
198
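The `Expectations` mapping above keys golden slices by `(device type, device generation)` so a single test can carry per-backend reference values. A simplified stand-in for that lookup (the real helper in `diffusers.utils.testing_utils` has more elaborate fallback logic):

import numpy as np
import torch

# Reference slices keyed by (device type, device generation); values truncated from the test above.
reference_slices = {
    ("cuda", 7): np.array([0.5435, 0.4673, 0.5732]),
    ("cuda", 8): np.array([0.5146, 0.4385, 0.3826]),
}

if torch.cuda.is_available():
    major, _minor = torch.cuda.get_device_capability()
    expected_slice = reference_slices.get(("cuda", major), reference_slices[("cuda", 7)])
else:
    expected_slice = reference_slices[("cuda", 7)]  # arbitrary fallback, for the sketch only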
import gc import random import tempfile import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection, ) import diffusers from diffusers import ( AutoencoderKLTemporalDecoder, EulerDiscreteScheduler, StableVideoDiffusionPipeline, UNetSpatioTemporalConditionModel, ) from diffusers.utils import load_image, logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, enable_full_determinism, floats_tensor, numpy_cosine_similarity_distance, require_accelerate_version_greater, require_accelerator, require_torch_accelerator, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableVideoDiffusionPipeline params = frozenset(["image"]) batch_params = frozenset(["image", "generator"]) required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", ] ) supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) unet = UNetSpatioTemporalConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=( "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal", ), up_block_types=("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal"), cross_attention_dim=32, num_attention_heads=8, projection_class_embeddings_input_dim=96, addition_time_embed_dim=32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", interpolation_type="linear", num_train_timesteps=1000, prediction_type="v_prediction", sigma_max=700.0, sigma_min=0.002, steps_offset=1, timestep_spacing="leading", timestep_type="continuous", trained_betas=None, use_karras_sigmas=True, ) torch.manual_seed(0) vae = AutoencoderKLTemporalDecoder( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=32, projection_dim=32, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) image_encoder = CLIPVisionModelWithProjection(config) torch.manual_seed(0) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) components = { "unet": unet, "image_encoder": image_encoder, "scheduler": scheduler, "vae": vae, "feature_extractor": feature_extractor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(0)).to(device) inputs = { "generator": generator, "image": image, "num_inference_steps": 2, "output_type": "pt", "min_guidance_scale": 1.0, "max_guidance_scale": 2.5, "num_frames": 2, "height": 32, "width": 32, } return inputs @unittest.skip("Deprecated functionality") def test_attention_slicing_forward_pass(self): pass @unittest.skip("Batched inference works and outputs look correct, but the test is failing") def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, ): components = self.get_dummy_components() pipe = 
self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = torch.Generator("cpu").manual_seed(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) batched_inputs["generator"] = [torch.Generator("cpu").manual_seed(0) for i in range(batch_size)] batched_inputs["image"] = torch.cat([inputs["image"]] * batch_size, dim=0) output = pipe(**inputs).frames output_batch = pipe(**batched_inputs).frames assert len(output_batch) == batch_size max_diff = np.abs(to_np(output_batch[0]) - to_np(output[0])).max() assert max_diff < expected_max_diff @unittest.skip("Test is similar to test_inference_batch_single_identical") def test_inference_batch_consistent(self): pass def test_np_output_type(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["output_type"] = "np" output = pipe(**inputs).frames self.assertTrue(isinstance(output, np.ndarray)) self.assertEqual(len(output.shape), 5) def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" output = pipe(**self.get_dummy_inputs(generator_device)).frames[0] output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skip("Test is currently failing") def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) for component in pipe_fp16.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs).frames[0] fp16_inputs = self.get_dummy_inputs(torch_device) output_fp16 = pipe_fp16(**fp16_inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in 
components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs).frames[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." ) def test_save_load_optional_components(self, expected_max_difference=1e-4): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output = pipe(**inputs).frames[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) output_loaded = pipe_loaded(**inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) def test_save_load_local(self, expected_max_difference=9e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs).frames[0] logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) with CaptureLogger(logger) as cap_logger: pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for name in pipe_loaded.components.keys(): if name not in pipe_loaded._optional_components: 
assert name in str(cap_logger) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu")).frames[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to(torch_device) model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == torch_device for device in model_devices)) output_device = pipe(**self.get_dummy_inputs(torch_device)).frames[0] self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) @require_accelerator @require_accelerate_version_greater("0.14.0") def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs).frames[0] pipe.enable_sequential_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs).frames[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") @require_accelerator @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs).frames[0] pipe.enable_model_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs).frames[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in 
pipe._exclude_from_cpu_offload
        ]
        self.assertTrue(
            all(v.device.type == "cpu" for v in offloaded_modules),
            f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}",
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        expected_max_diff = 9e-4

        if not self.test_xformers_attention:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output_without_offload = pipe(**inputs).frames[0]
        output_without_offload = (
            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
        )

        pipe.enable_xformers_memory_efficient_attention()
        inputs = self.get_dummy_inputs(torch_device)
        output_with_offload = pipe(**inputs).frames[0]
        output_with_offload = (
            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload
        )

        max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
        self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")

    def test_disable_cfg(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        inputs["max_guidance_scale"] = 1.0
        output = pipe(**inputs).frames
        self.assertEqual(len(output.shape), 5)


@slow
@require_torch_accelerator
class StableVideoDiffusionPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_sd_video(self):
        pipe = StableVideoDiffusionPipeline.from_pretrained(
            "stabilityai/stable-video-diffusion-img2vid",
            variant="fp16",
            torch_dtype=torch.float16,
        )
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true"
        )

        generator = torch.Generator("cpu").manual_seed(0)
        num_frames = 3

        output = pipe(
            image=image,
            num_frames=num_frames,
            generator=generator,
            num_inference_steps=3,
            output_type="np",
        )

        image = output.frames[0]
        assert image.shape == (num_frames, 576, 1024, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.8592, 0.8645, 0.8499, 0.8722, 0.8769, 0.8421, 0.8557, 0.8528, 0.8285])
        assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
diffusers/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py", "repo_id": "diffusers", "token_count": 9636 }
199
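The `min_guidance_scale` / `max_guidance_scale` entries in the dummy inputs above reflect that Stable Video Diffusion ramps classifier-free guidance across the generated frames rather than using a single scalar. A hedged sketch of that per-frame schedule (the pipeline's exact broadcasting may differ):

import torch

min_guidance_scale, max_guidance_scale, num_frames = 1.0, 2.5, 2  # values from get_dummy_inputs above

# One guidance weight per frame, increasing linearly from the first to the last frame.
guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames)
# e.g. tensor([1.0, 2.5]) for the 2-frame dummy inputs; setting `max_guidance_scale <= 1.0`
# disables CFG, which is what `test_disable_cfg` above exercises.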
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from PIL import Image from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WanVACEPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WanVACEPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) vae = AutoencoderKLWan( base_dim=3, z_dim=16, dim_mult=[1, 1, 1, 1], num_res_blocks=1, temperal_downsample=[False, True, True], ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) transformer = WanVACETransformer3DModel( patch_size=(1, 2, 2), num_attention_heads=2, attention_head_dim=12, in_channels=16, out_channels=16, text_dim=32, freq_dim=256, ffn_dim=32, num_layers=3, cross_attn_norm=True, qk_norm="rms_norm_across_heads", rope_max_seq_len=32, vace_layers=[0, 2], vace_in_channels=96, ) components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) num_frames = 17 height = 16 width = 16 video = [Image.new("RGB", (height, width))] * num_frames mask = [Image.new("L", (height, width), 0)] * num_frames inputs = { "video": video, "mask": mask, "prompt": "dance monkey", "negative_prompt": "negative", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "height": 16, "width": 16, "num_frames": num_frames, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames[0] self.assertEqual(video.shape, (17, 3, 16, 16)) # fmt: off expected_slice = [0.4523, 0.45198, 0.44872, 0.45326, 0.45211, 0.45258, 0.45344, 0.453, 0.52431, 0.52572, 
0.50701, 0.5118, 0.53717, 0.53093, 0.50557, 0.51402] # fmt: on video_slice = video.flatten() video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) video_slice = [round(x, 5) for x in video_slice.tolist()] self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) def test_inference_with_single_reference_image(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["reference_images"] = Image.new("RGB", (16, 16)) video = pipe(**inputs).frames[0] self.assertEqual(video.shape, (17, 3, 16, 16)) # fmt: off expected_slice = [0.45247, 0.45214, 0.44874, 0.45314, 0.45171, 0.45299, 0.45428, 0.45317, 0.51378, 0.52658, 0.53361, 0.52303, 0.46204, 0.50435, 0.52555, 0.51342] # fmt: on video_slice = video.flatten() video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) video_slice = [round(x, 5) for x in video_slice.tolist()] self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) def test_inference_with_multiple_reference_image(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["reference_images"] = [[Image.new("RGB", (16, 16))] * 2] video = pipe(**inputs).frames[0] self.assertEqual(video.shape, (17, 3, 16, 16)) # fmt: off expected_slice = [0.45321, 0.45221, 0.44818, 0.45375, 0.45268, 0.4519, 0.45271, 0.45253, 0.51244, 0.52223, 0.51253, 0.51321, 0.50743, 0.51177, 0.51626, 0.50983] # fmt: on video_slice = video.flatten() video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) video_slice = [round(x, 5) for x in video_slice.tolist()] self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) @unittest.skip("Test not supported") def test_attention_slicing_forward_pass(self): pass @unittest.skip("Errors out because passing multiple prompts at once is not yet supported by this pipeline.") def test_encode_prompt_works_in_isolation(self): pass @unittest.skip("Batching is not yet supported with this pipeline") def test_inference_batch_consistent(self): pass @unittest.skip("Batching is not yet supported with this pipeline") def test_inference_batch_single_identical(self): return super().test_inference_batch_single_identical() @unittest.skip( "AutoencoderKLWan encoded latents are always in FP32. This test is not designed to handle mixed dtype inputs" ) def test_float16_inference(self): pass @unittest.skip( "AutoencoderKLWan encoded latents are always in FP32. This test is not designed to handle mixed dtype inputs" ) def test_save_load_float16(self): pass
diffusers/tests/pipelines/wan/test_wan_vace.py/0
{ "file_path": "diffusers/tests/pipelines/wan/test_wan_vace.py", "repo_id": "diffusers", "token_count": 3480 }
200
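The dummy inputs above use `num_frames = 17` because the Wan VAE compresses frames temporally (two `temperal_downsample` stages of 2x in the dummy `AutoencoderKLWan`), so pixel frame counts of the form `4k + 1` map cleanly onto latent frames. A small sketch of that bookkeeping, assuming the usual `(n - 1) / stride + 1` relation:

temporal_stride = 4  # 2x * 2x temporal downsampling in the dummy VAE config above
num_frames = 17      # from get_dummy_inputs above

assert (num_frames - 1) % temporal_stride == 0, "frame count should be of the form 4k + 1"
num_latent_frames = (num_frames - 1) // temporal_stride + 1  # -> 5 latent frames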
from diffusers.utils import is_torch_available
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_peak_memory_stats,
    torch_device,
)


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only

        Taken from
        https://github.com/huggingface/transformers/blob/566302686a71de14125717dea9a6a45b24d42b37/tests/quantization/bnb/test_4bit.py#L62C5-L78C77
        """

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@torch.no_grad()
@torch.inference_mode()
def get_memory_consumption_stat(model, inputs):
    backend_reset_peak_memory_stats(torch_device)
    backend_empty_cache(torch_device)

    model(**inputs)
    max_mem_allocated = backend_max_memory_allocated(torch_device)
    return max_mem_allocated
diffusers/tests/quantization/utils.py/0
{ "file_path": "diffusers/tests/quantization/utils.py", "repo_id": "diffusers", "token_count": 725 }
201
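A hedged usage sketch of the two helpers above: wrap a plain linear layer with `LoRALayer` and measure peak memory for one forward pass with `get_memory_consumption_stat`. The tiny model and shapes are made up for illustration, `LoRALayer`, `get_memory_consumption_stat`, and `torch_device` are assumed to come from the module above, and an accelerator is assumed because the helper queries peak device memory:

import torch
import torch.nn as nn


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(32, 8)

    def forward(self, hidden_states):
        return self.proj(hidden_states)


model = TinyModel().to(torch_device)
model.proj = LoRALayer(model.proj, rank=4)  # wrap the linear layer with the adapter above

inputs = {"hidden_states": torch.randn(2, 32, device=torch_device)}
peak_bytes = get_memory_consumption_stat(model, inputs)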
import gc import tempfile import unittest import torch from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name from diffusers.utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from .single_file_testing_utils import ( SDSingleFileTesterMixin, download_diffusers_config, download_original_config, download_single_file_checkpoint, ) enable_full_determinism() @slow @require_torch_accelerator class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionControlNetInpaintPipeline ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt" original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml" repo_id = "stable-diffusion-v1-5/stable-diffusion-inpainting" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self): control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) inputs = { "prompt": "bird", "image": image, "control_image": control_image, "mask_image": mask_image, "generator": torch.Generator(device="cpu").manual_seed(0), "num_inference_steps": 3, "output_type": "np", } return inputs def test_single_file_format_inference_is_same_as_pretrained(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet, safety_checker=None) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload(device=torch_device) pipe_sf = self.pipeline_class.from_single_file(self.ckpt_path, controlnet=controlnet, safety_checker=None) pipe_sf.unet.set_default_attn_processor() pipe_sf.enable_model_cpu_offload(device=torch_device) inputs = self.get_inputs() output = pipe(**inputs).images[0] inputs = self.get_inputs() output_sf = pipe_sf(**inputs).images[0] max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten()) assert max_diff < 2e-3 def test_single_file_components(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe = self.pipeline_class.from_pretrained( self.repo_id, variant="fp16", safety_checker=None, controlnet=controlnet ) pipe_single_file = self.pipeline_class.from_single_file( self.ckpt_path, safety_checker=None, controlnet=controlnet, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_components_local_files_only(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe = self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None, controlnet=controlnet) with tempfile.TemporaryDirectory() as tmpdir: repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) 
local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) pipe_single_file = self.pipeline_class.from_single_file( local_ckpt_path, controlnet=controlnet, safety_checker=None, local_files_only=True ) super()._compare_component_configs(pipe, pipe_single_file) @unittest.skip("runwayml original config repo does not exist") def test_single_file_components_with_original_config(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) pipe_single_file = self.pipeline_class.from_single_file( self.ckpt_path, controlnet=controlnet, original_config=self.original_config ) super()._compare_component_configs(pipe, pipe_single_file) @unittest.skip("runwayml original config repo does not exist") def test_single_file_components_with_original_config_local_files_only(self): controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" ) pipe = self.pipeline_class.from_pretrained( self.repo_id, controlnet=controlnet, safety_checker=None, ) with tempfile.TemporaryDirectory() as tmpdir: repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) local_original_config = download_original_config(self.original_config, tmpdir) pipe_single_file = self.pipeline_class.from_single_file( local_ckpt_path, original_config=local_original_config, controlnet=controlnet, safety_checker=None, local_files_only=True, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_components_with_diffusers_config(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) pipe_single_file = self.pipeline_class.from_single_file( self.ckpt_path, controlnet=controlnet, config=self.repo_id, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_components_with_diffusers_config_local_files_only(self): controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16", ) pipe = self.pipeline_class.from_pretrained( self.repo_id, controlnet=controlnet, safety_checker=None, ) with tempfile.TemporaryDirectory() as tmpdir: repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) pipe_single_file = self.pipeline_class.from_single_file( local_ckpt_path, config=local_diffusers_config, controlnet=controlnet, safety_checker=None, local_files_only=True, ) super()._compare_component_configs(pipe, pipe_single_file) def test_single_file_setting_pipeline_dtype_to_fp16(self): controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" ) single_file_pipe = self.pipeline_class.from_single_file( self.ckpt_path, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 ) super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe)
diffusers/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py", "repo_id": "diffusers", "token_count": 3691 }
202
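The local-files-only tests above rely on `_extract_repo_id_and_weights_name` to split a Hub checkpoint URL into a repo id and a weights file name before re-downloading it into a temporary directory. A rough illustration of the expected split for the checkpoint URL used above (the real helper handles more URL shapes):

ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"

# Expected behaviour, roughly: repo id "botp/stable-diffusion-v1-5-inpainting"
# and weights name "sd-v1-5-inpainting.ckpt".
parts = ckpt_path.removeprefix("https://huggingface.co/").split("/blob/main/")
repo_id, weights_name = parts[0], parts[1]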
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect import os import re import warnings from collections import OrderedDict from difflib import get_close_matches from pathlib import Path from diffusers.models.auto import get_values from diffusers.utils import ENV_VARS_TRUE_VALUES, is_flax_available, is_torch_available # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_repo.py PATH_TO_DIFFUSERS = "src/diffusers" PATH_TO_TESTS = "tests" PATH_TO_DOC = "docs/source/en" # Update this list with models that are supposed to be private. PRIVATE_MODELS = [ "DPRSpanPredictor", "RealmBertModel", "T5Stack", "TFDPRSpanPredictor", ] # Update this list for models that are not tested with a comment explaining the reason it should not be. # Being in this list is an exception and should **not** be the rule. IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [ # models to ignore for not tested "OPTDecoder", # Building part of bigger (tested) model. "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. "SegformerDecodeHead", # Building part of bigger (tested) model. "PLBartEncoder", # Building part of bigger (tested) model. "PLBartDecoder", # Building part of bigger (tested) model. "PLBartDecoderWrapper", # Building part of bigger (tested) model. "BigBirdPegasusEncoder", # Building part of bigger (tested) model. "BigBirdPegasusDecoder", # Building part of bigger (tested) model. "BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model. "DetrEncoder", # Building part of bigger (tested) model. "DetrDecoder", # Building part of bigger (tested) model. "DetrDecoderWrapper", # Building part of bigger (tested) model. "M2M100Encoder", # Building part of bigger (tested) model. "M2M100Decoder", # Building part of bigger (tested) model. "Speech2TextEncoder", # Building part of bigger (tested) model. "Speech2TextDecoder", # Building part of bigger (tested) model. "LEDEncoder", # Building part of bigger (tested) model. "LEDDecoder", # Building part of bigger (tested) model. "BartDecoderWrapper", # Building part of bigger (tested) model. "BartEncoder", # Building part of bigger (tested) model. "BertLMHeadModel", # Needs to be setup as decoder. "BlenderbotSmallEncoder", # Building part of bigger (tested) model. "BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model. "BlenderbotEncoder", # Building part of bigger (tested) model. "BlenderbotDecoderWrapper", # Building part of bigger (tested) model. "MBartEncoder", # Building part of bigger (tested) model. "MBartDecoderWrapper", # Building part of bigger (tested) model. "MegatronBertLMHeadModel", # Building part of bigger (tested) model. "MegatronBertEncoder", # Building part of bigger (tested) model. "MegatronBertDecoder", # Building part of bigger (tested) model. "MegatronBertDecoderWrapper", # Building part of bigger (tested) model. 
"PegasusEncoder", # Building part of bigger (tested) model. "PegasusDecoderWrapper", # Building part of bigger (tested) model. "DPREncoder", # Building part of bigger (tested) model. "ProphetNetDecoderWrapper", # Building part of bigger (tested) model. "RealmBertModel", # Building part of bigger (tested) model. "RealmReader", # Not regular model. "RealmScorer", # Not regular model. "RealmForOpenQA", # Not regular model. "ReformerForMaskedLM", # Needs to be setup as decoder. "Speech2Text2DecoderWrapper", # Building part of bigger (tested) model. "TFDPREncoder", # Building part of bigger (tested) model. "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFModelMixin ?) "TFRobertaForMultipleChoice", # TODO: fix "TrOCRDecoderWrapper", # Building part of bigger (tested) model. "SeparableConv1D", # Building part of bigger (tested) model. "FlaxBartForCausalLM", # Building part of bigger (tested) model. "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. "OPTDecoderWrapper", ] # Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't # trigger the common tests. TEST_FILES_WITH_NO_COMMON_TESTS = [ "models/decision_transformer/test_modeling_decision_transformer.py", "models/camembert/test_modeling_camembert.py", "models/mt5/test_modeling_flax_mt5.py", "models/mbart/test_modeling_mbart.py", "models/mt5/test_modeling_mt5.py", "models/pegasus/test_modeling_pegasus.py", "models/camembert/test_modeling_tf_camembert.py", "models/mt5/test_modeling_tf_mt5.py", "models/xlm_roberta/test_modeling_tf_xlm_roberta.py", "models/xlm_roberta/test_modeling_flax_xlm_roberta.py", "models/xlm_prophetnet/test_modeling_xlm_prophetnet.py", "models/xlm_roberta/test_modeling_xlm_roberta.py", "models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", "models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", "models/decision_transformer/test_modeling_decision_transformer.py", ] # Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and # should **not** be the rule. 
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ # models to ignore for model xxx mapping "DPTForDepthEstimation", "DecisionTransformerGPT2Model", "GLPNForDepthEstimation", "ViltForQuestionAnswering", "ViltForImagesAndTextClassification", "ViltForImageAndTextRetrieval", "ViltForMaskedLM", "XGLMEncoder", "XGLMDecoder", "XGLMDecoderWrapper", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "SegformerDecodeHead", "FlaxBeitForMaskedImageModeling", "PLBartEncoder", "PLBartDecoder", "PLBartDecoderWrapper", "BeitForMaskedImageModeling", "CLIPTextModel", "CLIPVisionModel", "TFCLIPTextModel", "TFCLIPVisionModel", "FlaxCLIPTextModel", "FlaxCLIPVisionModel", "FlaxWav2Vec2ForCTC", "DetrForSegmentation", "DPRReader", "FlaubertForQuestionAnswering", "FlavaImageCodebook", "FlavaTextModel", "FlavaImageModel", "FlavaMultimodalModel", "GPT2DoubleHeadsModel", "LukeForMaskedLM", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "OpenAIGPTDoubleHeadsModel", "RagModel", "RagSequenceForGeneration", "RagTokenForGeneration", "RealmEmbedder", "RealmForOpenQA", "RealmScorer", "RealmReader", "TFDPRReader", "TFGPT2DoubleHeadsModel", "TFOpenAIGPTDoubleHeadsModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "Wav2Vec2ForCTC", "HubertForCTC", "SEWForCTC", "SEWDForCTC", "XLMForQuestionAnswering", "XLNetForQuestionAnswering", "SeparableConv1D", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", "VisualBertForQuestionAnswering", "VisualBertForMultipleChoice", "TFWav2Vec2ForCTC", "TFHubertForCTC", "MaskFormerForInstanceSegmentation", ] # Update this list for models that have multiple model types for the same # model doc MODEL_TYPE_TO_DOC_MAPPING = OrderedDict( [ ("data2vec-text", "data2vec"), ("data2vec-audio", "data2vec"), ("data2vec-vision", "data2vec"), ] ) # This is to make sure the transformers module imported is the one in the repo. spec = importlib.util.spec_from_file_location( "diffusers", os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), submodule_search_locations=[PATH_TO_DIFFUSERS], ) diffusers = spec.loader.load_module() def check_model_list(): """Check the model list inside the transformers library.""" # Get the models from the directory structure of `src/diffusers/models/` models_dir = os.path.join(PATH_TO_DIFFUSERS, "models") _models = [] for model in os.listdir(models_dir): model_dir = os.path.join(models_dir, model) if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir): _models.append(model) # Get the models from the directory structure of `src/transformers/models/` models = [model for model in dir(diffusers.models) if not model.startswith("__")] missing_models = sorted(set(_models).difference(models)) if missing_models: raise Exception( f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." ) # If some modeling modules should be ignored for all checks, they should be added in the nested list # _ignore_modules of this function. 
def get_model_modules(): """Get the model modules inside the transformers library.""" _ignore_modules = [ "modeling_auto", "modeling_encoder_decoder", "modeling_marian", "modeling_mmbt", "modeling_outputs", "modeling_retribert", "modeling_utils", "modeling_flax_auto", "modeling_flax_encoder_decoder", "modeling_flax_utils", "modeling_speech_encoder_decoder", "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_transfo_xl_utilities", "modeling_tf_auto", "modeling_tf_encoder_decoder", "modeling_tf_outputs", "modeling_tf_pytorch_utils", "modeling_tf_utils", "modeling_tf_transfo_xl_utilities", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] modules = [] for model in dir(diffusers.models): # There are some magic dunder attributes in the dir, we ignore them if not model.startswith("__"): model_module = getattr(diffusers.models, model) for submodule in dir(model_module): if submodule.startswith("modeling") and submodule not in _ignore_modules: modeling_module = getattr(model_module, submodule) if inspect.ismodule(modeling_module): modules.append(modeling_module) return modules def get_models(module, include_pretrained=False): """Get the objects in module that are models.""" models = [] model_classes = (diffusers.ModelMixin, diffusers.TFModelMixin, diffusers.FlaxModelMixin) for attr_name in dir(module): if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name): continue attr = getattr(module, attr_name) if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__: models.append((attr_name, attr)) return models def is_a_private_model(model): """Returns True if the model should not be in the main init.""" if model in PRIVATE_MODELS: return True # Wrapper, Encoder and Decoder are all privates if model.endswith("Wrapper"): return True if model.endswith("Encoder"): return True if model.endswith("Decoder"): return True return False def check_models_are_in_init(): """Checks all models defined in the library are in the main init.""" models_not_in_init = [] dir_transformers = dir(diffusers) for module in get_model_modules(): models_not_in_init += [ model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers ] # Remove private models models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)] if len(models_not_in_init) > 0: raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.") # If some test_modeling files should be ignored when checking models are all tested, they should be added in the # nested list _ignore_files of this function. def get_model_test_files(): """Get the model test files. The returned files should NOT contain the `tests` (i.e. `PATH_TO_TESTS` defined in this script). They will be considered as paths relative to `tests`. A caller has to use `os.path.join(PATH_TO_TESTS, ...)` to access the files. 
""" _ignore_files = [ "test_modeling_common", "test_modeling_encoder_decoder", "test_modeling_flax_encoder_decoder", "test_modeling_flax_speech_encoder_decoder", "test_modeling_marian", "test_modeling_tf_common", "test_modeling_tf_encoder_decoder", ] test_files = [] # Check both `PATH_TO_TESTS` and `PATH_TO_TESTS/models` model_test_root = os.path.join(PATH_TO_TESTS, "models") model_test_dirs = [] for x in os.listdir(model_test_root): x = os.path.join(model_test_root, x) if os.path.isdir(x): model_test_dirs.append(x) for target_dir in [PATH_TO_TESTS] + model_test_dirs: for file_or_dir in os.listdir(target_dir): path = os.path.join(target_dir, file_or_dir) if os.path.isfile(path): filename = os.path.split(path)[-1] if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files: file = os.path.join(*path.split(os.sep)[1:]) test_files.append(file) return test_files # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class # for the all_model_classes variable. def find_tested_models(test_file): """Parse the content of test_file to detect what's in all_model_classes""" # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f: content = f.read() all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content) # Check with one less parenthesis as well all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content) if len(all_models) > 0: model_tested = [] for entry in all_models: for line in entry.split(","): name = line.strip() if len(name) > 0: model_tested.append(name) return model_tested def check_models_are_tested(module, test_file): """Check models defined in module are tested in test_file.""" # XxxModelMixin are not tested defined_models = get_models(module) tested_models = find_tested_models(test_file) if tested_models is None: if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS: return return [ f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. " + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file " + "`utils/check_repo.py`." ] failures = [] for model_name, _ in defined_models: if model_name not in tested_models and model_name not in IGNORE_NON_TESTED: failures.append( f"{model_name} is defined in {module.__name__} but is not tested in " + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file." + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`" + "in the file `utils/check_repo.py`." 
) return failures def check_all_models_are_tested(): """Check all models are properly tested.""" modules = get_model_modules() test_files = get_model_test_files() failures = [] for module in modules: test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file] if len(test_file) == 0: failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") elif len(test_file) > 1: failures.append(f"{module.__name__} has several test files: {test_file}.") else: test_file = test_file[0] new_failures = check_models_are_tested(module, test_file) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def get_all_auto_configured_models(): """Return the list of all models in at least one auto class.""" result = set() # To avoid duplicates we concatenate all model classes in a set. if is_torch_available(): for attr_name in dir(diffusers.models.auto.modeling_auto): if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(diffusers.models.auto.modeling_auto, attr_name))) if is_flax_available(): for attr_name in dir(diffusers.models.auto.modeling_flax_auto): if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"): result = result | set(get_values(getattr(diffusers.models.auto.modeling_flax_auto, attr_name))) return list(result) def ignore_unautoclassed(model_name): """Rules to determine if `name` should be in an auto class.""" # Special white list if model_name in IGNORE_NON_AUTO_CONFIGURED: return True # Encoder and Decoder should be ignored if "Encoder" in model_name or "Decoder" in model_name: return True return False def check_models_are_auto_configured(module, all_auto_models): """Check models defined in module are each in an auto class.""" defined_models = get_models(module) failures = [] for model_name, _ in defined_models: if model_name not in all_auto_models and not ignore_unautoclassed(model_name): failures.append( f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. " "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file " "`utils/check_repo.py`." ) return failures def check_all_models_are_auto_configured(): """Check all models are each in an auto class.""" missing_backends = [] if not is_torch_available(): missing_backends.append("PyTorch") if not is_flax_available(): missing_backends.append("Flax") if len(missing_backends) > 0: missing = ", ".join(missing_backends) if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: raise Exception( "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " f"Transformers repo, the following are missing: {missing}." ) else: warnings.warn( "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you " "didn't make any change in one of those backends modeling files, you should probably execute the " "command above to be on the safe side." 
) modules = get_model_modules() all_auto_models = get_all_auto_configured_models() failures = [] for module in modules: new_failures = check_models_are_auto_configured(module, all_auto_models) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) _re_decorator = re.compile(r"^\s*@(\S+)\s+$") def check_decorator_order(filename): """Check that in the test file `filename` the slow decorator is always last.""" with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() decorator_before = None errors = [] for i, line in enumerate(lines): search = _re_decorator.search(line) if search is not None: decorator_name = search.groups()[0] if decorator_before is not None and decorator_name.startswith("parameterized"): errors.append(i) decorator_before = decorator_name elif decorator_before is not None: decorator_before = None return errors def check_all_decorator_order(): """Check that in all test files, the slow decorator is always last.""" errors = [] for fname in os.listdir(PATH_TO_TESTS): if fname.endswith(".py"): filename = os.path.join(PATH_TO_TESTS, fname) new_errors = check_decorator_order(filename) errors += [f"- {filename}, line {i}" for i in new_errors] if len(errors) > 0: msg = "\n".join(errors) raise ValueError( "The parameterized decorator (and its variants) should always be first, but this is not the case in the" f" following files:\n{msg}" ) def find_all_documented_objects(): """Parse the content of all doc files to detect which classes and functions it documents""" documented_obj = [] for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] for doc_file in Path(PATH_TO_DOC).glob("**/*.md"): with open(doc_file, "r", encoding="utf-8", newline="\n") as f: content = f.read() raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content) documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] return documented_obj # One good reason for not being documented is to be deprecated. Put in this list deprecated objects. DEPRECATED_OBJECTS = [ "AutoModelWithLMHead", "BartPretrainedModel", "DataCollator", "DataCollatorForSOP", "GlueDataset", "GlueDataTrainingArguments", "LineByLineTextDataset", "LineByLineWithRefDataset", "LineByLineWithSOPTextDataset", "PretrainedBartModel", "PretrainedFSMTModel", "SingleSentenceClassificationProcessor", "SquadDataTrainingArguments", "SquadDataset", "SquadExample", "SquadFeatures", "SquadV1Processor", "SquadV2Processor", "TFAutoModelWithLMHead", "TFBartPretrainedModel", "TextDataset", "TextDatasetForNextSentencePrediction", "Wav2Vec2ForMaskedLM", "Wav2Vec2Tokenizer", "glue_compute_metrics", "glue_convert_examples_to_features", "glue_output_modes", "glue_processors", "glue_tasks_num_labels", "squad_convert_examples_to_features", "xnli_compute_metrics", "xnli_output_modes", "xnli_processors", "xnli_tasks_num_labels", "TFTrainer", "TFTrainingArguments", ] # Exceptionally, some objects should not be documented after all rules passed. # ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT! UNDOCUMENTED_OBJECTS = [ "AddedToken", # This is a tokenizers class. "BasicTokenizer", # Internal, should never have been in the main init. "CharacterTokenizer", # Internal, should never have been in the main init. 
"DPRPretrainedReader", # Like an Encoder. "DummyObject", # Just picked by mistake sometimes. "MecabTokenizer", # Internal, should never have been in the main init. "ModelCard", # Internal type. "SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer) "TFDPRPretrainedReader", # Like an Encoder. "TransfoXLCorpus", # Internal type. "WordpieceTokenizer", # Internal, should never have been in the main init. "absl", # External module "add_end_docstrings", # Internal, should never have been in the main init. "add_start_docstrings", # Internal, should never have been in the main init. "cached_path", # Internal used for downloading models. "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights "logger", # Internal logger "logging", # External module "requires_backends", # Internal function ] # This list should be empty. Objects in it should get their own doc page. SHOULD_HAVE_THEIR_OWN_PAGE = [ # Benchmarks "PyTorchBenchmark", "PyTorchBenchmarkArguments", "TensorFlowBenchmark", "TensorFlowBenchmarkArguments", ] def ignore_undocumented(name): """Rules to determine if `name` should be undocumented.""" # NOT DOCUMENTED ON PURPOSE. # Constants uppercase are not documented. if name.isupper(): return True # ModelMixins / Encoders / Decoders / Layers / Embeddings / Attention are not documented. if ( name.endswith("ModelMixin") or name.endswith("Decoder") or name.endswith("Encoder") or name.endswith("Layer") or name.endswith("Embeddings") or name.endswith("Attention") ): return True # Submodules are not documented. if os.path.isdir(os.path.join(PATH_TO_DIFFUSERS, name)) or os.path.isfile( os.path.join(PATH_TO_DIFFUSERS, f"{name}.py") ): return True # All load functions are not documented. if name.startswith("load_tf") or name.startswith("load_pytorch"): return True # is_xxx_available functions are not documented. if name.startswith("is_") and name.endswith("_available"): return True # Deprecated objects are not documented. if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS: return True # MMBT model does not really work. if name.startswith("MMBT"): return True if name in SHOULD_HAVE_THEIR_OWN_PAGE: return True return False def check_all_objects_are_documented(): """Check all models are properly documented.""" documented_objs = find_all_documented_objects() modules = diffusers._modules objects = [c for c in dir(diffusers) if c not in modules and not c.startswith("_")] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] if len(undocumented_objs) > 0: raise Exception( "The following objects are in the public init so should be documented:\n - " + "\n - ".join(undocumented_objs) ) check_docstrings_are_in_md() check_model_type_doc_match() def check_model_type_doc_match(): """Check all doc pages have a corresponding model type.""" model_doc_folder = Path(PATH_TO_DOC) / "model_doc" model_docs = [m.stem for m in model_doc_folder.glob("*.md")] model_types = list(diffusers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys()) model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types] errors = [] for m in model_docs: if m not in model_types and m != "auto": close_matches = get_close_matches(m, model_types) error_message = f"{m} is not a proper model identifier." if len(close_matches) > 0: close_matches = "/".join(close_matches) error_message += f" Did you mean {close_matches}?" 
errors.append(error_message) if len(errors) > 0: raise ValueError( "Some model doc pages do not match any existing model type:\n" + "\n".join(errors) + "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in " "models/auto/configuration_auto.py." ) # Re pattern to catch :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`. _re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`") # Re pattern to catch things between double backquotes. _re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)") # Re pattern to catch example introduction. _re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE) def is_rst_docstring(docstring): """ Returns `True` if `docstring` is written in rst. """ if _re_rst_special_words.search(docstring) is not None: return True if _re_double_backquotes.search(docstring) is not None: return True if _re_rst_example.search(docstring) is not None: return True return False def check_docstrings_are_in_md(): """Check all docstrings are in md""" files_with_rst = [] for file in Path(PATH_TO_DIFFUSERS).glob("**/*.py"): with open(file, "r") as f: code = f.read() docstrings = code.split('"""') for idx, docstring in enumerate(docstrings): if idx % 2 == 0 or not is_rst_docstring(docstring): continue files_with_rst.append(file) break if len(files_with_rst) > 0: raise ValueError( "The following files have docstrings written in rst:\n" + "\n".join([f"- {f}" for f in files_with_rst]) + "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n" "(`pip install git+https://github.com/huggingface/doc-builder`)" ) def check_repo_quality(): """Check all models are properly tested and documented.""" print("Checking all models are included.") check_model_list() print("Checking all models are public.") check_models_are_in_init() print("Checking all models are properly tested.") check_all_decorator_order() check_all_models_are_tested() print("Checking all objects are properly documented.") check_all_objects_are_documented() print("Checking all models are in at least one auto class.") check_all_models_are_auto_configured() if __name__ == "__main__": check_repo_quality()
diffusers/utils/check_repo.py/0
{ "file_path": "diffusers/utils/check_repo.py", "repo_id": "diffusers", "token_count": 12370 }
203
# Copyright 2025 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ import os from datetime import datetime as dt from datetime import timezone from github import Github LABELS_TO_EXEMPT = [ "close-to-merge", "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/diffusers") open_issues = repo.get_issues(state="open") for issue in open_issues: labels = [label.name.lower() for label in issue.get_labels()] if "stale" in labels: comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if last_comment is not None and last_comment.user.login != "github-actions[bot]": # Opens the issue if someone other than Stalebot commented. issue.edit(state="open") issue.remove_from_labels("stale") elif ( (dt.now(timezone.utc) - issue.updated_at).days > 23 and (dt.now(timezone.utc) - issue.created_at).days >= 30 and not any(label in LABELS_TO_EXEMPT for label in labels) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) issue.add_to_labels("stale") if __name__ == "__main__": main()
diffusers/utils/stale.py/0
{ "file_path": "diffusers/utils/stale.py", "repo_id": "diffusers", "token_count": 1008 }
204
<p align="center"> <img alt="LeRobot, Hugging Face Robotics Library" src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/lerobot-logo-thumbnail.png" width="100%"> <br/> <br/> </p> <div align="center"> [![Tests](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml/badge.svg?branch=main)](https://github.com/huggingface/lerobot/actions/workflows/nightly.yml?query=branch%3Amain) [![Python versions](https://img.shields.io/pypi/pyversions/lerobot)](https://www.python.org/downloads/) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/huggingface/lerobot/blob/main/LICENSE) [![Status](https://img.shields.io/pypi/status/lerobot)](https://pypi.org/project/lerobot/) [![Version](https://img.shields.io/pypi/v/lerobot)](https://pypi.org/project/lerobot/) [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.1-ff69b4.svg)](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md) [![Discord](https://dcbadge.vercel.app/api/server/C5P34WJ68S?style=flat)](https://discord.gg/s3KuuzsPFb) <!-- [![Coverage](https://codecov.io/gh/huggingface/lerobot/branch/main/graph/badge.svg?token=TODO)](https://codecov.io/gh/huggingface/lerobot) --> </div> <h2 align="center"> <p><a href="https://huggingface.co/docs/lerobot/hope_jr"> Build Your Own HopeJR Robot!</a></p> </h2> <div align="center"> <img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/hope_jr/hopejr.png" alt="HopeJR robot" title="HopeJR robot" width="60%" /> <p><strong>Meet HopeJR – A humanoid robot arm and hand for dexterous manipulation!</strong></p> <p>Control it with exoskeletons and gloves for precise hand movements.</p> <p>Perfect for advanced manipulation tasks! 🤖</p> <p><a href="https://huggingface.co/docs/lerobot/hope_jr"> See the full HopeJR tutorial here.</a></p> </div> <br/> <h2 align="center"> <p><a href="https://huggingface.co/docs/lerobot/so101"> Build Your Own SO-101 Robot!</a></p> </h2> <div align="center"> <table> <tr> <td align="center"><img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/so101/so101.webp" alt="SO-101 follower arm" title="SO-101 follower arm" width="90%"/></td> <td align="center"><img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/so101/so101-leader.webp" alt="SO-101 leader arm" title="SO-101 leader arm" width="90%"/></td> </tr> </table> <p><strong>Meet the updated SO100, the SO-101 – Just €114 per arm!</strong></p> <p>Train it in minutes with a few simple moves on your laptop.</p> <p>Then sit back and watch your creation act autonomously! 🤯</p> <p><a href="https://huggingface.co/docs/lerobot/so101"> See the full SO-101 tutorial here.</a></p> <p>Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!</p> <p>Check out the <a href="https://huggingface.co/docs/lerobot/lekiwi">LeKiwi tutorial</a> and bring your robot to life on wheels.</p> <img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/lekiwi/kiwi.webp" alt="LeKiwi mobile robot" title="LeKiwi mobile robot" width="50%"> </div> <br/> <h3 align="center"> <p>LeRobot: State-of-the-art AI for real-world robotics</p> </h3> --- 🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. 
🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning. 🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulation environments to get started without assembling a robot. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there. 🤗 LeRobot hosts pretrained models and datasets on this Hugging Face community page: [huggingface.co/lerobot](https://huggingface.co/lerobot) #### Examples of pretrained models on simulation environments <table> <tr> <td><img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/gym/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td> <td><img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/gym/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td> <td><img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/gym/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td> </tr> <tr> <td align="center">ACT policy on ALOHA env</td> <td align="center">TDMPC policy on SimXArm env</td> <td align="center">Diffusion policy on PushT env</td> </tr> </table> ## Installation LeRobot works with Python 3.10+ and PyTorch 2.2+. ### Environment Setup Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/free/miniconda/index.html): ```bash conda create -y -n lerobot python=3.10 conda activate lerobot ``` When using `miniconda`, install `ffmpeg` in your environment: ```bash conda install ffmpeg -c conda-forge ``` > **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can: > > - _[On any platform]_ Explicitly install `ffmpeg 7.X` using: > > ```bash > conda install ffmpeg=7.1.1 -c conda-forge > ``` > > - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`. ### Install LeRobot 🤗 #### From Source First, clone the repository and navigate into the directory: ```bash git clone https://github.com/huggingface/lerobot.git cd lerobot ``` Then, install the library in editable mode. This is useful if you plan to contribute to the code. ```bash pip install -e . ``` > **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run: > `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev`. 
For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg) For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras: - [aloha](https://github.com/huggingface/gym-aloha) - [xarm](https://github.com/huggingface/gym-xarm) - [pusht](https://github.com/huggingface/gym-pusht) For instance, to install 🤗 LeRobot with aloha and pusht, use: ```bash pip install -e ".[aloha, pusht]" ``` ### Installation from PyPI **Core Library:** Install the base package with: ```bash pip install lerobot ``` _This installs only the default dependencies._ **Extra Features:** To install additional functionality, use one of the following: ```bash pip install 'lerobot[all]' # All available features pip install 'lerobot[aloha,pusht]' # Specific features (Aloha & Pusht) pip install 'lerobot[feetech]' # Feetech motor support ``` _Replace `[...]` with your desired features._ **Available Tags:** For a full list of optional dependencies, see: https://pypi.org/project/lerobot/ ### Weights & Biases To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with ```bash wandb login ``` (note: you will also need to enable WandB in the configuration. See below.) ### Visualize datasets Check out [example 1](https://github.com/huggingface/lerobot/blob/main/examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub. You can also locally visualize episodes from a dataset on the hub by executing our script from the command line: ```bash python -m lerobot.scripts.visualize_dataset \ --repo-id lerobot/pusht \ --episode-index 0 ``` or from a dataset in a local folder with the `root` option and the `--local-files-only` (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`) ```bash python -m lerobot.scripts.visualize_dataset \ --repo-id lerobot/pusht \ --root ./my_local_data_dir \ --local-files-only 1 \ --episode-index 0 ``` It will open `rerun.io` and display the camera streams, robot states and actions, like this: https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144 Our script can also visualize datasets stored on a distant server. See `python -m lerobot.scripts.visualize_dataset --help` for more instructions. ### The `LeRobotDataset` format A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face hub or a local folder simply with e.g. `dataset = LeRobotDataset("lerobot/aloha_static_coffee")` and can be indexed into like any Hugging Face and PyTorch dataset. For instance `dataset[0]` will retrieve a single temporal frame from the dataset containing observation(s) and an action as PyTorch tensors ready to be fed to a model. A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. 
For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](https://github.com/huggingface/lerobot/blob/main/examples/1_load_lerobot_dataset.py) for more details on `delta_timestamps`. Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that would cover most type of features and specificities present in reinforcement learning and robotics, in simulation and in real-world, with a focus on cameras and robot states but easily extended to other types of sensory inputs as long as they can be represented by a tensor. Here are the important details and internal structure organization of a typical `LeRobotDataset` instantiated with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`. The exact features will change from dataset to dataset but not the main aspects: ``` dataset attributes: ├ hf_dataset: a Hugging Face dataset (backed by Arrow/parquet). Typical features example: │ ├ observation.images.cam_high (VideoFrame): │ │ VideoFrame = {'path': path to a mp4 video, 'timestamp' (float32): timestamp in the video} │ ├ observation.state (list of float32): position of an arm joints (for instance) │ ... (more observations) │ ├ action (list of float32): goal position of an arm joints (for instance) │ ├ episode_index (int64): index of the episode for this sample │ ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode │ ├ timestamp (float32): timestamp in the episode │ ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode │ └ index (int64): general index in the whole dataset ├ episode_data_index: contains 2 tensors with the start and end indices of each episode │ ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0 │ └ to: (1D int64 tensor): last frame index for each episode — shape (num episodes,) ├ stats: a dictionary of statistics (max, mean, min, std) for each feature in the dataset, for instance │ ├ observation.images.cam_high: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.} │ ... ├ info: a dictionary of metadata on the dataset │ ├ codebase_version (str): this is to keep track of the codebase version the dataset was created with │ ├ fps (float): frame per second the dataset is recorded/synchronized to │ ├ video (bool): indicates if frames are encoded in mp4 video files to save space or stored as png files │ └ encoding (dict): if video, this documents the main options that were used with ffmpeg to encode the videos ├ videos_dir (Path): where the mp4 videos or png images are stored/accessed └ camera_keys (list of string): the keys to access camera features in the item returned by the dataset (e.g. `["observation.images.cam_high", ...]`) ``` A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely: - hf_dataset stored using Hugging Face datasets library serialization to parquet - videos are stored in mp4 format to save space - metadata are stored in plain json/jsonl files Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. 
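Putting the pieces above together, here is a minimal sketch of loading a dataset with temporal stacking via `delta_timestamps` and feeding it to a PyTorch `DataLoader`. It assumes the `lerobot/aloha_static_coffee` dataset used above and that the current `LeRobotDataset` constructor accepts a `delta_timestamps` argument; adapt the feature keys to whatever your dataset actually contains.

```python
import torch

from lerobot.datasets.lerobot_dataset import LeRobotDataset

# Ask for 3 past camera frames (1 s, 0.5 s and 0.2 s before the indexed frame)
# plus the indexed frame itself, as described above.
delta_timestamps = {"observation.images.cam_high": [-1.0, -0.5, -0.2, 0.0]}

dataset = LeRobotDataset("lerobot/aloha_static_coffee", delta_timestamps=delta_timestamps)
print(dataset.fps, dataset.num_frames)

item = dataset[0]
# The camera feature is now a stack of the 4 requested frames, e.g. (4, c, h, w).
print(item["observation.images.cam_high"].shape)

# Like any PyTorch dataset, it can be wrapped in a DataLoader for training.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
batch = next(iter(dataloader))
print(batch["action"].shape)  # e.g. (32, action_dim)
```

Note that when episodes are stored as videos, the requested frames are decoded from the underlying mp4 files at access time, so the items returned are plain tensors ready to be fed to a model.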
To work on a local dataset, you can specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location. ### Evaluate a pretrained policy Check out [example 2](https://github.com/huggingface/lerobot/blob/main/examples/2_evaluate_pretrained_policy.py) that illustrates how to download a pretrained policy from Hugging Face hub, and run an evaluation on its corresponding environment. We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht): ```bash lerobot-eval \ --policy.path=lerobot/diffusion_pusht \ --env.type=pusht \ --eval.batch_size=10 \ --eval.n_episodes=10 \ --policy.use_amp=false \ --policy.device=cuda ``` Note: After training your own policy, you can re-evaluate the checkpoints with: ```bash lerobot-eval --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model ``` See `lerobot-eval --help` for more instructions. ### Train your own policy Check out [example 3](https://github.com/huggingface/lerobot/blob/main/examples/3_train_policy.py) that illustrates how to train a model using our core library in python, and [example 4](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md) that shows how to use our training script from command line. To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`. A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs. \<img src="https://raw.githubusercontent.com/huggingface/lerobot/main/media/wandb.png" alt="WandB logs example"\> Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `lerobot-eval --help` for more instructions. #### Reproduce state-of-the-art (SOTA) We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performances. You can reproduce their training by loading the config from their run. Simply running: ```bash lerobot-train --config_path=lerobot/diffusion_pusht ``` reproduces SOTA results for Diffusion Policy on the PushT task. ## Contribute If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md). ### Add a pretrained policy Once you have trained a policy you may upload it to the Hugging Face hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)). You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). 
Within that there is a `pretrained_model` directory which should contain: - `config.json`: A serialized version of the policy configuration (following the policy's dataclass config). - `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format. - `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility. To upload these to the hub, run the following: ```bash huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model ``` See [eval.py](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/eval.py) for an example of how other people may use your policy. ### Acknowledgment - The LeRobot team 🤗 for building SmolVLA [Paper](https://arxiv.org/abs/2506.01844), [Blog](https://huggingface.co/blog/smolvla). - Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io). - Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io). - Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM). - Thanks to Antonio Loquercio and Ashish Kumar for their early support. - Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official). ## Citation If you want, you can cite this work with: ```bibtex @misc{cadene2024lerobot, author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Palma, Steven and Kooijmans, Pepijn and Aractingi, Michel and Shukor, Mustafa and Aubakirova, Dana and Russi, Martino and Capuano, Francesco and Pascal, Caroline and Choghari, Jade and Moss, Jess and Wolf, Thomas}, title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch}, howpublished = "\url{https://github.com/huggingface/lerobot}", year = {2024} } ``` ## Star History [![Star History Chart](https://api.star-history.com/svg?repos=huggingface/lerobot&type=Timeline)](https://star-history.com/#huggingface/lerobot&Timeline)
lerobot/README.md/0
{ "file_path": "lerobot/README.md", "repo_id": "lerobot", "token_count": 6636 }
205
# Imitation Learning on Real-World Robots

This tutorial will explain how to train a neural network to control a real robot autonomously.

**You'll learn:**

1. How to record and visualize your dataset.
2. How to train a policy using your data and prepare it for evaluation.
3. How to evaluate your policy and visualize the results.

By following these steps, you'll be able to replicate tasks such as picking up a Lego block and placing it in a bin with a high success rate, as shown in the video below.

<details>
<summary><strong>Video: pickup lego block task</strong></summary>
<div class="video-container">
    <video controls width="600">
        <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot_task.mp4" type="video/mp4" />
    </video>
</div>
</details>

This tutorial isn’t tied to a specific robot: we walk you through the commands and API snippets you can adapt for any supported platform.

During data collection, you’ll use a “teleoperation” device, such as a leader arm or keyboard, to teleoperate the robot and record its motion trajectories. Once you’ve gathered enough trajectories, you’ll train a neural network to imitate these trajectories and deploy the trained model so your robot can perform the task autonomously.

If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support.

## Set up and Calibrate

If you haven't yet set up and calibrated your robot and teleop device, please do so by following the robot-specific tutorial.

## Teleoperate

In this example, we’ll demonstrate how to teleoperate the SO101 robot. For each command, we also provide a corresponding API example. Note that the `id` associated with a robot is used to store the calibration file. It's important to use the same `id` when teleoperating, recording, and evaluating with the same setup.

<hfoptions id="teleoperate_so101">
<hfoption id="Command">

```bash
lerobot-teleoperate \
    --robot.type=so101_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.id=my_awesome_follower_arm \
    --teleop.type=so101_leader \
    --teleop.port=/dev/tty.usbmodem58760431551 \
    --teleop.id=my_awesome_leader_arm
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader
from lerobot.robots.so101_follower import SO101FollowerConfig, SO101Follower

robot_config = SO101FollowerConfig(
    port="/dev/tty.usbmodem58760431541",
    id="my_red_robot_arm",
)

teleop_config = SO101LeaderConfig(
    port="/dev/tty.usbmodem58760431551",
    id="my_blue_leader_arm",
)

robot = SO101Follower(robot_config)
teleop_device = SO101Leader(teleop_config)
robot.connect()
teleop_device.connect()

while True:
    action = teleop_device.get_action()
    robot.send_action(action)
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

The teleoperate command will automatically:

1. Identify any missing calibrations and initiate the calibration procedure.
2. Connect the robot and teleop device and start teleoperation.

## Cameras

To add cameras to your setup, follow this [Guide](./cameras#setup-cameras).

## Teleoperate with cameras

With `rerun`, you can teleoperate again while simultaneously visualizing the camera feeds and joint positions. In this example, we’re using the Koch arm.
<hfoptions id="teleoperate_koch_camera"> <hfoption id="Command"> ```bash lerobot-teleoperate \ --robot.type=koch_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=my_awesome_follower_arm \ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ --teleop.type=koch_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ --teleop.id=my_awesome_leader_arm \ --display_data=true ``` </hfoption> <hfoption id="API example"> <!-- prettier-ignore-start --> ```python from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower camera_config = { "front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30) } robot_config = KochFollowerConfig( port="/dev/tty.usbmodem585A0076841", id="my_red_robot_arm", cameras=camera_config ) teleop_config = KochLeaderConfig( port="/dev/tty.usbmodem58760431551", id="my_blue_leader_arm", ) robot = KochFollower(robot_config) teleop_device = KochLeader(teleop_config) robot.connect() teleop_device.connect() while True: observation = robot.get_observation() action = teleop_device.get_action() robot.send_action(action) ``` <!-- prettier-ignore-end --> </hfoption> </hfoptions> ## Record a dataset Once you're familiar with teleoperation, you can record your first dataset. We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens). Add your token to the CLI by running this command: ```bash huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential ``` Then store your Hugging Face repository name in a variable: ```bash HF_USER=$(huggingface-cli whoami | head -n 1) echo $HF_USER ``` Now you can record a dataset. To record 5 episodes and upload your dataset to the hub, adapt the code below for your robot and execute the command or API example. 
<hfoptions id="record"> <hfoption id="Command"> ```bash lerobot-record \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem585A0076841 \ --robot.id=my_awesome_follower_arm \ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ --teleop.type=so101_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ --teleop.id=my_awesome_leader_arm \ --display_data=true \ --dataset.repo_id=${HF_USER}/record-test \ --dataset.num_episodes=5 \ --dataset.single_task="Grab the black cube" ``` </hfoption> <hfoption id="API example"> <!-- prettier-ignore-start --> ```python from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets.utils import hw_to_dataset_features from lerobot.robots.so100_follower import SO100Follower, SO100FollowerConfig from lerobot.teleoperators.so100_leader.config_so100_leader import SO100LeaderConfig from lerobot.teleoperators.so100_leader.so100_leader import SO100Leader from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.utils import log_say from lerobot.utils.visualization_utils import _init_rerun from lerobot.record import record_loop NUM_EPISODES = 5 FPS = 30 EPISODE_TIME_SEC = 60 RESET_TIME_SEC = 10 TASK_DESCRIPTION = "My task description" # Create the robot and teleoperator configurations camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)} robot_config = SO100FollowerConfig( port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config ) teleop_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm") # Initialize the robot and teleoperator robot = SO100Follower(robot_config) teleop = SO100Leader(teleop_config) # Configure the dataset features action_features = hw_to_dataset_features(robot.action_features, "action") obs_features = hw_to_dataset_features(robot.observation_features, "observation") dataset_features = {**action_features, **obs_features} # Create the dataset dataset = LeRobotDataset.create( repo_id="<hf_username>/<dataset_repo_id>", fps=FPS, features=dataset_features, robot_type=robot.name, use_videos=True, image_writer_threads=4, ) # Initialize the keyboard listener and rerun visualization _, events = init_keyboard_listener() _init_rerun(session_name="recording") # Connect the robot and teleoperator robot.connect() teleop.connect() episode_idx = 0 while episode_idx < NUM_EPISODES and not events["stop_recording"]: log_say(f"Recording episode {episode_idx + 1} of {NUM_EPISODES}") record_loop( robot=robot, events=events, fps=FPS, teleop=teleop, dataset=dataset, control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, ) # Reset the environment if not stopping or re-recording if not events["stop_recording"] and (episode_idx < NUM_EPISODES - 1 or events["rerecord_episode"]): log_say("Reset the environment") record_loop( robot=robot, events=events, fps=FPS, teleop=teleop, control_time_s=RESET_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, ) if events["rerecord_episode"]: log_say("Re-recording episode") events["rerecord_episode"] = False events["exit_early"] = False dataset.clear_episode_buffer() continue dataset.save_episode() episode_idx += 1 # Clean up log_say("Stop recording") robot.disconnect() teleop.disconnect() dataset.push_to_hub() ``` <!-- prettier-ignore-end --> </hfoption> </hfoptions> #### Dataset upload Locally, your dataset is stored in this 
folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. `https://huggingface.co/datasets/${HF_USER}/so101_test`), which you can obtain by running:

```bash
echo https://huggingface.co/datasets/${HF_USER}/so101_test
```

Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).

You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).

You can also push your local dataset to the Hub manually, running:

```bash
huggingface-cli upload ${HF_USER}/record-test ~/.cache/huggingface/lerobot/{repo-id} --repo-type dataset
```

#### Record function

The `record` function provides a suite of tools for capturing and managing data during robot operation:

##### 1. Data Storage

- Data is stored in the `LeRobotDataset` format and written to disk during recording.
- By default, the dataset is pushed to your Hugging Face page after recording.
  - To disable uploading, use `--dataset.push_to_hub=False`.

##### 2. Checkpointing and Resuming

- Checkpoints are automatically created during recording.
- If an issue occurs, you can resume by re-running the same command with `--resume=true`. When resuming a recording, `--dataset.num_episodes` must be set to the **number of additional episodes to be recorded**, and not to the targeted total number of episodes in the dataset!
- To start recording from scratch, **manually delete** the dataset directory.

##### 3. Recording Parameters

Set the flow of data recording using command-line arguments:

- `--dataset.episode_time_s=60`
  Duration of each data recording episode (default: **60 seconds**).
- `--dataset.reset_time_s=60`
  Duration for resetting the environment after each episode (default: **60 seconds**).
- `--dataset.num_episodes=50`
  Total number of episodes to record (default: **50**).

##### 4. Keyboard Controls During Recording

Control the data recording flow using keyboard shortcuts:

- Press **Right Arrow (`→`)**: End the current episode or reset time early and move on to the next.
- Press **Left Arrow (`←`)**: Cancel the current episode and re-record it.
- Press **Escape (`ESC`)**: Immediately stop the session, encode videos, and upload the dataset.

#### Tips for gathering data

Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible in the cameras. A good rule of thumb is that you should be able to do the task yourself by looking only at the camera images.

In the following sections, you'll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altered camera positions.

Avoid adding too much variation too quickly, as it may hinder your results.

If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset.
#### Troubleshooting:

- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).

## Visualize a dataset

If you uploaded your dataset to the hub with `--dataset.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:

```bash
echo ${HF_USER}/so101_test
```

## Replay an episode

A useful feature is the `replay` function, which allows you to replay any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.

You can replay the first episode on your robot with either the command below or with the API example:

<hfoptions id="replay">
<hfoption id="Command">

```bash
lerobot-replay \
    --robot.type=so101_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.id=my_awesome_follower_arm \
    --dataset.repo_id=${HF_USER}/record-test \
    --dataset.episode=0 # choose the episode you want to replay
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
import time

from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.utils.robot_utils import busy_wait
from lerobot.utils.utils import log_say

episode_idx = 0

robot_config = SO100FollowerConfig(port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm")
robot = SO100Follower(robot_config)
robot.connect()

dataset = LeRobotDataset("<hf_username>/<dataset_repo_id>", episodes=[episode_idx])
actions = dataset.hf_dataset.select_columns("action")

log_say(f"Replaying episode {episode_idx}")
for idx in range(dataset.num_frames):
    t0 = time.perf_counter()

    action = {
        name: float(actions[idx]["action"][i]) for i, name in enumerate(dataset.features["action"]["names"])
    }
    robot.send_action(action)

    busy_wait(1.0 / dataset.fps - (time.perf_counter() - t0))

robot.disconnect()
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).

## Train a policy

To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:

```bash
lerobot-train \
  --dataset.repo_id=${HF_USER}/so101_test \
  --policy.type=act \
  --output_dir=outputs/train/act_so101_test \
  --job_name=act_so101_test \
  --policy.device=cuda \
  --wandb.enable=true \
  --policy.repo_id=${HF_USER}/my_policy
```

Let's explain the command:

1. We provided the dataset as an argument with `--dataset.repo_id=${HF_USER}/so101_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g.
`laptop` and `phone`) which have been saved in your dataset.
3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.

Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`.

To resume training from a checkpoint, here is an example command that resumes from the `last` checkpoint of the `act_so101_test` policy:

```bash
lerobot-train \
    --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \
    --resume=true
```

If you do not want to push your model to the hub after training, use `--policy.push_to_hub=false`. Additionally, you can provide extra `tags`, specify a `license`, or make the model repo `private` by adding: `--policy.private=true --policy.tags=\[ppo,rl\] --policy.license=mit`

#### Train using Google Colab

If your local computer doesn't have a powerful GPU, you can use Google Colab to train your model by following the [ACT training notebook](./notebooks#training-act).

#### Upload policy checkpoints

Once training is done, upload the latest checkpoint with:

```bash
huggingface-cli upload ${HF_USER}/act_so101_test \
    outputs/train/act_so101_test/checkpoints/last/pretrained_model
```

You can also upload intermediate checkpoints with:

```bash
CKPT=010000
huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \
    outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model
```

## Run inference and evaluate your policy

You can use the `record` script from [`lerobot/record.py`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/record.py) with a policy checkpoint as input to run inference and evaluate your policy.
For instance, run this command or API example to run inference and record 10 evaluation episodes:

<hfoptions id="eval">
<hfoption id="Command">

```bash
lerobot-record \
    --robot.type=so100_follower \
    --robot.port=/dev/ttyACM1 \
    --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \
    --robot.id=my_awesome_follower_arm \
    --display_data=false \
    --dataset.repo_id=${HF_USER}/eval_so100 \
    --dataset.single_task="Put lego brick into the transparent box" \
    # <- Teleop optional if you want to teleoperate in between episodes \
    # --teleop.type=so100_leader \
    # --teleop.port=/dev/ttyACM0 \
    # --teleop.id=my_awesome_leader_arm \
    --policy.path=${HF_USER}/my_policy
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import hw_to_dataset_features
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.so100_follower import SO100Follower
from lerobot.utils.control_utils import init_keyboard_listener
from lerobot.utils.utils import log_say
from lerobot.utils.visualization_utils import _init_rerun
from lerobot.record import record_loop

NUM_EPISODES = 5
FPS = 30
EPISODE_TIME_SEC = 60
TASK_DESCRIPTION = "My task description"

# Create the robot configuration
camera_config = {"front": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=FPS)}
robot_config = SO100FollowerConfig(
    port="/dev/tty.usbmodem58760434471", id="my_awesome_follower_arm", cameras=camera_config
)

# Initialize the robot
robot = SO100Follower(robot_config)

# Initialize the policy
policy = ACTPolicy.from_pretrained("<hf_username>/<my_policy_repo_id>")

# Configure the dataset features
action_features = hw_to_dataset_features(robot.action_features, "action")
obs_features = hw_to_dataset_features(robot.observation_features, "observation")
dataset_features = {**action_features, **obs_features}

# Create the dataset
dataset = LeRobotDataset.create(
    repo_id="<hf_username>/eval_<dataset_repo_id>",
    fps=FPS,
    features=dataset_features,
    robot_type=robot.name,
    use_videos=True,
    image_writer_threads=4,
)

# Initialize the keyboard listener and rerun visualization
_, events = init_keyboard_listener()
_init_rerun(session_name="recording")

# Connect the robot
robot.connect()

for episode_idx in range(NUM_EPISODES):
    log_say(f"Running inference, recording eval episode {episode_idx + 1} of {NUM_EPISODES}")

    # Run the policy inference loop
    record_loop(
        robot=robot,
        events=events,
        fps=FPS,
        policy=policy,
        dataset=dataset,
        control_time_s=EPISODE_TIME_SEC,
        single_task=TASK_DESCRIPTION,
        display_data=True,
    )

    dataset.save_episode()

# Clean up
robot.disconnect()
dataset.push_to_hub()
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:

1. There is an additional `--policy.path` argument, which indicates the path to your policy checkpoint (e.g. `outputs/train/act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`).
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`).
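You can then inspect the recorded evaluation episodes like any other dataset, for instance by replaying one of them on the robot with the `replay` command introduced earlier. The sketch below assumes the SO101 follower settings and the `eval_act_so101_test` repo id used as examples in this tutorial:

```bash
lerobot-replay \
    --robot.type=so101_follower \
    --robot.port=/dev/tty.usbmodem58760431541 \
    --robot.id=my_awesome_follower_arm \
    --dataset.repo_id=${HF_USER}/eval_act_so101_test \
    --dataset.episode=0
```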
lerobot/docs/source/il_robots.mdx/0
{ "file_path": "lerobot/docs/source/il_robots.mdx", "repo_id": "lerobot", "token_count": 7621 }
206
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face. It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch. Features included in this script: - Viewing a dataset's metadata and exploring its properties. - Loading an existing dataset from the hub or a subset of it. - Accessing frames by episode number. - Using advanced dataset features like timestamp-based frame selection. - Demonstrating compatibility with PyTorch DataLoader for batch processing. The script ends with examples of how to batch process data using PyTorch's DataLoader. """ from pprint import pprint import torch from huggingface_hub import HfApi import lerobot from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata # We ported a number of existing datasets ourselves, use this to see the list: print("List of available datasets:") pprint(lerobot.available_datasets) # You can also browse through the datasets created/ported by the community on the hub using the hub api: hub_api = HfApi() repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])] pprint(repo_ids) # Or simply explore them in your web browser directly at: # https://huggingface.co/datasets?other=LeRobot # Let's take this one for this example repo_id = "lerobot/aloha_mobile_cabinet" # We can have a look and fetch its metadata to know more about it: ds_meta = LeRobotDatasetMetadata(repo_id) # By instantiating just this class, you can quickly access useful information about the content and the # structure of the dataset without downloading the actual data yet (only metadata files — which are # lightweight). print(f"Total number of episodes: {ds_meta.total_episodes}") print(f"Average number of frames per episode: {ds_meta.total_frames / ds_meta.total_episodes:.3f}") print(f"Frames per second used during data collection: {ds_meta.fps}") print(f"Robot type: {ds_meta.robot_type}") print(f"keys to access images from cameras: {ds_meta.camera_keys=}\n") print("Tasks:") print(ds_meta.tasks) print("Features:") pprint(ds_meta.features) # You can also get a short summary by simply printing the object: print(ds_meta) # You can then load the actual dataset from the hub. 
# Either load any subset of episodes: dataset = LeRobotDataset(repo_id, episodes=[0, 10, 11, 23]) # And see how many frames you have: print(f"Selected episodes: {dataset.episodes}") print(f"Number of episodes selected: {dataset.num_episodes}") print(f"Number of frames selected: {dataset.num_frames}") # Or simply load the entire dataset: dataset = LeRobotDataset(repo_id) print(f"Number of episodes selected: {dataset.num_episodes}") print(f"Number of frames selected: {dataset.num_frames}") # The previous metadata class is contained in the 'meta' attribute of the dataset: print(dataset.meta) # LeRobotDataset actually wraps an underlying Hugging Face dataset # (see https://huggingface.co/docs/datasets for more information). print(dataset.hf_dataset) # LeRobot datasets also subclasses PyTorch datasets so you can do everything you know and love from working # with the latter, like iterating through the dataset. # The __getitem__ iterates over the frames of the dataset. Since our datasets are also structured by # episodes, you can access the frame indices of any episode using the episode_data_index. Here, we access # frame indices associated to the first episode: episode_index = 0 from_idx = dataset.episode_data_index["from"][episode_index].item() to_idx = dataset.episode_data_index["to"][episode_index].item() # Then we grab all the image frames from the first camera: camera_key = dataset.meta.camera_keys[0] frames = [dataset[idx][camera_key] for idx in range(from_idx, to_idx)] # The objects returned by the dataset are all torch.Tensors print(type(frames[0])) print(frames[0].shape) # Since we're using pytorch, the shape is in pytorch, channel-first convention (c, h, w). # We can compare this shape with the information available for that feature pprint(dataset.features[camera_key]) # In particular: print(dataset.features[camera_key]["shape"]) # The shape is in (h, w, c) which is a more universal format. # For many machine learning applications we need to load the history of past observations or trajectories of # future actions. Our datasets can load previous and future frames for each key/modality, using timestamps # differences with the current loaded frame. For instance: delta_timestamps = { # loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame camera_key: [-1, -0.5, -0.20, 0], # loads 6 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0], # loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future "action": [t / dataset.fps for t in range(64)], } # Note that in any case, these delta_timestamps values need to be multiples of (1/fps) so that added to any # timestamp, you still get a valid timestamp. dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps) print(f"\n{dataset[0][camera_key].shape=}") # (4, c, h, w) print(f"{dataset[0]['observation.state'].shape=}") # (6, c) print(f"{dataset[0]['action'].shape=}\n") # (64, c) # Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just # PyTorch datasets. dataloader = torch.utils.data.DataLoader( dataset, num_workers=0, batch_size=32, shuffle=True, ) for batch in dataloader: print(f"{batch[camera_key].shape=}") # (32, 4, c, h, w) print(f"{batch['observation.state'].shape=}") # (32, 6, c) print(f"{batch['action'].shape=}") # (32, 64, c) break
lerobot/examples/1_load_lerobot_dataset.py/0
{ "file_path": "lerobot/examples/1_load_lerobot_dataset.py", "repo_id": "lerobot", "token_count": 2021 }
207
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from pathlib import Path from ..configs import CameraConfig, ColorMode, Cv2Rotation @CameraConfig.register_subclass("opencv") @dataclass class OpenCVCameraConfig(CameraConfig): """Configuration class for OpenCV-based camera devices or video files. This class provides configuration options for cameras accessed through OpenCV, supporting both physical camera devices and video files. It includes settings for resolution, frame rate, color mode, and image rotation. Example configurations: ```python # Basic configurations OpenCVCameraConfig(0, 30, 1280, 720) # 1280x720 @ 30FPS OpenCVCameraConfig(/dev/video4, 60, 640, 480) # 640x480 @ 60FPS # Advanced configurations OpenCVCameraConfig(128422271347, 30, 640, 480, rotation=Cv2Rotation.ROTATE_90) # With 90° rotation ``` Attributes: index_or_path: Either an integer representing the camera device index, or a Path object pointing to a video file. fps: Requested frames per second for the color stream. width: Requested frame width in pixels for the color stream. height: Requested frame height in pixels for the color stream. color_mode: Color mode for image output (RGB or BGR). Defaults to RGB. rotation: Image rotation setting (0°, 90°, 180°, or 270°). Defaults to no rotation. warmup_s: Time reading frames before returning from connect (in seconds) Note: - Only 3-channel color output (RGB/BGR) is currently supported. """ index_or_path: int | Path color_mode: ColorMode = ColorMode.RGB rotation: Cv2Rotation = Cv2Rotation.NO_ROTATION warmup_s: int = 1 def __post_init__(self): if self.color_mode not in (ColorMode.RGB, ColorMode.BGR): raise ValueError( f"`color_mode` is expected to be {ColorMode.RGB.value} or {ColorMode.BGR.value}, but {self.color_mode} is provided." ) if self.rotation not in ( Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270, ): raise ValueError( f"`rotation` is expected to be in {(Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270)}, but {self.rotation} is provided." )
lerobot/src/lerobot/cameras/opencv/configuration_opencv.py/0
{ "file_path": "lerobot/src/lerobot/cameras/opencv/configuration_opencv.py", "repo_id": "lerobot", "token_count": 1100 }
208
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing import queue import threading from pathlib import Path import numpy as np import PIL.Image import torch def safe_stop_image_writer(func): def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: dataset = kwargs.get("dataset") image_writer = getattr(dataset, "image_writer", None) if dataset else None if image_writer is not None: print("Waiting for image writer to terminate...") image_writer.stop() raise e return wrapper def image_array_to_pil_image(image_array: np.ndarray, range_check: bool = True) -> PIL.Image.Image: # TODO(aliberts): handle 1 channel and 4 for depth images if image_array.ndim != 3: raise ValueError(f"The array has {image_array.ndim} dimensions, but 3 is expected for an image.") if image_array.shape[0] == 3: # Transpose from pytorch convention (C, H, W) to (H, W, C) image_array = image_array.transpose(1, 2, 0) elif image_array.shape[-1] != 3: raise NotImplementedError( f"The image has {image_array.shape[-1]} channels, but 3 is required for now." ) if image_array.dtype != np.uint8: if range_check: max_ = image_array.max().item() min_ = image_array.min().item() if max_ > 1.0 or min_ < 0.0: raise ValueError( "The image data type is float, which requires values in the range [0.0, 1.0]. " f"However, the provided range is [{min_}, {max_}]. Please adjust the range or " "provide a uint8 image with values in the range [0, 255]." ) image_array = (image_array * 255).astype(np.uint8) return PIL.Image.fromarray(image_array) def write_image(image: np.ndarray | PIL.Image.Image, fpath: Path): try: if isinstance(image, np.ndarray): img = image_array_to_pil_image(image) elif isinstance(image, PIL.Image.Image): img = image else: raise TypeError(f"Unsupported image type: {type(image)}") img.save(fpath) except Exception as e: print(f"Error writing image {fpath}: {e}") def worker_thread_loop(queue: queue.Queue): while True: item = queue.get() if item is None: queue.task_done() break image_array, fpath = item write_image(image_array, fpath) queue.task_done() def worker_process(queue: queue.Queue, num_threads: int): threads = [] for _ in range(num_threads): t = threading.Thread(target=worker_thread_loop, args=(queue,)) t.daemon = True t.start() threads.append(t) for t in threads: t.join() class AsyncImageWriter: """ This class abstract away the initialisation of processes or/and threads to save images on disk asynchronously, which is critical to control a robot and record data at a high frame rate. When `num_processes=0`, it creates a threads pool of size `num_threads`. When `num_processes>0`, it creates processes pool of size `num_processes`, where each subprocess starts their own threads pool of size `num_threads`. The optimal number of processes and threads depends on your computer capabilities. We advise to use 4 threads per camera with 0 processes. If the fps is not stable, try to increase or lower the number of threads. 
If it is still not stable, try to use 1 subprocess, or more. """ def __init__(self, num_processes: int = 0, num_threads: int = 1): self.num_processes = num_processes self.num_threads = num_threads self.queue = None self.threads = [] self.processes = [] self._stopped = False if num_threads <= 0 and num_processes <= 0: raise ValueError("Number of threads and processes must be greater than zero.") if self.num_processes == 0: # Use threading self.queue = queue.Queue() for _ in range(self.num_threads): t = threading.Thread(target=worker_thread_loop, args=(self.queue,)) t.daemon = True t.start() self.threads.append(t) else: # Use multiprocessing self.queue = multiprocessing.JoinableQueue() for _ in range(self.num_processes): p = multiprocessing.Process(target=worker_process, args=(self.queue, self.num_threads)) p.daemon = True p.start() self.processes.append(p) def save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path): if isinstance(image, torch.Tensor): # Convert tensor to numpy array to minimize main process time image = image.cpu().numpy() self.queue.put((image, fpath)) def wait_until_done(self): self.queue.join() def stop(self): if self._stopped: return if self.num_processes == 0: for _ in self.threads: self.queue.put(None) for t in self.threads: t.join() else: num_nones = self.num_processes * self.num_threads for _ in range(num_nones): self.queue.put(None) for p in self.processes: p.join() if p.is_alive(): p.terminate() self.queue.close() self.queue.join_thread() self._stopped = True
lerobot/src/lerobot/datasets/image_writer.py/0
{ "file_path": "lerobot/src/lerobot/datasets/image_writer.py", "repo_id": "lerobot", "token_count": 2712 }
209
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import gymnasium as gym from lerobot.envs.configs import AlohaEnv, EnvConfig, HILEnvConfig, PushtEnv, XarmEnv def make_env_config(env_type: str, **kwargs) -> EnvConfig: if env_type == "aloha": return AlohaEnv(**kwargs) elif env_type == "pusht": return PushtEnv(**kwargs) elif env_type == "xarm": return XarmEnv(**kwargs) elif env_type == "hil": return HILEnvConfig(**kwargs) else: raise ValueError(f"Policy type '{env_type}' is not available.") def make_env(cfg: EnvConfig, n_envs: int = 1, use_async_envs: bool = False) -> gym.vector.VectorEnv | None: """Makes a gym vector environment according to the config. Args: cfg (EnvConfig): the config of the environment to instantiate. n_envs (int, optional): The number of parallelized env to return. Defaults to 1. use_async_envs (bool, optional): Whether to return an AsyncVectorEnv or a SyncVectorEnv. Defaults to False. Raises: ValueError: if n_envs < 1 ModuleNotFoundError: If the requested env package is not installed Returns: gym.vector.VectorEnv: The parallelized gym.env instance. """ if n_envs < 1: raise ValueError("`n_envs must be at least 1") package_name = f"gym_{cfg.type}" try: importlib.import_module(package_name) except ModuleNotFoundError as e: print(f"{package_name} is not installed. Please install it with `pip install 'lerobot[{cfg.type}]'`") raise e gym_handle = f"{package_name}/{cfg.task}" # batched version of the env that returns an observation of shape (b, c) env_cls = gym.vector.AsyncVectorEnv if use_async_envs else gym.vector.SyncVectorEnv env = env_cls( [lambda: gym.make(gym_handle, disable_env_checker=True, **cfg.gym_kwargs) for _ in range(n_envs)] ) return env
lerobot/src/lerobot/envs/factory.py/0
{ "file_path": "lerobot/src/lerobot/envs/factory.py", "repo_id": "lerobot", "token_count": 946 }
210
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from torch.optim import Optimizer from torch.optim.lr_scheduler import LRScheduler from lerobot.configs.train import TrainPipelineConfig from lerobot.policies.pretrained import PreTrainedPolicy def make_optimizer_and_scheduler( cfg: TrainPipelineConfig, policy: PreTrainedPolicy ) -> tuple[Optimizer, LRScheduler | None]: """Generates the optimizer and scheduler based on configs. Args: cfg (TrainPipelineConfig): The training config that contains optimizer and scheduler configs policy (PreTrainedPolicy): The policy config from which parameters and presets must be taken from. Returns: tuple[Optimizer, LRScheduler | None]: The couple (Optimizer, Scheduler). Scheduler can be `None`. """ params = policy.get_optim_params() if cfg.use_policy_training_preset else policy.parameters() optimizer = cfg.optimizer.build(params) lr_scheduler = cfg.scheduler.build(optimizer, cfg.steps) if cfg.scheduler is not None else None return optimizer, lr_scheduler
lerobot/src/lerobot/optim/factory.py/0
{ "file_path": "lerobot/src/lerobot/optim/factory.py", "repo_id": "lerobot", "token_count": 512 }
211
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert pi0 parameters from Jax to Pytorch Follow [README of openpi](https://github.com/Physical-Intelligence/openpi) to create a new environment and install the required libraries. ```bash cd ~/code/openpi source .venv/bin/activate ``` Example downloading parameters: ```bash python >>> import openpi.shared.download as download >>> path='s3://openpi-assets/checkpoints/pi0_base/params' >>> download.maybe_download(path) ``` Converting pi0_base: ```python python -m lerobot.policies.pi0.conversion_scripts.convert_pi0_to_hf_lerobot \ --checkpoint_dir /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_base/params \ --output_path /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_base_pytorch ``` ```python python -m lerobot.policies.pi0.conversion_scripts.convert_pi0_to_hf_lerobot \ --checkpoint_dir /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim/params \ --output_path /home/remi_cadene/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim_pytorch ``` """ import argparse import pathlib import jax import numpy as np import orbax.checkpoint as ocp import torch from jax.sharding import SingleDeviceSharding from lerobot.policies.pi0.configuration_pi0 import PI0Config from lerobot.policies.pi0.conversion_scripts.conversion_utils import ( get_gemma_config, get_paligemma_config, ) from lerobot.policies.pi0.modeling_pi0 import PI0Policy PRECISIONS = {"bfloat16": torch.bfloat16, "float32": torch.float32, "float16": torch.float16} def slice_paligemma_state_dict(state_dict, config): suffix = "/value" if "img/embedding/kernel/value" in state_dict else "" # fmt: off # patch embeddings state_dict["paligemma.vision_tower.vision_model.embeddings.patch_embedding.weight"] = state_dict.pop(f"img/embedding/kernel{suffix}").transpose( 3, 2, 0, 1 ) state_dict["paligemma.vision_tower.vision_model.embeddings.patch_embedding.bias"] = state_dict.pop(f"img/embedding/bias{suffix}") # positional embeddings state_dict["paligemma.vision_tower.vision_model.embeddings.position_embedding.weight"] = state_dict.pop(f"img/pos_embedding{suffix}").reshape( -1, config.vision_config.hidden_size ) # extract vision layers to be sliced at index 0. There are 27 layers in the base model. 
encoderblock_layernorm0_scale = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_0/scale{suffix}") encoderblock_layernorm0_bias = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_0/bias{suffix}") encoderblock_layernorm1_scale = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_1/scale{suffix}") encoderblock_layernorm1_bias = state_dict.pop(f"img/Transformer/encoderblock/LayerNorm_1/bias{suffix}") encoderblock_mlp_dense0_kernel= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_0/kernel{suffix}") encoderblock_mlp_dense0_bias= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_0/bias{suffix}") encoderblock_mlp_dense1_kernel= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_1/kernel{suffix}") encoderblock_mlp_dense1_bias= state_dict.pop(f"img/Transformer/encoderblock/MlpBlock_0/Dense_1/bias{suffix}") encoderblock_attention_0_key_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/kernel{suffix}") encoderblock_attention_0_key_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/key/bias{suffix}") encoderblock_attention_0_value_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/kernel{suffix}") encoderblock_attention_0_value_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/value/bias{suffix}") encoderblock_attention_0_query_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/kernel{suffix}") encoderblock_attention_0_query_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/query/bias{suffix}") encoderblock_attention_0_out_kernel = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/kernel{suffix}") encoderblock_attention_0_out_bias = state_dict.pop(f"img/Transformer/encoderblock/MultiHeadDotProductAttention_0/out/bias{suffix}") for i in range(config.vision_config.num_hidden_layers): state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm1.weight"] = encoderblock_layernorm0_scale[i].transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm1.bias"] = encoderblock_layernorm0_bias[i] state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm2.weight"] = encoderblock_layernorm1_scale[i].transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.layer_norm2.bias"] = encoderblock_layernorm1_bias[i] state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.weight"] = encoderblock_mlp_dense0_kernel[i].transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc1.bias"] = encoderblock_mlp_dense0_bias[i] state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.weight"] = encoderblock_mlp_dense1_kernel[i].transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.mlp.fc2.bias"] = encoderblock_mlp_dense1_bias[i] state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = encoderblock_attention_0_key_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = encoderblock_attention_0_key_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = 
encoderblock_attention_0_value_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = encoderblock_attention_0_value_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = encoderblock_attention_0_query_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = encoderblock_attention_0_query_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"] = encoderblock_attention_0_out_kernel[i].reshape(-1, config.vision_config.hidden_size).transpose() state_dict[f"paligemma.vision_tower.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"] = encoderblock_attention_0_out_bias[i].reshape(-1, config.vision_config.hidden_size).reshape(-1) state_dict["paligemma.vision_tower.vision_model.post_layernorm.weight"] = state_dict.pop(f"img/Transformer/encoder_norm/scale{suffix}").transpose() state_dict["paligemma.vision_tower.vision_model.post_layernorm.bias"] = state_dict.pop(f"img/Transformer/encoder_norm/bias{suffix}") # multimodal projector state_dict['paligemma.multi_modal_projector.linear.weight'] = state_dict.pop(f"img/head/kernel{suffix}").transpose() state_dict['paligemma.multi_modal_projector.linear.bias'] = state_dict.pop(f"img/head/bias{suffix}") # text decoder (gemma) embedding_vector = state_dict.pop(f"llm/embedder/input_embedding{suffix}") state_dict["paligemma.language_model.model.embed_tokens.weight"] = embedding_vector # pop the einsum attention + mlp representations. There are 18 layers in gemma-2b. llm_attention_attn_vec_einsum = state_dict.pop(f"llm/layers/attn/attn_vec_einsum/w{suffix}") llm_attention_kv_einsum = state_dict.pop(f"llm/layers/attn/kv_einsum/w{suffix}") llm_attention_q_einsum = state_dict.pop(f"llm/layers/attn/q_einsum/w{suffix}") llm_mlp_gating_einsum = state_dict.pop(f"llm/layers/mlp/gating_einsum{suffix}") llm_mlp_linear = state_dict.pop(f"llm/layers/mlp/linear{suffix}") # TODO verify correctness of layer norm loading llm_input_layernorm = state_dict.pop(f"llm/layers/pre_attention_norm/scale{suffix}") llm_post_attention_layernorm = state_dict.pop(f"llm/layers/pre_ffw_norm/scale{suffix}") for i in range(config.text_config.num_hidden_layers): # llm_attention_q_einsum[i].shape = (8, 2048, 256) q_proj_weight_reshaped = llm_attention_q_einsum[i].transpose(0, 2, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size) state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.q_proj.weight"] = q_proj_weight_reshaped # llm_attention_kv_einsum[i, 0, 0].shape = (2048, 256) k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose() state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.k_proj.weight"] = k_proj_weight_reshaped # llm_attention_kv_einsum[i, 1, 0].shape = (2048, 256) v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose() state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.v_proj.weight"] = v_proj_weight_reshaped # output projection. 
# llm_attention_attn_vec_einsum[i].shape = (8, 256, 2048) o_proj_weight_reshaped = llm_attention_attn_vec_einsum[i].transpose(2, 0, 1).reshape(config.text_config.num_attention_heads * config.text_config.head_dim, config.text_config.hidden_size) state_dict[f"paligemma.language_model.model.layers.{i}.self_attn.o_proj.weight"] = o_proj_weight_reshaped # mlp layers gate_proj_weight = llm_mlp_gating_einsum[i, 0] state_dict[f"paligemma.language_model.model.layers.{i}.mlp.gate_proj.weight"] = gate_proj_weight.transpose() up_proj_weight = llm_mlp_gating_einsum[i, 1] state_dict[f"paligemma.language_model.model.layers.{i}.mlp.up_proj.weight"] = up_proj_weight.transpose() state_dict[f"paligemma.language_model.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[i].transpose() state_dict[f"paligemma.language_model.model.layers.{i}.input_layernorm.weight"] = llm_input_layernorm[i] state_dict[f"paligemma.language_model.model.layers.{i}.post_attention_layernorm.weight"] = llm_post_attention_layernorm[i] state_dict["paligemma.language_model.model.norm.weight"] = state_dict.pop(f"llm/final_norm/scale{suffix}") state_dict["paligemma.language_model.lm_head.weight"] = embedding_vector # weights are tied. # fmt: on expert_dict = {} final_state_dict = {} for key, value in state_dict.items(): if key not in [ f"llm/final_norm_1/scale{suffix}", f"llm/layers/attn/attn_vec_einsum_1/w{suffix}", f"llm/layers/attn/kv_einsum_1/w{suffix}", f"llm/layers/attn/q_einsum_1/w{suffix}", f"llm/layers/mlp_1/gating_einsum{suffix}", f"llm/layers/mlp_1/linear{suffix}", f"llm/layers/pre_attention_norm_1/scale{suffix}", f"llm/layers/pre_ffw_norm_1/scale{suffix}", ]: final_state_dict[key] = torch.from_numpy(value) else: expert_dict[key] = value return final_state_dict, expert_dict def slice_gemma_state_dict(state_dict, config, num_expert=1): # fmt: off # text decoder (gemma) # no embedding vector, the expert just has the decoder layers embedding_vector = torch.zeros([config.vocab_size, config.hidden_size]) state_dict["gemma_expert.model.embed_tokens.weight"] = embedding_vector # pop the einsum attention + mlp representations. There are 18 layers in gemma-2b. 
suffix = "/value" if f"llm/layers/attn/attn_vec_einsum_{num_expert}/w/value" in state_dict else "" llm_attention_attn_vec_einsum = state_dict.pop(f"llm/layers/attn/attn_vec_einsum_{num_expert}/w{suffix}") llm_attention_kv_einsum = state_dict.pop(f"llm/layers/attn/kv_einsum_{num_expert}/w{suffix}") llm_attention_q_einsum = state_dict.pop(f"llm/layers/attn/q_einsum_{num_expert}/w{suffix}") llm_mlp_gating_einsum = state_dict.pop(f"llm/layers/mlp_{num_expert}/gating_einsum{suffix}") llm_mlp_linear = state_dict.pop(f"llm/layers/mlp_{num_expert}/linear{suffix}") # TODO verify correctness of layer norm loading llm_input_layernorm = state_dict.pop(f"llm/layers/pre_attention_norm_{num_expert}/scale{suffix}") llm_post_attention_layernorm = state_dict.pop(f"llm/layers/pre_ffw_norm_{num_expert}/scale{suffix}") for i in range(config.num_hidden_layers): q_proj_weight_reshaped = llm_attention_q_einsum[i].transpose(0, 2, 1).reshape(config.num_attention_heads * config.head_dim, config.hidden_size) state_dict[f"gemma_expert.model.layers.{i}.self_attn.q_proj.weight"] = q_proj_weight_reshaped k_proj_weight_reshaped = llm_attention_kv_einsum[i, 0, 0].transpose() state_dict[f"gemma_expert.model.layers.{i}.self_attn.k_proj.weight"] = k_proj_weight_reshaped v_proj_weight_reshaped = llm_attention_kv_einsum[i, 1, 0].transpose() state_dict[f"gemma_expert.model.layers.{i}.self_attn.v_proj.weight"] = v_proj_weight_reshaped # output projection. # llm_attention_attn_vec_einsum[i].shape = (8, 256, 1024) o_proj_weight_reshaped = llm_attention_attn_vec_einsum[i].reshape(config.num_attention_heads * config.head_dim, config.hidden_size).transpose(1,0)# .transpose(2, 0, 1).reshape(config.num_attention_heads * config.head_dim, config.hidden_size).transpose(1, 0) state_dict[f"gemma_expert.model.layers.{i}.self_attn.o_proj.weight"] = o_proj_weight_reshaped # mlp layers gate_proj_weight = llm_mlp_gating_einsum[i, 0] state_dict[f"gemma_expert.model.layers.{i}.mlp.gate_proj.weight"] = gate_proj_weight.transpose() up_proj_weight = llm_mlp_gating_einsum[i, 1] state_dict[f"gemma_expert.model.layers.{i}.mlp.up_proj.weight"] = up_proj_weight.transpose() state_dict[f"gemma_expert.model.layers.{i}.mlp.down_proj.weight"] = llm_mlp_linear[i].transpose() state_dict[f"gemma_expert.model.layers.{i}.input_layernorm.weight"] = llm_input_layernorm[i] state_dict[f"gemma_expert.model.layers.{i}.post_attention_layernorm.weight"] = llm_post_attention_layernorm[i] state_dict["gemma_expert.model.norm.weight"] = state_dict.pop(f"llm/final_norm_{num_expert}/scale{suffix}") state_dict["gemma_expert.lm_head.weight"] = embedding_vector # weights are tied. (and zeros here) # fmt: on final_state_dict = {} for key, value in state_dict.items(): if not isinstance(value, torch.Tensor): final_state_dict[key] = torch.from_numpy(value) else: final_state_dict[key] = value return final_state_dict def flatten_for_memory(tree, parent_key=""): out = {} for k, v in tree.items(): new_key = f"{parent_key}/{k}" if parent_key else k if isinstance(v, dict): out.update(flatten_for_memory(v, new_key)) else: out[new_key] = np.array(v) # Ensure conversion to np.array for consistency return out def flatten_for_npz(tree, parent_key=""): out = {} for k, v in tree.items(): new_key = f"{parent_key}/{k}" if parent_key else k if isinstance(v, dict): out.update(flatten_for_npz(v, new_key)) else: # bf16/f32 here? 
out[new_key] = np.array(v) return out def slice_initial_orbax_checkpoint(checkpoint_dir: str): params_path = pathlib.Path(checkpoint_dir).resolve() checkpointer = ocp.PyTreeCheckpointer() metadata = checkpointer.metadata(params_path) print("Metadata keys:", list(metadata.keys())) params_name = "params" item = {params_name: metadata[params_name]} device = jax.local_devices()[0] # Use the first local device sharding = SingleDeviceSharding(device) restored = checkpointer.restore( params_path, ocp.args.PyTreeRestore( item=item, restore_args=jax.tree_util.tree_map( lambda _: ocp.ArrayRestoreArgs( restore_type=jax.Array, # or np.ndarray, but bf16 is annoying about it sharding=sharding, ), item, ), transforms={}, ), ) params = restored[params_name] # get params for PaliGemma pali_params = params["PaliGemma"] del params["PaliGemma"] pali_params_flat = flatten_for_npz(pali_params) return {"paligemma_params": pali_params_flat, "projection_params": params} def update_keys_with_prefix(d: dict, prefix: str) -> dict: """Update dictionary keys by adding a prefix.""" return {f"{prefix}{key}": value for key, value in d.items()} def convert_pi0_checkpoint(checkpoint_dir: str, precision: str, tokenizer_id: str, output_path: str): # Break down orbax ckpts - they are in OCDBT initial_params = slice_initial_orbax_checkpoint(checkpoint_dir=checkpoint_dir) # process projection params keys = [ "state_proj", "action_in_proj", "action_out_proj", "action_time_mlp_in", "action_time_mlp_out", ] projection_params = {} for key in keys: kernel_params = initial_params["projection_params"][key]["kernel"] bias_params = initial_params["projection_params"][key]["bias"] if isinstance(kernel_params, dict): weight = kernel_params["value"] bias = bias_params["value"] else: weight = kernel_params bias = bias_params projection_params[f"{key}.weight"] = torch.from_numpy(np.array(weight)).T projection_params[f"{key}.bias"] = torch.from_numpy(np.array(bias)) # Process PaliGemma weights paligemma_config = get_paligemma_config(precision) paligemma_params, gemma_raw_dictionary = slice_paligemma_state_dict( initial_params["paligemma_params"], paligemma_config ) # Process Gemma weights (at this stage they are unused) gemma_config = get_gemma_config(precision) gemma_params = slice_gemma_state_dict(gemma_raw_dictionary, config=gemma_config) # Instantiate model from configs if "pi0_aloha_sim" in checkpoint_dir: pi0_config = PI0Config( empty_cameras=2, adapt_to_pi_aloha=True, use_delta_joint_actions_aloha=False, ) elif "pi0_aloha_towel" in checkpoint_dir: pi0_config = PI0Config( adapt_to_pi_aloha=True, use_delta_joint_actions_aloha=True, ) elif "pi0_base" in checkpoint_dir: pi0_config = PI0Config( empty_cameras=0, adapt_to_pi_aloha=False, use_delta_joint_actions_aloha=False, ) else: raise ValueError() # gemma_config=gemma_config, paligemma_config=paligemma_config) pi0_model = PI0Policy(pi0_config) paligemma_params = update_keys_with_prefix(paligemma_params, "model.paligemma_with_expert.") gemma_params = update_keys_with_prefix(gemma_params, "model.paligemma_with_expert.") projection_params = update_keys_with_prefix(projection_params, "model.") # load state dict torch_dtype = PRECISIONS[precision] pi0_model.load_state_dict({**paligemma_params, **gemma_params, **projection_params}) pi0_model = pi0_model.to(torch_dtype) # pi0_tokenizer = AutoTokenizer.from_pretrained(tokenizer_id) pi0_model.save_pretrained(output_path, safe_serialization=True) # pi0_tokenizer.save_pretrained(output_path, dtype=torch_dtype) # assert that model loads properly del 
pi0_model PI0Policy.from_pretrained(output_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_dir", default="/raid/pablo/.cache/openpi/openpi-assets/checkpoints/pi0_aloha_sim/params", type=str, help="Path to the ocdbt checkpoint", ) parser.add_argument( "--precision", choices=["float32", "bfloat16", "float16"], default="float32", type=str, help="Precision identifier for model conversion - should match the base checkpoint precision.", ) # tokenizer is identical to paligemma, it appears parser.add_argument( "--tokenizer_hub_id", default="google/paligemma-3b-pt-224", type=str, help="Hub path to the tokenizer to save", ) parser.add_argument( "--output_path", required=True, type=str, help="Path to save converted weights to", ) args = parser.parse_args() convert_pi0_checkpoint( checkpoint_dir=args.checkpoint_dir, precision=args.precision, tokenizer_id=args.tokenizer_hub_id, output_path=args.output_path, )
lerobot/src/lerobot/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py/0
{ "file_path": "lerobot/src/lerobot/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py", "repo_id": "lerobot", "token_count": 9448 }
212
#!/usr/bin/env python # Copyright 2024 Nicklas Hansen, Xiaolong Wang, Hao Su, # and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode from lerobot.optim.optimizers import AdamConfig @PreTrainedConfig.register_subclass("tdmpc") @dataclass class TDMPCConfig(PreTrainedConfig): """Configuration class for TDMPCPolicy. Defaults are configured for training with xarm_lift_medium_replay providing proprioceptive and single camera observations. The parameters you will most likely need to change are the ones which depend on the environment / sensors. Those are: `input_shapes`, `output_shapes`, and perhaps `max_random_shift_ratio`. Args: n_action_repeats: The number of times to repeat the action returned by the planning. (hint: Google action repeats in Q-learning or ask your favorite chatbot) horizon: Horizon for model predictive control. n_action_steps: Number of action steps to take from the plan given by model predictive control. This is an alternative to using action repeats. If this is set to more than 1, then we require `n_action_repeats == 1`, `use_mpc == True` and `n_action_steps <= horizon`. Note that this approach of using multiple steps from the plan is not in the original implementation. input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents the input data name, and the value is a list indicating the dimensions of the corresponding data. For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96], indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't include batch dimension or temporal dimension. output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents the output data name, and the value is a list indicating the dimensions of the corresponding data. For example, "action" refers to an output shape of [14], indicating 14-dimensional actions. Importantly, `output_shapes` doesn't include batch dimension or temporal dimension. input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"), and the value specifies the normalization mode to apply. The two available modes are "mean_std" which subtracts the mean and divides by the standard deviation and "min_max" which rescale in a [-1, 1] range. Note that here this defaults to None meaning inputs are not normalized. This is to match the original implementation. output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the original scale. Note that this is also used for normalizing the training targets. NOTE: Clipping to [-1, +1] is used during MPPI/CEM. Therefore, it is recommended that you stick with "min_max" normalization mode here. image_encoder_hidden_dim: Number of channels for the convolutional layers used for image encoding. 
state_encoder_hidden_dim: Hidden dimension for MLP used for state vector encoding. latent_dim: Observation's latent embedding dimension. q_ensemble_size: Number of Q function estimators to use in an ensemble for uncertainty estimation. mlp_dim: Hidden dimension of MLPs used for modelling the dynamics encoder, reward function, policy (π), Q ensemble, and V. discount: Discount factor (γ) to use for the reinforcement learning formalism. use_mpc: Whether to use model predictive control. The alternative is to just sample the policy model (π) for each step. cem_iterations: Number of iterations for the MPPI/CEM loop in MPC. max_std: Maximum standard deviation for actions sampled from the gaussian PDF in CEM. min_std: Minimum standard deviation for noise applied to actions sampled from the policy model (π). Doubles up as the minimum standard deviation for actions sampled from the gaussian PDF in CEM. n_gaussian_samples: Number of samples to draw from the gaussian distribution every CEM iteration. Must be non-zero. n_pi_samples: Number of samples to draw from the policy / world model rollout every CEM iteration. Can be zero. uncertainty_regularizer_coeff: Coefficient for the uncertainty regularization used when estimating trajectory values (this is the λ coefficient in eqn 4 of FOWM). n_elites: The number of elite samples to use for updating the gaussian parameters every CEM iteration. elite_weighting_temperature: The temperature to use for softmax weighting (by trajectory value) of the elites, when updating the gaussian parameters for CEM. gaussian_mean_momentum: Momentum (α) used for EMA updates of the mean parameter μ of the gaussian parameters optimized in CEM. Updates are calculated as μ⁻ ← αμ⁻ + (1-α)μ. max_random_shift_ratio: Maximum random shift (as a proportion of the image size) to apply to the image(s) (in units of pixels) for training-time augmentation. If set to 0, no such augmentation is applied. Note that the input images are assumed to be square for this augmentation. reward_coeff: Loss weighting coefficient for the reward regression loss. expectile_weight: Weighting (τ) used in expectile regression for the state value function (V). v_pred < v_target is weighted by τ and v_pred >= v_target is weighted by (1-τ). τ is expected to be in [0, 1]. Setting τ closer to 1 results in a more "optimistic" V. This is sensible to do because v_target is obtained by evaluating the learned state-action value functions (Q) with in-sample actions that may not be always optimal. value_coeff: Loss weighting coefficient for both the state-action value (Q) TD loss, and the state value (V) expectile regression loss. consistency_coeff: Loss weighting coefficient for the consistency loss. advantage_scaling: A factor by which the advantages are scaled prior to exponentiation for advantage weighted regression of the policy (π) estimator parameters. Note that the exponentiated advantages are clamped at 100.0. pi_coeff: Loss weighting coefficient for the action regression loss. temporal_decay_coeff: Exponential decay coefficient for decaying the loss coefficient for future time- steps. Hint: each loss computation involves `horizon` steps worth of actions starting from the current time step. target_model_momentum: Momentum (α) used for EMA updates of the target models. Updates are calculated as ϕ ← αϕ + (1-α)θ where ϕ are the parameters of the target model and θ are the parameters of the model being trained. """ # Input / output structure. 
n_obs_steps: int = 1 n_action_repeats: int = 2 horizon: int = 5 n_action_steps: int = 1 normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { "VISUAL": NormalizationMode.IDENTITY, "STATE": NormalizationMode.IDENTITY, "ENV": NormalizationMode.IDENTITY, "ACTION": NormalizationMode.MIN_MAX, } ) # Architecture / modeling. # Neural networks. image_encoder_hidden_dim: int = 32 state_encoder_hidden_dim: int = 256 latent_dim: int = 50 q_ensemble_size: int = 5 mlp_dim: int = 512 # Reinforcement learning. discount: float = 0.9 # Inference. use_mpc: bool = True cem_iterations: int = 6 max_std: float = 2.0 min_std: float = 0.05 n_gaussian_samples: int = 512 n_pi_samples: int = 51 uncertainty_regularizer_coeff: float = 1.0 n_elites: int = 50 elite_weighting_temperature: float = 0.5 gaussian_mean_momentum: float = 0.1 # Training and loss computation. max_random_shift_ratio: float = 0.0476 # Loss coefficients. reward_coeff: float = 0.5 expectile_weight: float = 0.9 value_coeff: float = 0.1 consistency_coeff: float = 20.0 advantage_scaling: float = 3.0 pi_coeff: float = 0.5 temporal_decay_coeff: float = 0.5 # Target model. target_model_momentum: float = 0.995 # Training presets optimizer_lr: float = 3e-4 def __post_init__(self): super().__post_init__() """Input validation (not exhaustive).""" if self.n_gaussian_samples <= 0: raise ValueError( f"The number of gaussian samples for CEM should be non-zero. Got `{self.n_gaussian_samples=}`" ) if self.normalization_mapping["ACTION"] is not NormalizationMode.MIN_MAX: raise ValueError( "TD-MPC assumes the action space dimensions to all be in [-1, 1]. Therefore it is strongly " f"advised that you stick with the default. See {self.__class__.__name__} docstring for more " "information." ) if self.n_obs_steps != 1: raise ValueError( f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`" ) if self.n_action_steps > 1: if self.n_action_repeats != 1: raise ValueError( "If `n_action_steps > 1`, `n_action_repeats` must be left to its default value of 1." ) if not self.use_mpc: raise ValueError("If `n_action_steps > 1`, `use_mpc` must be set to `True`.") if self.n_action_steps > self.horizon: raise ValueError("`n_action_steps` must be less than or equal to `horizon`.") def get_optimizer_preset(self) -> AdamConfig: return AdamConfig(lr=self.optimizer_lr) def get_scheduler_preset(self) -> None: return None def validate_features(self) -> None: # There should only be one image key. if len(self.image_features) > 1: raise ValueError( f"{self.__class__.__name__} handles at most one image for now. Got image keys {self.image_features}." ) if len(self.image_features) > 0: image_ft = next(iter(self.image_features.values())) if image_ft.shape[-2] != image_ft.shape[-1]: # TODO(alexander-soare): This limitation is solely because of code in the random shift # augmentation. It should be able to be removed. raise ValueError(f"Only square images are handled now. Got image shape {image_ft.shape}.") @property def observation_delta_indices(self) -> list: return list(range(self.horizon + 1)) @property def action_delta_indices(self) -> list: return list(range(self.horizon)) @property def reward_delta_indices(self) -> None: return list(range(self.horizon))
lerobot/src/lerobot/policies/tdmpc/configuration_tdmpc.py/0
{ "file_path": "lerobot/src/lerobot/policies/tdmpc/configuration_tdmpc.py", "repo_id": "lerobot", "token_count": 4265 }
213
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import time from functools import cached_property from itertools import chain from typing import Any import numpy as np from lerobot.cameras.utils import make_cameras_from_configs from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.motors import Motor, MotorCalibration, MotorNormMode from lerobot.motors.feetech import ( FeetechMotorsBus, OperatingMode, ) from ..robot import Robot from ..utils import ensure_safe_goal_position from .config_lekiwi import LeKiwiConfig logger = logging.getLogger(__name__) class LeKiwi(Robot): """ The robot includes a three omniwheel mobile base and a remote follower arm. The leader arm is connected locally (on the laptop) and its joint positions are recorded and then forwarded to the remote follower arm (after applying a safety clamp). In parallel, keyboard teleoperation is used to generate raw velocity commands for the wheels. """ config_class = LeKiwiConfig name = "lekiwi" def __init__(self, config: LeKiwiConfig): super().__init__(config) self.config = config norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100 self.bus = FeetechMotorsBus( port=self.config.port, motors={ # arm "arm_shoulder_pan": Motor(1, "sts3215", norm_mode_body), "arm_shoulder_lift": Motor(2, "sts3215", norm_mode_body), "arm_elbow_flex": Motor(3, "sts3215", norm_mode_body), "arm_wrist_flex": Motor(4, "sts3215", norm_mode_body), "arm_wrist_roll": Motor(5, "sts3215", norm_mode_body), "arm_gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), # base "base_left_wheel": Motor(7, "sts3215", MotorNormMode.RANGE_M100_100), "base_back_wheel": Motor(8, "sts3215", MotorNormMode.RANGE_M100_100), "base_right_wheel": Motor(9, "sts3215", MotorNormMode.RANGE_M100_100), }, calibration=self.calibration, ) self.arm_motors = [motor for motor in self.bus.motors if motor.startswith("arm")] self.base_motors = [motor for motor in self.bus.motors if motor.startswith("base")] self.cameras = make_cameras_from_configs(config.cameras) @property def _state_ft(self) -> dict[str, type]: return dict.fromkeys( ( "arm_shoulder_pan.pos", "arm_shoulder_lift.pos", "arm_elbow_flex.pos", "arm_wrist_flex.pos", "arm_wrist_roll.pos", "arm_gripper.pos", "x.vel", "y.vel", "theta.vel", ), float, ) @property def _cameras_ft(self) -> dict[str, tuple]: return { cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras } @cached_property def observation_features(self) -> dict[str, type | tuple]: return {**self._state_ft, **self._cameras_ft} @cached_property def action_features(self) -> dict[str, type]: return self._state_ft @property def is_connected(self) -> bool: return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values()) def connect(self, calibrate: bool = True) -> None: if self.is_connected: raise DeviceAlreadyConnectedError(f"{self} already 
connected") self.bus.connect() if not self.is_calibrated and calibrate: logger.info( "Mismatch between calibration values in the motor and the calibration file or no calibration file found" ) self.calibrate() for cam in self.cameras.values(): cam.connect() self.configure() logger.info(f"{self} connected.") @property def is_calibrated(self) -> bool: return self.bus.is_calibrated def calibrate(self) -> None: if self.calibration: # Calibration file exists, ask user whether to use it or run new calibration user_input = input( f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: " ) if user_input.strip().lower() != "c": logger.info(f"Writing calibration file associated with the id {self.id} to the motors") self.bus.write_calibration(self.calibration) return logger.info(f"\nRunning calibration of {self}") motors = self.arm_motors + self.base_motors self.bus.disable_torque(self.arm_motors) for name in self.arm_motors: self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value) input("Move robot to the middle of its range of motion and press ENTER....") homing_offsets = self.bus.set_half_turn_homings(self.arm_motors) homing_offsets.update(dict.fromkeys(self.base_motors, 0)) full_turn_motor = [ motor for motor in motors if any(keyword in motor for keyword in ["wheel", "wrist"]) ] unknown_range_motors = [motor for motor in motors if motor not in full_turn_motor] print( f"Move all arm joints except '{full_turn_motor}' sequentially through their " "entire ranges of motion.\nRecording positions. Press ENTER to stop..." ) range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors) for name in full_turn_motor: range_mins[name] = 0 range_maxes[name] = 4095 self.calibration = {} for name, motor in self.bus.motors.items(): self.calibration[name] = MotorCalibration( id=motor.id, drive_mode=0, homing_offset=homing_offsets[name], range_min=range_mins[name], range_max=range_maxes[name], ) self.bus.write_calibration(self.calibration) self._save_calibration() print("Calibration saved to", self.calibration_fpath) def configure(self): # Set-up arm actuators (position mode) # We assume that at connection time, arm is in a rest position, # and torque can be safely disabled to run calibration. 
self.bus.disable_torque() self.bus.configure_motors() for name in self.arm_motors: self.bus.write("Operating_Mode", name, OperatingMode.POSITION.value) # Set P_Coefficient to lower value to avoid shakiness (Default is 32) self.bus.write("P_Coefficient", name, 16) # Set I_Coefficient and D_Coefficient to default value 0 and 32 self.bus.write("I_Coefficient", name, 0) self.bus.write("D_Coefficient", name, 32) for name in self.base_motors: self.bus.write("Operating_Mode", name, OperatingMode.VELOCITY.value) self.bus.enable_torque() def setup_motors(self) -> None: for motor in chain(reversed(self.arm_motors), reversed(self.base_motors)): input(f"Connect the controller board to the '{motor}' motor only and press enter.") self.bus.setup_motor(motor) print(f"'{motor}' motor id set to {self.bus.motors[motor].id}") @staticmethod def _degps_to_raw(degps: float) -> int: steps_per_deg = 4096.0 / 360.0 speed_in_steps = degps * steps_per_deg speed_int = int(round(speed_in_steps)) # Cap the value to fit within signed 16-bit range (-32768 to 32767) if speed_int > 0x7FFF: speed_int = 0x7FFF # 32767 -> maximum positive value elif speed_int < -0x8000: speed_int = -0x8000 # -32768 -> minimum negative value return speed_int @staticmethod def _raw_to_degps(raw_speed: int) -> float: steps_per_deg = 4096.0 / 360.0 magnitude = raw_speed degps = magnitude / steps_per_deg return degps def _body_to_wheel_raw( self, x: float, y: float, theta: float, wheel_radius: float = 0.05, base_radius: float = 0.125, max_raw: int = 3000, ) -> dict: """ Convert desired body-frame velocities into wheel raw commands. Parameters: x_cmd : Linear velocity in x (m/s). y_cmd : Linear velocity in y (m/s). theta_cmd : Rotational velocity (deg/s). wheel_radius: Radius of each wheel (meters). base_radius : Distance from the center of rotation to each wheel (meters). max_raw : Maximum allowed raw command (ticks) per wheel. Returns: A dictionary with wheel raw commands: {"base_left_wheel": value, "base_back_wheel": value, "base_right_wheel": value}. Notes: - Internally, the method converts theta_cmd to rad/s for the kinematics. - The raw command is computed from the wheels angular speed in deg/s using _degps_to_raw(). If any command exceeds max_raw, all commands are scaled down proportionally. """ # Convert rotational velocity from deg/s to rad/s. theta_rad = theta * (np.pi / 180.0) # Create the body velocity vector [x, y, theta_rad]. velocity_vector = np.array([x, y, theta_rad]) # Define the wheel mounting angles with a -90° offset. angles = np.radians(np.array([240, 0, 120]) - 90) # Build the kinematic matrix: each row maps body velocities to a wheel’s linear speed. # The third column (base_radius) accounts for the effect of rotation. m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles]) # Compute each wheel’s linear speed (m/s) and then its angular speed (rad/s). wheel_linear_speeds = m.dot(velocity_vector) wheel_angular_speeds = wheel_linear_speeds / wheel_radius # Convert wheel angular speeds from rad/s to deg/s. wheel_degps = wheel_angular_speeds * (180.0 / np.pi) # Scaling steps_per_deg = 4096.0 / 360.0 raw_floats = [abs(degps) * steps_per_deg for degps in wheel_degps] max_raw_computed = max(raw_floats) if max_raw_computed > max_raw: scale = max_raw / max_raw_computed wheel_degps = wheel_degps * scale # Convert each wheel’s angular speed (deg/s) to a raw integer. 
        wheel_raw = [self._degps_to_raw(deg) for deg in wheel_degps]

        return {
            "base_left_wheel": wheel_raw[0],
            "base_back_wheel": wheel_raw[1],
            "base_right_wheel": wheel_raw[2],
        }

    def _wheel_raw_to_body(
        self,
        left_wheel_speed,
        back_wheel_speed,
        right_wheel_speed,
        wheel_radius: float = 0.05,
        base_radius: float = 0.125,
    ) -> dict[str, Any]:
        """
        Convert wheel raw command feedback back into body-frame velocities.

        Parameters:
            left_wheel_speed, back_wheel_speed, right_wheel_speed:
                          Raw commands read back from the "base_left_wheel", "base_back_wheel"
                          and "base_right_wheel" motors.
            wheel_radius: Radius of each wheel (meters).
            base_radius : Distance from the robot center to each wheel (meters).

        Returns:
            A dict of body-frame velocities: "x.vel" and "y.vel" in m/s, "theta.vel" in deg/s.
        """
        # Convert each raw command back to an angular speed in deg/s.
        wheel_degps = np.array(
            [
                self._raw_to_degps(left_wheel_speed),
                self._raw_to_degps(back_wheel_speed),
                self._raw_to_degps(right_wheel_speed),
            ]
        )

        # Convert from deg/s to rad/s.
        wheel_radps = wheel_degps * (np.pi / 180.0)
        # Compute each wheel's linear speed (m/s) from its angular speed.
        wheel_linear_speeds = wheel_radps * wheel_radius

        # Define the wheel mounting angles with a -90° offset.
        angles = np.radians(np.array([240, 0, 120]) - 90)
        m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles])

        # Solve the inverse kinematics: body_velocity = M⁻¹ · wheel_linear_speeds.
        m_inv = np.linalg.inv(m)
        velocity_vector = m_inv.dot(wheel_linear_speeds)
        x, y, theta_rad = velocity_vector
        theta = theta_rad * (180.0 / np.pi)

        return {
            "x.vel": x,
            "y.vel": y,
            "theta.vel": theta,
        }  # x, y in m/s; theta in deg/s

    def get_observation(self) -> dict[str, Any]:
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        # Read actuators position for arm and vel for base
        start = time.perf_counter()
        arm_pos = self.bus.sync_read("Present_Position", self.arm_motors)
        base_wheel_vel = self.bus.sync_read("Present_Velocity", self.base_motors)

        base_vel = self._wheel_raw_to_body(
            base_wheel_vel["base_left_wheel"],
            base_wheel_vel["base_back_wheel"],
            base_wheel_vel["base_right_wheel"],
        )

        arm_state = {f"{k}.pos": v for k, v in arm_pos.items()}

        obs_dict = {**arm_state, **base_vel}

        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read state: {dt_ms:.1f}ms")

        # Capture images from cameras
        for cam_key, cam in self.cameras.items():
            start = time.perf_counter()
            obs_dict[cam_key] = cam.async_read()
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")

        return obs_dict

    def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
        """Command lekiwi to move to a target joint configuration.

        The relative action magnitude may be clipped depending on the configuration parameter
        `max_relative_target`. In this case, the action sent differs from the original action.
        Thus, this function always returns the action actually sent.

        Raises:
            DeviceNotConnectedError: if the robot is not connected.

        Returns:
            dict[str, Any]: the action sent to the motors, potentially clipped.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        arm_goal_pos = {k: v for k, v in action.items() if k.endswith(".pos")}
        base_goal_vel = {k: v for k, v in action.items() if k.endswith(".vel")}

        base_wheel_goal_vel = self._body_to_wheel_raw(
            base_goal_vel["x.vel"], base_goal_vel["y.vel"], base_goal_vel["theta.vel"]
        )

        # Cap goal position when too far away from present position.
        # /!\ Slower fps expected due to reading from the follower.
if self.config.max_relative_target is not None: present_pos = self.bus.sync_read("Present_Position", self.arm_motors) goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in arm_goal_pos.items()} arm_safe_goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target) arm_goal_pos = arm_safe_goal_pos # Send goal position to the actuators arm_goal_pos_raw = {k.replace(".pos", ""): v for k, v in arm_goal_pos.items()} self.bus.sync_write("Goal_Position", arm_goal_pos_raw) self.bus.sync_write("Goal_Velocity", base_wheel_goal_vel) return {**arm_goal_pos, **base_goal_vel} def stop_base(self): self.bus.sync_write("Goal_Velocity", dict.fromkeys(self.base_motors, 0), num_retry=5) logger.info("Base motors stopped") def disconnect(self): if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") self.stop_base() self.bus.disconnect(self.config.disable_torque_on_disconnect) for cam in self.cameras.values(): cam.disconnect() logger.info(f"{self} disconnected.")
lerobot/src/lerobot/robots/lekiwi/lekiwi.py/0
{ "file_path": "lerobot/src/lerobot/robots/lekiwi/lekiwi.py", "repo_id": "lerobot", "token_count": 7410 }
214
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import numpy as np from stretch_body.gamepad_teleop import GamePadTeleop from stretch_body.robot import Robot as StretchAPI from stretch_body.robot_params import RobotParams from lerobot.cameras.utils import make_cameras_from_configs from lerobot.constants import OBS_IMAGES, OBS_STATE from lerobot.datasets.utils import get_nested_item from ..robot import Robot from .configuration_stretch3 import Stretch3RobotConfig # {lerobot_keys: stretch.api.keys} STRETCH_MOTORS = { "head_pan.pos": "head.head_pan.pos", "head_tilt.pos": "head.head_tilt.pos", "lift.pos": "lift.pos", "arm.pos": "arm.pos", "wrist_pitch.pos": "end_of_arm.wrist_pitch.pos", "wrist_roll.pos": "end_of_arm.wrist_roll.pos", "wrist_yaw.pos": "end_of_arm.wrist_yaw.pos", "gripper.pos": "end_of_arm.stretch_gripper.pos", "base_x.vel": "base.x_vel", "base_y.vel": "base.y_vel", "base_theta.vel": "base.theta_vel", } class Stretch3Robot(Robot): """[Stretch 3](https://hello-robot.com/stretch-3-product), by Hello Robot.""" config_class = Stretch3RobotConfig name = "stretch3" def __init__(self, config: Stretch3RobotConfig): raise NotImplementedError super().__init__(config) self.config = config self.robot_type = self.config.type self.api = StretchAPI() self.cameras = make_cameras_from_configs(config.cameras) self.is_connected = False self.logs = {} self.teleop = None # TODO remove # TODO(aliberts): test this RobotParams.set_logging_level("WARNING") RobotParams.set_logging_formatter("brief_console_formatter") self.state_keys = None self.action_keys = None @property def observation_features(self) -> dict: return { "dtype": "float32", "shape": (len(STRETCH_MOTORS),), "names": {"motors": list(STRETCH_MOTORS)}, } @property def action_features(self) -> dict: return self.observation_features @property def camera_features(self) -> dict[str, dict]: cam_ft = {} for cam_key, cam in self.cameras.items(): cam_ft[cam_key] = { "shape": (cam.height, cam.width, cam.channels), "names": ["height", "width", "channels"], "info": None, } return cam_ft def connect(self) -> None: self.is_connected = self.api.startup() if not self.is_connected: print("Another process is already using Stretch. 
Try running 'stretch_free_robot_process.py'") raise ConnectionError() for cam in self.cameras.values(): cam.connect() self.is_connected = self.is_connected and cam.is_connected if not self.is_connected: print("Could not connect to the cameras, check that all cameras are plugged-in.") raise ConnectionError() self.calibrate() def calibrate(self) -> None: if not self.api.is_homed(): self.api.home() def _get_state(self) -> dict: status = self.api.get_status() return {k: get_nested_item(status, v, sep=".") for k, v in STRETCH_MOTORS.items()} def get_observation(self) -> dict[str, np.ndarray]: obs_dict = {} # Read Stretch state before_read_t = time.perf_counter() state = self._get_state() self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t if self.state_keys is None: self.state_keys = list(state) state = np.asarray(list(state.values())) obs_dict[OBS_STATE] = state # Capture images from cameras for cam_key, cam in self.cameras.items(): before_camread_t = time.perf_counter() obs_dict[f"{OBS_IMAGES}.{cam_key}"] = cam.async_read() self.logs[f"read_camera_{cam_key}_dt_s"] = cam.logs["delta_timestamp_s"] self.logs[f"async_read_camera_{cam_key}_dt_s"] = time.perf_counter() - before_camread_t return obs_dict def send_action(self, action: np.ndarray) -> np.ndarray: if not self.is_connected: raise ConnectionError() if self.teleop is None: self.teleop = GamePadTeleop(robot_instance=False) self.teleop.startup(robot=self) if self.action_keys is None: dummy_action = self.teleop.gamepad_controller.get_state() self.action_keys = list(dummy_action.keys()) action_dict = dict(zip(self.action_keys, action.tolist(), strict=True)) before_write_t = time.perf_counter() self.teleop.do_motion(state=action_dict, robot=self) self.push_command() self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t # TODO(aliberts): return action_sent when motion is limited return action def print_logs(self) -> None: pass # TODO(aliberts): move robot-specific logs logic here def teleop_safety_stop(self) -> None: if self.teleop is not None: self.teleop._safety_stop(robot=self) def disconnect(self) -> None: self.api.stop() if self.teleop is not None: self.teleop.gamepad_controller.stop() self.teleop.stop() for cam in self.cameras.values(): cam.disconnect() self.is_connected = False
lerobot/src/lerobot/robots/stretch3/robot_stretch3.py/0
{ "file_path": "lerobot/src/lerobot/robots/stretch3/robot_stretch3.py", "repo_id": "lerobot", "token_count": 2634 }
215
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Client side: The environment evolves with a time resolution equal to 1/fps""" DEFAULT_FPS = 30 """Server side: Running inference on (at most) 1/fps""" DEFAULT_INFERENCE_LATENCY = 1 / DEFAULT_FPS """Server side: Timeout for observation queue in seconds""" DEFAULT_OBS_QUEUE_TIMEOUT = 2 # All action chunking policies SUPPORTED_POLICIES = ["act", "smolvla", "diffusion", "pi0", "tdmpc", "vqbet"] # TODO: Add all other robots SUPPORTED_ROBOTS = ["so100_follower", "so101_follower"]
lerobot/src/lerobot/scripts/server/constants.py/0
{ "file_path": "lerobot/src/lerobot/scripts/server/constants.py", "repo_id": "lerobot", "token_count": 328 }
216
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import time from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.motors import Motor, MotorCalibration, MotorNormMode from lerobot.motors.feetech import ( FeetechMotorsBus, OperatingMode, ) from ..teleoperator import Teleoperator from .config_so100_leader import SO100LeaderConfig logger = logging.getLogger(__name__) class SO100Leader(Teleoperator): """ [SO-100 Leader Arm](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio """ config_class = SO100LeaderConfig name = "so100_leader" def __init__(self, config: SO100LeaderConfig): super().__init__(config) self.config = config self.bus = FeetechMotorsBus( port=self.config.port, motors={ "shoulder_pan": Motor(1, "sts3215", MotorNormMode.RANGE_M100_100), "shoulder_lift": Motor(2, "sts3215", MotorNormMode.RANGE_M100_100), "elbow_flex": Motor(3, "sts3215", MotorNormMode.RANGE_M100_100), "wrist_flex": Motor(4, "sts3215", MotorNormMode.RANGE_M100_100), "wrist_roll": Motor(5, "sts3215", MotorNormMode.RANGE_M100_100), "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), }, calibration=self.calibration, ) @property def action_features(self) -> dict[str, type]: return {f"{motor}.pos": float for motor in self.bus.motors} @property def feedback_features(self) -> dict[str, type]: return {} @property def is_connected(self) -> bool: return self.bus.is_connected def connect(self, calibrate: bool = True) -> None: if self.is_connected: raise DeviceAlreadyConnectedError(f"{self} already connected") self.bus.connect() if not self.is_calibrated and calibrate: logger.info( "Mismatch between calibration values in the motor and the calibration file or no calibration file found" ) self.calibrate() self.configure() logger.info(f"{self} connected.") @property def is_calibrated(self) -> bool: return self.bus.is_calibrated def calibrate(self) -> None: if self.calibration: # Calibration file exists, ask user whether to use it or run new calibration user_input = input( f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: " ) if user_input.strip().lower() != "c": logger.info(f"Writing calibration file associated with the id {self.id} to the motors") self.bus.write_calibration(self.calibration) return logger.info(f"\nRunning calibration of {self}") self.bus.disable_torque() for motor in self.bus.motors: self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value) input(f"Move {self} to the middle of its range of motion and press ENTER....") homing_offsets = self.bus.set_half_turn_homings() full_turn_motor = "wrist_roll" unknown_range_motors = [motor for motor in self.bus.motors if motor != full_turn_motor] print( f"Move all joints except '{full_turn_motor}' sequentially through their " "entire ranges of motion.\nRecording positions. Press ENTER to stop..." 
        )
        range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
        range_mins[full_turn_motor] = 0
        range_maxes[full_turn_motor] = 4095

        self.calibration = {}
        for motor, m in self.bus.motors.items():
            self.calibration[motor] = MotorCalibration(
                id=m.id,
                drive_mode=0,
                homing_offset=homing_offsets[motor],
                range_min=range_mins[motor],
                range_max=range_maxes[motor],
            )

        self.bus.write_calibration(self.calibration)
        self._save_calibration()
        print(f"Calibration saved to {self.calibration_fpath}")

    def configure(self) -> None:
        self.bus.disable_torque()
        self.bus.configure_motors()
        for motor in self.bus.motors:
            self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)

    def setup_motors(self) -> None:
        for motor in reversed(self.bus.motors):
            input(f"Connect the controller board to the '{motor}' motor only and press enter.")
            self.bus.setup_motor(motor)
            print(f"'{motor}' motor id set to {self.bus.motors[motor].id}")

    def get_action(self) -> dict[str, float]:
        start = time.perf_counter()
        action = self.bus.sync_read("Present_Position")
        action = {f"{motor}.pos": val for motor, val in action.items()}
        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read action: {dt_ms:.1f}ms")
        return action

    def send_feedback(self, feedback: dict[str, float]) -> None:
        # TODO(rcadene, aliberts): Implement force feedback
        raise NotImplementedError

    def disconnect(self) -> None:
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        self.bus.disconnect()
        logger.info(f"{self} disconnected.")
lerobot/src/lerobot/teleoperators/so100_leader/so100_leader.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/so100_leader/so100_leader.py", "repo_id": "lerobot", "token_count": 2559 }
217
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TypedDict import torch class Transition(TypedDict): state: dict[str, torch.Tensor] action: torch.Tensor reward: float next_state: dict[str, torch.Tensor] done: bool truncated: bool complementary_info: dict[str, torch.Tensor | float | int] | None = None def move_transition_to_device(transition: Transition, device: str = "cpu") -> Transition: device = torch.device(device) non_blocking = device.type == "cuda" # Move state tensors to device transition["state"] = { key: val.to(device, non_blocking=non_blocking) for key, val in transition["state"].items() } # Move action to device transition["action"] = transition["action"].to(device, non_blocking=non_blocking) # Move reward and done if they are tensors if isinstance(transition["reward"], torch.Tensor): transition["reward"] = transition["reward"].to(device, non_blocking=non_blocking) if isinstance(transition["done"], torch.Tensor): transition["done"] = transition["done"].to(device, non_blocking=non_blocking) if isinstance(transition["truncated"], torch.Tensor): transition["truncated"] = transition["truncated"].to(device, non_blocking=non_blocking) # Move next_state tensors to device transition["next_state"] = { key: val.to(device, non_blocking=non_blocking) for key, val in transition["next_state"].items() } # Move complementary_info tensors if present if transition.get("complementary_info") is not None: for key, val in transition["complementary_info"].items(): if isinstance(val, torch.Tensor): transition["complementary_info"][key] = val.to(device, non_blocking=non_blocking) elif isinstance(val, (int, float, bool)): transition["complementary_info"][key] = torch.tensor(val, device=device) else: raise ValueError(f"Unsupported type {type(val)} for complementary_info[{key}]") return transition def move_state_dict_to_device(state_dict, device="cpu"): """ Recursively move all tensors in a (potentially) nested dict/list/tuple structure to the CPU. """ if isinstance(state_dict, torch.Tensor): return state_dict.to(device) elif isinstance(state_dict, dict): return {k: move_state_dict_to_device(v, device=device) for k, v in state_dict.items()} elif isinstance(state_dict, list): return [move_state_dict_to_device(v, device=device) for v in state_dict] elif isinstance(state_dict, tuple): return tuple(move_state_dict_to_device(v, device=device) for v in state_dict) else: return state_dict
lerobot/src/lerobot/utils/transition.py/0
{ "file_path": "lerobot/src/lerobot/utils/transition.py", "repo_id": "lerobot", "token_count": 1183 }
218
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import torch from packaging import version from safetensors.torch import load_file from torchvision.transforms import v2 from torchvision.transforms.v2 import functional as F # noqa: N812 from lerobot.datasets.transforms import ( ImageTransformConfig, ImageTransforms, ImageTransformsConfig, RandomSubsetApply, SharpnessJitter, make_transform_from_config, ) from lerobot.scripts.visualize_image_transforms import ( save_all_transforms, save_each_transform, ) from lerobot.utils.random_utils import seeded_context from tests.artifacts.image_transforms.save_image_transforms_to_safetensors import ARTIFACT_DIR from tests.utils import require_x86_64_kernel @pytest.fixture def color_jitters(): return [ v2.ColorJitter(brightness=0.5), v2.ColorJitter(contrast=0.5), v2.ColorJitter(saturation=0.5), ] @pytest.fixture def single_transforms(): return load_file(ARTIFACT_DIR / "single_transforms.safetensors") @pytest.fixture def img_tensor(single_transforms): return single_transforms["original_frame"] @pytest.fixture def default_transforms(): return load_file(ARTIFACT_DIR / "default_transforms.safetensors") def test_get_image_transforms_no_transform_enable_false(img_tensor_factory): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig() # default is enable=False tf_actual = ImageTransforms(tf_cfg) torch.testing.assert_close(tf_actual(img_tensor), img_tensor) def test_get_image_transforms_no_transform_max_num_transforms_0(img_tensor_factory): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig(enable=True, max_num_transforms=0) tf_actual = ImageTransforms(tf_cfg) torch.testing.assert_close(tf_actual(img_tensor), img_tensor) @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def test_get_image_transforms_brightness(img_tensor_factory, min_max): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig( enable=True, tfs={"brightness": ImageTransformConfig(type="ColorJitter", kwargs={"brightness": min_max})}, ) tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(brightness=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def test_get_image_transforms_contrast(img_tensor_factory, min_max): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig( enable=True, tfs={"contrast": ImageTransformConfig(type="ColorJitter", kwargs={"contrast": min_max})} ) tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(contrast=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def test_get_image_transforms_saturation(img_tensor_factory, min_max): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig( enable=True, tfs={"saturation": ImageTransformConfig(type="ColorJitter", kwargs={"saturation": min_max})}, ) tf_actual = 
ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(saturation=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @pytest.mark.parametrize("min_max", [(-0.25, -0.25), (0.25, 0.25)]) def test_get_image_transforms_hue(img_tensor_factory, min_max): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig( enable=True, tfs={"hue": ImageTransformConfig(type="ColorJitter", kwargs={"hue": min_max})} ) tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.ColorJitter(hue=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)]) def test_get_image_transforms_sharpness(img_tensor_factory, min_max): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig( enable=True, tfs={"sharpness": ImageTransformConfig(type="SharpnessJitter", kwargs={"sharpness": min_max})}, ) tf_actual = ImageTransforms(tf_cfg) tf_expected = SharpnessJitter(sharpness=min_max) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) def test_get_image_transforms_max_num_transforms(img_tensor_factory): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig( enable=True, max_num_transforms=5, tfs={ "brightness": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"brightness": (0.5, 0.5)}, ), "contrast": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"contrast": (0.5, 0.5)}, ), "saturation": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"saturation": (0.5, 0.5)}, ), "hue": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"hue": (0.5, 0.5)}, ), "sharpness": ImageTransformConfig( weight=1.0, type="SharpnessJitter", kwargs={"sharpness": (0.5, 0.5)}, ), }, ) tf_actual = ImageTransforms(tf_cfg) tf_expected = v2.Compose( [ v2.ColorJitter(brightness=(0.5, 0.5)), v2.ColorJitter(contrast=(0.5, 0.5)), v2.ColorJitter(saturation=(0.5, 0.5)), v2.ColorJitter(hue=(0.5, 0.5)), SharpnessJitter(sharpness=(0.5, 0.5)), ] ) torch.testing.assert_close(tf_actual(img_tensor), tf_expected(img_tensor)) @require_x86_64_kernel def test_get_image_transforms_random_order(img_tensor_factory): out_imgs = [] img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig( enable=True, random_order=True, tfs={ "brightness": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"brightness": (0.5, 0.5)}, ), "contrast": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"contrast": (0.5, 0.5)}, ), "saturation": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"saturation": (0.5, 0.5)}, ), "hue": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"hue": (0.5, 0.5)}, ), "sharpness": ImageTransformConfig( weight=1.0, type="SharpnessJitter", kwargs={"sharpness": (0.5, 0.5)}, ), }, ) tf = ImageTransforms(tf_cfg) with seeded_context(1338): for _ in range(10): out_imgs.append(tf(img_tensor)) tmp_img_tensor = img_tensor for sub_tf in tf.tf.selected_transforms: tmp_img_tensor = sub_tf(tmp_img_tensor) torch.testing.assert_close(tmp_img_tensor, out_imgs[-1]) for i in range(1, len(out_imgs)): with pytest.raises(AssertionError): torch.testing.assert_close(out_imgs[0], out_imgs[i]) @pytest.mark.parametrize( "tf_type, tf_name, min_max_values", [ ("ColorJitter", "brightness", [(0.5, 0.5), (2.0, 2.0)]), ("ColorJitter", "contrast", [(0.5, 0.5), (2.0, 2.0)]), ("ColorJitter", "saturation", [(0.5, 0.5), (2.0, 2.0)]), ("ColorJitter", "hue", [(-0.25, -0.25), (0.25, 0.25)]), ("SharpnessJitter", "sharpness", [(0.5, 0.5), (2.0, 
2.0)]), ], ) def test_backward_compatibility_single_transforms( img_tensor, tf_type, tf_name, min_max_values, single_transforms ): for min_max in min_max_values: tf_cfg = ImageTransformConfig(type=tf_type, kwargs={tf_name: min_max}) tf = make_transform_from_config(tf_cfg) actual = tf(img_tensor) key = f"{tf_name}_{min_max[0]}_{min_max[1]}" expected = single_transforms[key] torch.testing.assert_close(actual, expected) @require_x86_64_kernel @pytest.mark.skipif( version.parse(torch.__version__) < version.parse("2.7.0"), reason="Test artifacts were generated with PyTorch >= 2.7.0 which has different multinomial behavior", ) def test_backward_compatibility_default_config(img_tensor, default_transforms): # NOTE: PyTorch versions have different randomness, it might break this test. # See this PR: https://github.com/huggingface/lerobot/pull/1127. cfg = ImageTransformsConfig(enable=True) default_tf = ImageTransforms(cfg) with seeded_context(1337): actual = default_tf(img_tensor) expected = default_transforms["default"] torch.testing.assert_close(actual, expected) @pytest.mark.parametrize("p", [[0, 1], [1, 0]]) def test_random_subset_apply_single_choice(img_tensor_factory, p): img_tensor = img_tensor_factory() flips = [v2.RandomHorizontalFlip(p=1), v2.RandomVerticalFlip(p=1)] random_choice = RandomSubsetApply(flips, p=p, n_subset=1, random_order=False) actual = random_choice(img_tensor) p_horz, _ = p if p_horz: torch.testing.assert_close(actual, F.horizontal_flip(img_tensor)) else: torch.testing.assert_close(actual, F.vertical_flip(img_tensor)) def test_random_subset_apply_random_order(img_tensor_factory): img_tensor = img_tensor_factory() flips = [v2.RandomHorizontalFlip(p=1), v2.RandomVerticalFlip(p=1)] random_order = RandomSubsetApply(flips, p=[0.5, 0.5], n_subset=2, random_order=True) # We can't really check whether the transforms are actually applied in random order. However, # horizontal and vertical flip are commutative. Meaning, even under the assumption that the transform # applies them in random order, we can use a fixed order to compute the expected value. 
actual = random_order(img_tensor) expected = v2.Compose(flips)(img_tensor) torch.testing.assert_close(actual, expected) def test_random_subset_apply_valid_transforms(img_tensor_factory, color_jitters): img_tensor = img_tensor_factory() transform = RandomSubsetApply(color_jitters) output = transform(img_tensor) assert output.shape == img_tensor.shape def test_random_subset_apply_probability_length_mismatch(color_jitters): with pytest.raises(ValueError): RandomSubsetApply(color_jitters, p=[0.5, 0.5]) @pytest.mark.parametrize("n_subset", [0, 5]) def test_random_subset_apply_invalid_n_subset(color_jitters, n_subset): with pytest.raises(ValueError): RandomSubsetApply(color_jitters, n_subset=n_subset) def test_sharpness_jitter_valid_range_tuple(img_tensor_factory): img_tensor = img_tensor_factory() tf = SharpnessJitter((0.1, 2.0)) output = tf(img_tensor) assert output.shape == img_tensor.shape def test_sharpness_jitter_valid_range_float(img_tensor_factory): img_tensor = img_tensor_factory() tf = SharpnessJitter(0.5) output = tf(img_tensor) assert output.shape == img_tensor.shape def test_sharpness_jitter_invalid_range_min_negative(): with pytest.raises(ValueError): SharpnessJitter((-0.1, 2.0)) def test_sharpness_jitter_invalid_range_max_smaller(): with pytest.raises(ValueError): SharpnessJitter((2.0, 0.1)) def test_save_all_transforms(img_tensor_factory, tmp_path): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig(enable=True) n_examples = 3 save_all_transforms(tf_cfg, img_tensor, tmp_path, n_examples) # Check if the combined transforms directory exists and contains the right files combined_transforms_dir = tmp_path / "all" assert combined_transforms_dir.exists(), "Combined transforms directory was not created." assert any(combined_transforms_dir.iterdir()), ( "No transformed images found in combined transforms directory." ) for i in range(1, n_examples + 1): assert (combined_transforms_dir / f"{i}.png").exists(), ( f"Combined transform image {i}.png was not found." ) def test_save_each_transform(img_tensor_factory, tmp_path): img_tensor = img_tensor_factory() tf_cfg = ImageTransformsConfig(enable=True) n_examples = 3 save_each_transform(tf_cfg, img_tensor, tmp_path, n_examples) # Check if the transformed images exist for each transform type transforms = ["brightness", "contrast", "saturation", "hue", "sharpness"] for transform in transforms: transform_dir = tmp_path / transform assert transform_dir.exists(), f"{transform} directory was not created." assert any(transform_dir.iterdir()), f"No transformed images found in {transform} directory." # Check for specific files within each transform directory expected_files = [f"{i}.png" for i in range(1, n_examples + 1)] + ["min.png", "max.png", "mean.png"] for file_name in expected_files: assert (transform_dir / file_name).exists(), ( f"{file_name} was not found in {transform} directory." )
lerobot/tests/datasets/test_image_transforms.py/0
{ "file_path": "lerobot/tests/datasets/test_image_transforms.py", "repo_id": "lerobot", "token_count": 6067 }
219
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random from dataclasses import dataclass, field from functools import cached_property from typing import Any from lerobot.cameras import CameraConfig, make_cameras_from_configs from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.robots import Robot, RobotConfig @RobotConfig.register_subclass("mock_robot") @dataclass class MockRobotConfig(RobotConfig): n_motors: int = 3 cameras: dict[str, CameraConfig] = field(default_factory=dict) random_values: bool = True static_values: list[float] | None = None calibrated: bool = True def __post_init__(self): if self.n_motors < 1: raise ValueError(self.n_motors) if self.random_values and self.static_values is not None: raise ValueError("Choose either random values or static values") if self.static_values is not None and len(self.static_values) != self.n_motors: raise ValueError("Specify the same number of static values as motors") if len(self.cameras) > 0: raise NotImplementedError # TODO with the cameras refactor class MockRobot(Robot): """Mock Robot to be used for testing.""" config_class = MockRobotConfig name = "mock_robot" def __init__(self, config: MockRobotConfig): super().__init__(config) self.config = config self._is_connected = False self._is_calibrated = config.calibrated self.motors = [f"motor_{i + 1}" for i in range(config.n_motors)] self.cameras = make_cameras_from_configs(config.cameras) @property def _motors_ft(self) -> dict[str, type]: return {f"{motor}.pos": float for motor in self.motors} @property def _cameras_ft(self) -> dict[str, tuple]: return { cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras } @cached_property def observation_features(self) -> dict[str, type | tuple]: return {**self._motors_ft, **self._cameras_ft} @cached_property def action_features(self) -> dict[str, type]: return self._motors_ft @property def is_connected(self) -> bool: return self._is_connected def connect(self, calibrate: bool = True) -> None: if self.is_connected: raise DeviceAlreadyConnectedError(f"{self} already connected") self._is_connected = True if calibrate: self.calibrate() @property def is_calibrated(self) -> bool: return self._is_calibrated def calibrate(self) -> None: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") self._is_calibrated = True def configure(self) -> None: pass def get_observation(self) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") if self.config.random_values: return {f"{motor}.pos": random.uniform(-100, 100) for motor in self.motors} else: return { f"{motor}.pos": val for motor, val in zip(self.motors, self.config.static_values, strict=True) } def send_action(self, action: dict[str, Any]) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") return action def disconnect(self) 
-> None: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") self._is_connected = False
lerobot/tests/mocks/mock_robot.py/0
{ "file_path": "lerobot/tests/mocks/mock_robot.py", "repo_id": "lerobot", "token_count": 1646 }
220
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile from pathlib import Path import numpy as np import torch from lerobot.configs.types import FeatureType from lerobot.processor import ProcessorStepRegistry, RenameProcessor, RobotProcessor, TransitionKey from tests.conftest import assert_contract_is_typed def create_transition( observation=None, action=None, reward=None, done=None, truncated=None, info=None, complementary_data=None ): """Helper to create an EnvTransition dictionary.""" return { TransitionKey.OBSERVATION: observation, TransitionKey.ACTION: action, TransitionKey.REWARD: reward, TransitionKey.DONE: done, TransitionKey.TRUNCATED: truncated, TransitionKey.INFO: info, TransitionKey.COMPLEMENTARY_DATA: complementary_data, } def test_basic_renaming(): """Test basic key renaming functionality.""" rename_map = { "old_key1": "new_key1", "old_key2": "new_key2", } processor = RenameProcessor(rename_map=rename_map) observation = { "old_key1": torch.tensor([1.0, 2.0]), "old_key2": np.array([3.0, 4.0]), "unchanged_key": "keep_me", } transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check renamed keys assert "new_key1" in processed_obs assert "new_key2" in processed_obs assert "old_key1" not in processed_obs assert "old_key2" not in processed_obs # Check values are preserved torch.testing.assert_close(processed_obs["new_key1"], torch.tensor([1.0, 2.0])) np.testing.assert_array_equal(processed_obs["new_key2"], np.array([3.0, 4.0])) # Check unchanged key is preserved assert processed_obs["unchanged_key"] == "keep_me" def test_empty_rename_map(): """Test processor with empty rename map (should pass through unchanged).""" processor = RenameProcessor(rename_map={}) observation = { "key1": torch.tensor([1.0]), "key2": "value2", } transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # All keys should be unchanged assert processed_obs.keys() == observation.keys() torch.testing.assert_close(processed_obs["key1"], observation["key1"]) assert processed_obs["key2"] == observation["key2"] def test_none_observation(): """Test processor with None observation.""" processor = RenameProcessor(rename_map={"old": "new"}) transition = create_transition() result = processor(transition) # Should return transition unchanged assert result == transition def test_overlapping_rename(): """Test renaming when new names might conflict.""" rename_map = { "a": "b", "b": "c", # This creates a potential conflict } processor = RenameProcessor(rename_map=rename_map) observation = { "a": 1, "b": 2, "x": 3, } transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that renaming happens correctly assert "a" not in processed_obs assert processed_obs["b"] == 1 # 'a' renamed to 'b' assert 
processed_obs["c"] == 2 # original 'b' renamed to 'c' assert processed_obs["x"] == 3 def test_partial_rename(): """Test renaming only some keys.""" rename_map = { "observation.state": "observation.proprio_state", "pixels": "observation.image", } processor = RenameProcessor(rename_map=rename_map) observation = { "observation.state": torch.randn(10), "pixels": np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8), "reward": 1.0, "info": {"episode": 1}, } transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check renamed keys assert "observation.proprio_state" in processed_obs assert "observation.image" in processed_obs assert "observation.state" not in processed_obs assert "pixels" not in processed_obs # Check unchanged keys assert processed_obs["reward"] == 1.0 assert processed_obs["info"] == {"episode": 1} def test_get_config(): """Test configuration serialization.""" rename_map = { "old1": "new1", "old2": "new2", } processor = RenameProcessor(rename_map=rename_map) config = processor.get_config() assert config == {"rename_map": rename_map} def test_state_dict(): """Test state dict (should be empty for RenameProcessor).""" processor = RenameProcessor(rename_map={"old": "new"}) state = processor.state_dict() assert state == {} # Load state dict should work even with empty dict processor.load_state_dict({}) def test_integration_with_robot_processor(): """Test integration with RobotProcessor pipeline.""" rename_map = { "agent_pos": "observation.state", "pixels": "observation.image", } rename_processor = RenameProcessor(rename_map=rename_map) pipeline = RobotProcessor([rename_processor]) observation = { "agent_pos": np.array([1.0, 2.0, 3.0]), "pixels": np.zeros((32, 32, 3), dtype=np.uint8), "other_data": "preserve_me", } transition = create_transition( observation=observation, reward=0.5, done=False, truncated=False, info={}, complementary_data={} ) result = pipeline(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check renaming worked through pipeline assert "observation.state" in processed_obs assert "observation.image" in processed_obs assert "agent_pos" not in processed_obs assert "pixels" not in processed_obs assert processed_obs["other_data"] == "preserve_me" # Check other transition elements unchanged assert result[TransitionKey.REWARD] == 0.5 assert result[TransitionKey.DONE] is False def test_save_and_load_pretrained(): """Test saving and loading processor with RobotProcessor.""" rename_map = { "old_state": "observation.state", "old_image": "observation.image", } processor = RenameProcessor(rename_map=rename_map) pipeline = RobotProcessor([processor], name="TestRenameProcessor") with tempfile.TemporaryDirectory() as tmp_dir: # Save pipeline pipeline.save_pretrained(tmp_dir) # Check files were created config_path = Path(tmp_dir) / "testrenameprocessor.json" # Based on name="TestRenameProcessor" assert config_path.exists() # No state files should be created for RenameProcessor state_files = list(Path(tmp_dir).glob("*.safetensors")) assert len(state_files) == 0 # Load pipeline loaded_pipeline = RobotProcessor.from_pretrained(tmp_dir) assert loaded_pipeline.name == "TestRenameProcessor" assert len(loaded_pipeline) == 1 # Check that loaded processor works correctly loaded_processor = loaded_pipeline.steps[0] assert isinstance(loaded_processor, RenameProcessor) assert loaded_processor.rename_map == rename_map # Test functionality after loading observation = {"old_state": [1, 2, 3], 
"old_image": "image_data"} transition = create_transition(observation=observation) result = loaded_pipeline(transition) processed_obs = result[TransitionKey.OBSERVATION] assert "observation.state" in processed_obs assert "observation.image" in processed_obs assert processed_obs["observation.state"] == [1, 2, 3] assert processed_obs["observation.image"] == "image_data" def test_registry_functionality(): """Test that RenameProcessor is properly registered.""" # Check that it's registered assert "rename_processor" in ProcessorStepRegistry.list() # Get from registry retrieved_class = ProcessorStepRegistry.get("rename_processor") assert retrieved_class is RenameProcessor # Create instance from registry instance = retrieved_class(rename_map={"old": "new"}) assert isinstance(instance, RenameProcessor) assert instance.rename_map == {"old": "new"} def test_registry_based_save_load(): """Test save/load using registry name instead of module path.""" processor = RenameProcessor(rename_map={"key1": "renamed_key1"}) pipeline = RobotProcessor([processor]) with tempfile.TemporaryDirectory() as tmp_dir: # Save and load pipeline.save_pretrained(tmp_dir) # Verify config uses registry name import json with open(Path(tmp_dir) / "robotprocessor.json") as f: # Default name is "RobotProcessor" config = json.load(f) assert "registry_name" in config["steps"][0] assert config["steps"][0]["registry_name"] == "rename_processor" assert "class" not in config["steps"][0] # Should use registry, not module path # Load should work loaded_pipeline = RobotProcessor.from_pretrained(tmp_dir) loaded_processor = loaded_pipeline.steps[0] assert isinstance(loaded_processor, RenameProcessor) assert loaded_processor.rename_map == {"key1": "renamed_key1"} def test_chained_rename_processors(): """Test multiple RenameProcessors in a pipeline.""" # First processor: rename raw keys to intermediate format processor1 = RenameProcessor( rename_map={ "pos": "agent_position", "img": "camera_image", } ) # Second processor: rename to final format processor2 = RenameProcessor( rename_map={ "agent_position": "observation.state", "camera_image": "observation.image", } ) pipeline = RobotProcessor([processor1, processor2]) observation = { "pos": np.array([1.0, 2.0]), "img": "image_data", "extra": "keep_me", } transition = create_transition(observation=observation) # Step through to see intermediate results results = list(pipeline.step_through(transition)) # After first processor assert "agent_position" in results[1][TransitionKey.OBSERVATION] assert "camera_image" in results[1][TransitionKey.OBSERVATION] # After second processor final_obs = results[2][TransitionKey.OBSERVATION] assert "observation.state" in final_obs assert "observation.image" in final_obs assert final_obs["extra"] == "keep_me" # Original keys should be gone assert "pos" not in final_obs assert "img" not in final_obs assert "agent_position" not in final_obs assert "camera_image" not in final_obs def test_nested_observation_rename(): """Test renaming with nested observation structures.""" rename_map = { "observation.images.left": "observation.camera.left_view", "observation.images.right": "observation.camera.right_view", "observation.proprio": "observation.proprioception", } processor = RenameProcessor(rename_map=rename_map) observation = { "observation.images.left": torch.randn(3, 64, 64), "observation.images.right": torch.randn(3, 64, 64), "observation.proprio": torch.randn(7), "observation.gripper": torch.tensor([0.0]), # Not renamed } transition = 
create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check renames assert "observation.camera.left_view" in processed_obs assert "observation.camera.right_view" in processed_obs assert "observation.proprioception" in processed_obs # Check unchanged key assert "observation.gripper" in processed_obs # Check old keys removed assert "observation.images.left" not in processed_obs assert "observation.images.right" not in processed_obs assert "observation.proprio" not in processed_obs def test_value_types_preserved(): """Test that various value types are preserved during renaming.""" rename_map = {"old_tensor": "new_tensor", "old_array": "new_array", "old_scalar": "new_scalar"} processor = RenameProcessor(rename_map=rename_map) tensor_value = torch.randn(3, 3) array_value = np.random.rand(2, 2) observation = { "old_tensor": tensor_value, "old_array": array_value, "old_scalar": 42, "old_string": "hello", "old_dict": {"nested": "value"}, "old_list": [1, 2, 3], } transition = create_transition(observation=observation) result = processor(transition) processed_obs = result[TransitionKey.OBSERVATION] # Check that values and types are preserved assert torch.equal(processed_obs["new_tensor"], tensor_value) assert np.array_equal(processed_obs["new_array"], array_value) assert processed_obs["new_scalar"] == 42 assert processed_obs["old_string"] == "hello" assert processed_obs["old_dict"] == {"nested": "value"} assert processed_obs["old_list"] == [1, 2, 3] def test_feature_contract_basic_renaming(policy_feature_factory): processor = RenameProcessor(rename_map={"a": "x", "b": "y"}) features = { "a": policy_feature_factory(FeatureType.STATE, (2,)), "b": policy_feature_factory(FeatureType.ACTION, (3,)), "c": policy_feature_factory(FeatureType.ENV, (1,)), } out = processor.feature_contract(features.copy()) # Values preserved and typed assert out["x"] == features["a"] assert out["y"] == features["b"] assert out["c"] == features["c"] assert_contract_is_typed(out) # Input not mutated assert set(features) == {"a", "b", "c"} def test_feature_contract_overlapping_keys(policy_feature_factory): # Overlapping renames: both 'a' and 'b' exist. 'a'->'b', 'b'->'c' processor = RenameProcessor(rename_map={"a": "b", "b": "c"}) features = { "a": policy_feature_factory(FeatureType.STATE, (1,)), "b": policy_feature_factory(FeatureType.STATE, (2,)), } out = processor.feature_contract(features) assert set(out) == {"b", "c"} assert out["b"] == features["a"] # 'a' renamed to'b' assert out["c"] == features["b"] # 'b' renamed to 'c' assert_contract_is_typed(out) def test_feature_contract_chained_processors(policy_feature_factory): # Chain two rename processors at the contract level processor1 = RenameProcessor(rename_map={"pos": "agent_position", "img": "camera_image"}) processor2 = RenameProcessor( rename_map={"agent_position": "observation.state", "camera_image": "observation.image"} ) pipeline = RobotProcessor([processor1, processor2]) spec = { "pos": policy_feature_factory(FeatureType.STATE, (7,)), "img": policy_feature_factory(FeatureType.VISUAL, (3, 64, 64)), "extra": policy_feature_factory(FeatureType.ENV, (1,)), } out = pipeline.feature_contract(initial_features=spec) assert set(out) == {"observation.state", "observation.image", "extra"} assert out["observation.state"] == spec["pos"] assert out["observation.image"] == spec["img"] assert out["extra"] == spec["extra"] assert_contract_is_typed(out)
lerobot/tests/processor/test_rename_processor.py/0
{ "file_path": "lerobot/tests/processor/test_rename_processor.py", "repo_id": "lerobot", "token_count": 5991 }
221
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from unittest.mock import Mock, patch from lerobot.constants import ( CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK, OPTIMIZER_PARAM_GROUPS, OPTIMIZER_STATE, RNG_STATE, SCHEDULER_STATE, TRAINING_STATE_DIR, TRAINING_STEP, ) from lerobot.utils.train_utils import ( get_step_checkpoint_dir, get_step_identifier, load_training_state, load_training_step, save_checkpoint, save_training_state, save_training_step, update_last_checkpoint, ) def test_get_step_identifier(): assert get_step_identifier(5, 1000) == "000005" assert get_step_identifier(123, 100_000) == "000123" assert get_step_identifier(456789, 1_000_000) == "0456789" def test_get_step_checkpoint_dir(): output_dir = Path("/checkpoints") step_dir = get_step_checkpoint_dir(output_dir, 1000, 5) assert step_dir == output_dir / CHECKPOINTS_DIR / "000005" def test_save_load_training_step(tmp_path): save_training_step(5000, tmp_path) assert (tmp_path / TRAINING_STEP).is_file() def test_load_training_step(tmp_path): step = 5000 save_training_step(step, tmp_path) loaded_step = load_training_step(tmp_path) assert loaded_step == step def test_update_last_checkpoint(tmp_path): checkpoint = tmp_path / "0005" checkpoint.mkdir() update_last_checkpoint(checkpoint) last_checkpoint = tmp_path / LAST_CHECKPOINT_LINK assert last_checkpoint.is_symlink() assert last_checkpoint.resolve() == checkpoint @patch("lerobot.utils.train_utils.save_training_state") def test_save_checkpoint(mock_save_training_state, tmp_path, optimizer): policy = Mock() cfg = Mock() save_checkpoint(tmp_path, 10, cfg, policy, optimizer) policy.save_pretrained.assert_called_once() cfg.save_pretrained.assert_called_once() mock_save_training_state.assert_called_once() def test_save_training_state(tmp_path, optimizer, scheduler): save_training_state(tmp_path, 10, optimizer, scheduler) assert (tmp_path / TRAINING_STATE_DIR).is_dir() assert (tmp_path / TRAINING_STATE_DIR / TRAINING_STEP).is_file() assert (tmp_path / TRAINING_STATE_DIR / RNG_STATE).is_file() assert (tmp_path / TRAINING_STATE_DIR / OPTIMIZER_STATE).is_file() assert (tmp_path / TRAINING_STATE_DIR / OPTIMIZER_PARAM_GROUPS).is_file() assert (tmp_path / TRAINING_STATE_DIR / SCHEDULER_STATE).is_file() def test_save_load_training_state(tmp_path, optimizer, scheduler): save_training_state(tmp_path, 10, optimizer, scheduler) loaded_step, loaded_optimizer, loaded_scheduler = load_training_state(tmp_path, optimizer, scheduler) assert loaded_step == 10 assert loaded_optimizer is optimizer assert loaded_scheduler is scheduler
lerobot/tests/utils/test_train_utils.py/0
{ "file_path": "lerobot/tests/utils/test_train_utils.py", "repo_id": "lerobot", "token_count": 1236 }
222
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
open-r1/LICENSE/0
{ "file_path": "open-r1/LICENSE", "repo_id": "open-r1", "token_count": 3168 }
223
[isort] default_section = FIRSTPARTY ensure_newline_before_comments = True force_grid_wrap = 0 include_trailing_comma = True known_first_party = open_r1 known_third_party = transformers datasets fugashi git h5py matplotlib nltk numpy packaging pandas psutil pytest rouge_score sacrebleu seqeval sklearn streamlit torch tqdm line_length = 119 lines_after_imports = 2 multi_line_output = 3 use_parentheses = True [flake8] ignore = E203, E501, E741, W503, W605 max-line-length = 119 per-file-ignores = # imported but unused __init__.py: F401 [tool:pytest] doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
open-r1/setup.cfg/0
{ "file_path": "open-r1/setup.cfg", "repo_id": "open-r1", "token_count": 300 }
224
# coding=utf-8 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Any, Literal, Optional import trl @dataclass class DatasetConfig: """Configuration for a dataset in a mixture.""" id: str config: Optional[str] = None split: str = "train" columns: Optional[list[str]] = None weight: Optional[float] = None @dataclass class DatasetMixtureConfig: """Configuration for a mixture of datasets.""" datasets: list[DatasetConfig] seed: int = 0 test_split_size: Optional[float] = None @dataclass class ScriptArguments(trl.ScriptArguments): """ Extended version of ScriptArguments with support for dataset mixtures. Args: dataset_mixture (`dict[str, Any]` or `None`, *optional*, defaults to `None`): Configuration for creating dataset mixtures with advanced options. Format: dataset_mixture: datasets: - id: dataset_id1 config: config_name columns: - col1 - col2 weight: 0.5 - id: dataset_id2 config: config_name columns: - col1 - col2 weight: 0.5 seed: 42 test_split_size: 0.1 """ # Override the dataset_name to make it optional dataset_name: Optional[str] = field( default=None, metadata={"help": "Dataset name. Can be omitted if using dataset_mixture."} ) dataset_mixture: Optional[dict[str, Any]] = field( default=None, metadata={"help": "Configuration for creating dataset mixtures with advanced options like shuffling."}, ) def __post_init__(self): if self.dataset_name is None and self.dataset_mixture is None: raise ValueError("Either `dataset_name` or `dataset_mixture` must be provided") if self.dataset_mixture is not None: if not isinstance(self.dataset_mixture, dict) or "datasets" not in self.dataset_mixture: raise ValueError( "dataset_mixture must be a dictionary with a 'datasets' key. " "Expected format: {'datasets': [...], 'seed': int}" ) datasets_list = [] datasets_data = self.dataset_mixture.get("datasets", []) if isinstance(datasets_data, list): for dataset_config in datasets_data: datasets_list.append( DatasetConfig( id=dataset_config.get("id"), config=dataset_config.get("config"), split=dataset_config.get("split", "train"), columns=dataset_config.get("columns"), weight=dataset_config.get("weight", 1.0), ) ) else: raise ValueError("'datasets' must be a list of dataset configurations") self.dataset_mixture = DatasetMixtureConfig( datasets=datasets_list, seed=self.dataset_mixture.get("seed", 0), test_split_size=self.dataset_mixture.get("test_split_size", None), ) # Check that column names are consistent across all dataset configs columns_sets = [set(dataset.columns) for dataset in datasets_list if dataset.columns is not None] if columns_sets: first_columns = columns_sets[0] if not all(columns == first_columns for columns in columns_sets): raise ValueError( "Column names must be consistent across all dataset configurations in a mixture. 
" f"Found different column sets: {[list(cols) for cols in columns_sets]}" ) # TODO: add the shared options with a mixin to reduce code duplication @dataclass class GRPOConfig(trl.GRPOConfig): """ args for callbacks, benchmarks etc """ benchmarks: list[str] = field( default_factory=lambda: [], metadata={"help": "The benchmarks to run after training."}, ) callbacks: list[str] = field( default_factory=lambda: [], metadata={"help": "The callbacks to run during training."}, ) chat_template: Optional[str] = field(default=None, metadata={"help": "The chat template to use."}) hub_model_revision: Optional[str] = field( default="main", metadata={"help": "The Hub model branch to push the model to."} ) num_completions_to_print: int = field(default=0, metadata={"help": "Number of completions to print."}) overwrite_hub_revision: bool = field(default=False, metadata={"help": "Whether to overwrite the Hub revision."}) push_to_hub_revision: bool = field(default=False, metadata={"help": "Whether to push to a Hub revision/branch."}) system_prompt: Optional[str] = field( default=None, metadata={"help": "The optional system prompt to use."}, ) wandb_log_unique_prompts: bool = field( default=True, metadata={ "help": ("Whether to log the unique prompts to wandb. This will create a new run for each unique prompt.") }, ) wandb_entity: Optional[str] = field( default=None, metadata={"help": ("The entity to store runs under.")}, ) wandb_project: Optional[str] = field( default=None, metadata={"help": ("The project to store runs under.")}, ) wandb_run_group: Optional[str] = field( default=None, metadata={"help": ("The group to store runs under.")}, ) @dataclass class SFTConfig(trl.SFTConfig): """ args for callbacks, benchmarks etc """ benchmarks: list[str] = field( default_factory=lambda: [], metadata={"help": "The benchmarks to run after training."}, ) callbacks: list[str] = field( default_factory=lambda: [], metadata={"help": "The callbacks to run during training."}, ) chat_template: Optional[str] = field(default=None, metadata={"help": "The chat template to use."}) system_prompt: Optional[str] = field( default=None, metadata={"help": "The optional system prompt to use for benchmarking."}, ) hub_model_revision: Optional[str] = field( default="main", metadata={"help": "The Hub model branch to push the model to."}, ) overwrite_hub_revision: bool = field(default=False, metadata={"help": "Whether to overwrite the Hub revision."}) push_to_hub_revision: bool = field(default=False, metadata={"help": "Whether to push to a Hub revision/branch."}) wandb_entity: Optional[str] = field( default=None, metadata={"help": ("The entity to store runs under.")}, ) wandb_project: Optional[str] = field( default=None, metadata={"help": ("The project to store runs under.")}, ) wandb_run_group: Optional[str] = field( default=None, metadata={"help": ("The group to store runs under.")}, ) @dataclass class GRPOScriptArguments(ScriptArguments): """ Script arguments for the GRPO training script. Args: reward_funcs (`list[str]`): List of reward functions. Possible values: 'accuracy', 'format', 'reasoning_steps', 'cosine', 'repetition_penalty', 'length', 'tag_count', 'code', 'ioi_code', 'code_format', 'soft_overlong_punishment'. cosine_min_value_wrong (`float`): Minimum reward for cosine scaling for wrong answers. cosine_max_value_wrong (`float`): Maximum reward for cosine scaling for wrong answers. cosine_min_value_correct (`float`): Minimum reward for cosine scaling for correct answers. 
cosine_max_value_correct (`float`): Maximum reward for cosine scaling for correct answers. cosine_max_len (`int`): Maximum length for cosine scaling. code_language (`str`): Language for code format reward. max_completion_len (`int`): Maximum number of tokens in completion. soft_punish_cache (`int`): Minimum number of tokens in completion. """ reward_funcs: list[str] = field( default_factory=lambda: ["accuracy", "format", "tag_count"], metadata={ "help": "List of reward functions. Possible values: 'accuracy', 'format', 'reasoning_steps', 'cosine', 'repetition_penalty', 'length', tag_count', 'code', 'code_format'" }, ) cosine_min_value_wrong: float = field( default=0.0, metadata={"help": "Minimum reward for wrong answers"}, ) cosine_max_value_wrong: float = field( default=-0.5, metadata={"help": "Maximum reward for wrong answers"}, ) cosine_min_value_correct: float = field( default=0.5, metadata={"help": "Minimum reward for correct answers"}, ) cosine_max_value_correct: float = field( default=1.0, metadata={"help": "Maximum reward for correct answers"}, ) cosine_max_len: int = field( default=1000, metadata={"help": "Maximum length for scaling"}, ) repetition_n_grams: int = field( default=3, metadata={"help": "Number of n-grams for repetition penalty reward"}, ) repetition_max_penalty: float = field( default=-1.0, metadata={"help": "Maximum (negative) penalty for for repetition penalty reward"}, ) code_language: str = field( default="python", # '(?:python|cpp)' metadata={ "help": "Language for code format reward. Based on E2B supported languages https://e2b.dev/docs/code-interpreting/supported-languages", "choices": ["python", "javascript", "r", "java", "bash", "cpp"], }, ) code_eval_test_batch_size: int = field( default=1, metadata={ "help": "for each generation, evaluate these many test cases in parallel, then check if any of them failed (0 score): if so stop evaluating; otherwise continue with the next batch of test cases. Useful to avoid overloading the eval server + save time on wrong solutions" }, ) code_eval_scoring_mode: Literal["pass_fail", "partial", "weighted_sum"] = field( default="weighted_sum", metadata={"help": "use fraction of passed test cases as reward. If false, use 0/1 scoring."}, ) parallel_code_exec_per_proc: int = field( default=2, metadata={ "help": "Number of parallel E2B code executions per process. Default of 2 is suitable for the Free Hobby tier of E2B with 8 GPUs used for training." }, ) dataset_prompt_column: str = field( default="prompt", metadata={"help": "Column to use as prompts for training."}, ) e2b_router_url: Optional[str] = field( default=None, metadata={"help": "URL for the E2B router. See scripts/e2b_router.py"}, ) morph_router_url: Optional[str] = field( default=None, metadata={"help": "URL for the MorphCloud router. See scripts/morph_router.py"}, ) code_provider: Optional[str] = field( default="e2b", metadata={ "help": "Provider for code execution. Options: 'e2b', 'local', 'morph'.", "choices": ["e2b", "local", "morph"], }, ) ioi_provider: Optional[str] = field( default="piston", metadata={ "help": "Provider for IOI code execution. Options: 'piston', 'morph'.", "choices": ["piston", "morph"], }, ) max_completion_len: int = field( default=16384, metadata={"help": "Maximum number of characters in completion."}, ) soft_punish_cache: int = field( default=4096, metadata={"help": "Minimum number of characters in completion."}, )
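# Editor's usage sketch (hedged; not part of the upstream API, dataset ids are placeholders):
# illustrates how `ScriptArguments.__post_init__` normalizes a raw `dataset_mixture` dict
# into a typed `DatasetMixtureConfig` containing `DatasetConfig` entries.
if __name__ == "__main__":
    _example = ScriptArguments(
        dataset_mixture={
            "datasets": [
                {"id": "org/dataset-a", "columns": ["prompt"], "weight": 0.5},
                {"id": "org/dataset-b", "columns": ["prompt"], "weight": 0.5},
            ],
            "seed": 42,
            "test_split_size": 0.1,
        }
    )
    assert isinstance(_example.dataset_mixture, DatasetMixtureConfig)
    assert [d.id for d in _example.dataset_mixture.datasets] == ["org/dataset-a", "org/dataset-b"]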
open-r1/src/open_r1/configs.py/0
{ "file_path": "open-r1/src/open_r1/configs.py", "repo_id": "open-r1", "token_count": 5294 }
225
import logging import datasets from datasets import DatasetDict, concatenate_datasets from ..configs import ScriptArguments logger = logging.getLogger(__name__) def get_dataset(args: ScriptArguments) -> DatasetDict: """Load a dataset or a mixture of datasets based on the configuration. Args: args (ScriptArguments): Script arguments containing dataset configuration. Returns: DatasetDict: The loaded datasets. """ if args.dataset_name and not args.dataset_mixture: logger.info(f"Loading dataset: {args.dataset_name}") return datasets.load_dataset(args.dataset_name, args.dataset_config) elif args.dataset_mixture: logger.info(f"Creating dataset mixture with {len(args.dataset_mixture.datasets)} datasets") seed = args.dataset_mixture.seed datasets_list = [] for dataset_config in args.dataset_mixture.datasets: logger.info(f"Loading dataset for mixture: {dataset_config.id} (config: {dataset_config.config})") ds = datasets.load_dataset( dataset_config.id, dataset_config.config, split=dataset_config.split, ) if dataset_config.columns is not None: ds = ds.select_columns(dataset_config.columns) if dataset_config.weight is not None: ds = ds.shuffle(seed=seed).select(range(int(len(ds) * dataset_config.weight))) logger.info( f"Subsampled dataset '{dataset_config.id}' (config: {dataset_config.config}) with weight={dataset_config.weight} to {len(ds)} examples" ) datasets_list.append(ds) if datasets_list: combined_dataset = concatenate_datasets(datasets_list) combined_dataset = combined_dataset.shuffle(seed=seed) logger.info(f"Created dataset mixture with {len(combined_dataset)} examples") if args.dataset_mixture.test_split_size is not None: combined_dataset = combined_dataset.train_test_split( test_size=args.dataset_mixture.test_split_size, seed=seed ) logger.info( f"Split dataset into train and test sets with test size: {args.dataset_mixture.test_split_size}" ) return combined_dataset else: return DatasetDict({"train": combined_dataset}) else: raise ValueError("No datasets were loaded from the mixture configuration") else: raise ValueError("Either `dataset_name` or `dataset_mixture` must be provided")
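# Editor's note (hedged usage sketch; the dataset ids below are placeholders):
# `get_dataset` is typically driven by a YAML config that is parsed into `ScriptArguments`,
# for example:
#
#   dataset_mixture:
#     datasets:
#       - id: org/dataset-a
#         columns: [prompt]
#         weight: 0.5
#       - id: org/dataset-b
#         columns: [prompt]
#         weight: 0.5
#     seed: 42
#     test_split_size: 0.1
#
# When `test_split_size` is set, the returned `DatasetDict` contains "train" and "test"
# splits; otherwise only a single "train" split is returned.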
open-r1/src/open_r1/utils/data.py/0
{ "file_path": "open-r1/src/open_r1/utils/data.py", "repo_id": "open-r1", "token_count": 1203 }
226
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Custom models Some fine-tuning techniques, such as prompt tuning, are specific to language models. That means in 🤗 PEFT, it is assumed a 🤗 Transformers model is being used. However, other fine-tuning techniques - like [LoRA](../conceptual_guides/lora) - are not restricted to specific model types. In this guide, we will see how LoRA can be applied to a multilayer perceptron, a computer vision model from the [timm](https://huggingface.co/docs/timm/index) library, or a new 🤗 Transformers architecture. ## Multilayer perceptron Let's assume that we want to fine-tune a multilayer perceptron with LoRA. Here is the definition: ```python from torch import nn class MLP(nn.Module): def __init__(self, num_units_hidden=2000): super().__init__() self.seq = nn.Sequential( nn.Linear(20, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, 2), nn.LogSoftmax(dim=-1), ) def forward(self, X): return self.seq(X) ``` This is a straightforward multilayer perceptron with an input layer, a hidden layer, and an output layer. <Tip> For this toy example, we choose an exceedingly large number of hidden units to highlight the efficiency gains from PEFT, but those gains are in line with more realistic examples. </Tip> There are a few linear layers in this model that could be tuned with LoRA. When working with common 🤗 Transformers models, PEFT will know which layers to apply LoRA to, but in this case, it is up to us as a user to choose the layers. To determine the names of the layers to tune: ```python print([(n, type(m)) for n, m in MLP().named_modules()]) ``` This should print: ``` [('', __main__.MLP), ('seq', torch.nn.modules.container.Sequential), ('seq.0', torch.nn.modules.linear.Linear), ('seq.1', torch.nn.modules.activation.ReLU), ('seq.2', torch.nn.modules.linear.Linear), ('seq.3', torch.nn.modules.activation.ReLU), ('seq.4', torch.nn.modules.linear.Linear), ('seq.5', torch.nn.modules.activation.LogSoftmax)] ``` Let's say we want to apply LoRA to the input layer and to the hidden layer, those are `'seq.0'` and `'seq.2'`. Moreover, let's assume we want to update the output layer without LoRA, that would be `'seq.4'`. The corresponding config would be: ```python from peft import LoraConfig config = LoraConfig( target_modules=["seq.0", "seq.2"], modules_to_save=["seq.4"], ) ``` With that, we can create our PEFT model and check the fraction of parameters trained: ```python from peft import get_peft_model model = MLP() peft_model = get_peft_model(model, config) peft_model.print_trainable_parameters() # prints trainable params: 56,164 || all params: 4,100,164 || trainable%: 1.369798866581922 ``` Finally, we can use any training framework we like, or write our own fit loop, to train the `peft_model`. 
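For instance, a plain PyTorch loop is enough. Below is a minimal sketch that fits `peft_model` on randomly generated toy data; the tensors, optimizer, and hyperparameters are purely illustrative:

```python
import torch
from torch import nn, optim

# toy inputs: 64 samples with 20 features, 2 classes
X = torch.rand(64, 20)
y = torch.randint(0, 2, (64,))

optimizer = optim.Adam(peft_model.parameters(), lr=2e-3)
criterion = nn.NLLLoss()  # the MLP ends with LogSoftmax, so NLLLoss expects log-probabilities

peft_model.train()
for epoch in range(10):
    optimizer.zero_grad()
    loss = criterion(peft_model(X), y)
    loss.backward()
    optimizer.step()
```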
For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb). ## timm models The [timm](https://huggingface.co/docs/timm/index) library contains a large number of pretrained computer vision models. Those can also be fine-tuned with PEFT. Let's check out how this works in practice. To start, ensure that timm is installed in the Python environment: ```bash python -m pip install -U timm ``` Next we load a timm model for an image classification task: ```python import timm num_classes = ... model_id = "timm/poolformer_m36.sail_in1k" model = timm.create_model(model_id, pretrained=True, num_classes=num_classes) ``` Again, we need to make a decision about what layers to apply LoRA to. Since LoRA supports 2D conv layers, and since those are a major building block of this model, we should apply LoRA to the 2D conv layers. To identify the names of those layers, let's look at all the layer names: ```python print([(n, type(m)) for n, m in model.named_modules()]) ``` This will print a very long list, we'll only show the first few: ``` [('', timm.models.metaformer.MetaFormer), ('stem', timm.models.metaformer.Stem), ('stem.conv', torch.nn.modules.conv.Conv2d), ('stem.norm', torch.nn.modules.linear.Identity), ('stages', torch.nn.modules.container.Sequential), ('stages.0', timm.models.metaformer.MetaFormerStage), ('stages.0.downsample', torch.nn.modules.linear.Identity), ('stages.0.blocks', torch.nn.modules.container.Sequential), ('stages.0.blocks.0', timm.models.metaformer.MetaFormerBlock), ('stages.0.blocks.0.norm1', timm.layers.norm.GroupNorm1), ('stages.0.blocks.0.token_mixer', timm.models.metaformer.Pooling), ('stages.0.blocks.0.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d), ('stages.0.blocks.0.drop_path1', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.layer_scale1', timm.models.metaformer.Scale), ('stages.0.blocks.0.res_scale1', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.norm2', timm.layers.norm.GroupNorm1), ('stages.0.blocks.0.mlp', timm.layers.mlp.Mlp), ('stages.0.blocks.0.mlp.fc1', torch.nn.modules.conv.Conv2d), ('stages.0.blocks.0.mlp.act', torch.nn.modules.activation.GELU), ('stages.0.blocks.0.mlp.drop1', torch.nn.modules.dropout.Dropout), ('stages.0.blocks.0.mlp.norm', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.mlp.fc2', torch.nn.modules.conv.Conv2d), ('stages.0.blocks.0.mlp.drop2', torch.nn.modules.dropout.Dropout), ('stages.0.blocks.0.drop_path2', torch.nn.modules.linear.Identity), ('stages.0.blocks.0.layer_scale2', timm.models.metaformer.Scale), ('stages.0.blocks.0.res_scale2', torch.nn.modules.linear.Identity), ('stages.0.blocks.1', timm.models.metaformer.MetaFormerBlock), ('stages.0.blocks.1.norm1', timm.layers.norm.GroupNorm1), ('stages.0.blocks.1.token_mixer', timm.models.metaformer.Pooling), ('stages.0.blocks.1.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d), ... ('head.global_pool.flatten', torch.nn.modules.linear.Identity), ('head.norm', timm.layers.norm.LayerNorm2d), ('head.flatten', torch.nn.modules.flatten.Flatten), ('head.drop', torch.nn.modules.linear.Identity), ('head.fc', torch.nn.modules.linear.Linear)] ] ``` Upon closer inspection, we see that the 2D conv layers have names such as `"stages.0.blocks.0.mlp.fc1"` and `"stages.0.blocks.0.mlp.fc2"`. How can we match those layer names specifically? You can write a [regular expressions](https://docs.python.org/3/library/re.html) to match the layer names. 
For our case, the regex `r".*\.mlp\.fc\d"` should do the job. Furthermore, as in the first example, we should ensure that the output layer, in this case the classification head, is also updated. Looking at the end of the list printed above, we can see that it's named `'head.fc'`. With that in mind, here is our LoRA config: ```python config = LoraConfig(target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"]) ``` Then we only need to create the PEFT model by passing our base model and the config to `get_peft_model`: ```python peft_model = get_peft_model(model, config) peft_model.print_trainable_parameters() # prints trainable params: 1,064,454 || all params: 56,467,974 || trainable%: 1.88505789139876 ``` This shows us that we only need to train less than 2% of all parameters, which is a huge efficiency gain. For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb). ## New transformers architectures When new popular transformers architectures are released, we do our best to quickly add them to PEFT. If you come across a transformers model that is not supported out of the box, don't worry, it will most likely still work if the config is set correctly. Specifically, you have to identify the layers that should be adapted and set them correctly when initializing the corresponding config class, e.g. `LoraConfig`. Here are some tips to help with this. As a first step, it is a good idea to check the existing models for inspiration. You can find them inside of [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) in the PEFT repository. Often, you'll find a similar architecture that uses the same names. For example, if the new model architecture is a variation of the "mistral" model and you want to apply LoRA, you can see that the entry for "mistral" in `TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING` contains `["q_proj", "v_proj"]`. This tells you that for "mistral" models, the `target_modules` for LoRA should be `["q_proj", "v_proj"]`: ```python from peft import LoraConfig, get_peft_model my_mistral_model = ... config = LoraConfig( target_modules=["q_proj", "v_proj"], ..., # other LoRA arguments ) peft_model = get_peft_model(my_mistral_model, config) ``` If that doesn't help, check the existing modules in your model architecture with the `named_modules` method and try to identify the attention layers, especially the key, query, and value layers. Those will often have names such as `c_attn`, `query`, `q_proj`, etc. The key layer is not always adapted, and ideally, you should check whether including it results in better performance. Additionally, linear layers are common targets to be adapted (e.g. in [QLoRA paper](https://huggingface.co/papers/2305.14314), authors suggest to adapt them as well). Their names will often contain the strings `fc` or `dense`. If you want to add a new model to PEFT, please create an entry in [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) and open a pull request on the [repository](https://github.com/huggingface/peft/pulls). Don't forget to update the [README](https://github.com/huggingface/peft#models-support-matrix) as well. ## Verify parameters and layers You can verify whether you've correctly applied a PEFT method to your model in a few ways. * Check the fraction of parameters that are trainable with the [`~PeftModel.print_trainable_parameters`] method. 
If this number is lower or higher than expected, check the model `repr` by printing the model. This shows the names of all the layer types in the model. Ensure that only the intended target layers are replaced by the adapter layers. For example, if LoRA is applied to `nn.Linear` layers, then you should only see `lora.Linear` layers being used. ```py peft_model.print_trainable_parameters() ``` * Another way you can view the adapted layers is to use the `targeted_module_names` attribute to list the name of each module that was adapted. ```python print(peft_model.targeted_module_names) ``` ## Unsupported module types Methods like LoRA only work if the target modules are supported by PEFT. For example, it's possible to apply LoRA to `nn.Linear` and `nn.Conv2d` layers, but not, for instance, to `nn.LSTM`. If you find a layer class you want to apply PEFT to is not supported, you can: - define a custom mapping to dynamically dispatch custom modules in LoRA - open an [issue](https://github.com/huggingface/peft/issues) and request the feature where maintainers will implement it or guide you on how to implement it yourself if demand for this module type is sufficiently high ### Experimental support for dynamic dispatch of custom modules in LoRA > [!WARNING] > This feature is experimental and subject to change, depending on its reception by the community. We will introduce a public and stable API if there is significant demand for it. PEFT supports an experimental API for custom module types for LoRA. Let's assume you have a LoRA implementation for LSTMs. Normally, you would not be able to tell PEFT to use it, even if it would theoretically work with PEFT. However, this is possible with dynamic dispatch of custom layers. The experimental API currently looks like this: ```python class MyLoraLSTMLayer: ... base_model = ... # load the base model that uses LSTMs # add the LSTM layer names to target_modules config = LoraConfig(..., target_modules=["lstm"]) # define a mapping from base layer type to LoRA layer type custom_module_mapping = {nn.LSTM: MyLoraLSTMLayer} # register the new mapping config._register_custom_module(custom_module_mapping) # after registration, create the PEFT model peft_model = get_peft_model(base_model, config) # do training ``` <Tip> When you call [`get_peft_model`], you will see a warning because PEFT does not recognize the targeted module type. In this case, you can ignore this warning. </Tip> By supplying a custom mapping, PEFT first checks the base model's layers against the custom mapping and dispatches to the custom LoRA layer type if there is a match. If there is no match, PEFT checks the built-in LoRA layer types for a match. Therefore, this feature can also be used to override existing dispatch logic, e.g. if you want to use your own LoRA layer for `nn.Linear` instead of using the one provided by PEFT. When creating your custom LoRA module, please follow the same rules as the [existing LoRA modules](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/layer.py). Some important constraints to consider: - The custom module should inherit from `nn.Module` and `peft.tuners.lora.layer.LoraLayer`. - The `__init__` method of the custom module should have the positional arguments `base_layer` and `adapter_name`. After this, there are additional `**kwargs` that you are free to use or ignore. 
- The learnable parameters should be stored in an `nn.ModuleDict` or `nn.ParameterDict`, where the key corresponds to the name of the specific adapter (remember that a model can have more than one adapter at a time). - The name of these learnable parameter attributes should start with `"lora_"`, e.g. `self.lora_new_param = ...`. - Some methods are optional, e.g. you only need to implement `merge` and `unmerge` if you want to support weight merging. Currently, the information about the custom module does not persist when you save the model. When loading the model, you have to register the custom modules again. ```python # saving works as always and includes the parameters of the custom modules peft_model.save_pretrained(<model-path>) # loading the model later: base_model = ... # load the LoRA config that you saved earlier config = LoraConfig.from_pretrained(<model-path>) # register the custom module again, the same way as the first time custom_module_mapping = {nn.LSTM: MyLoraLSTMLayer} config._register_custom_module(custom_module_mapping) # pass the config instance to from_pretrained: peft_model = PeftModel.from_pretrained(model, tmp_path / "lora-custom-module", config=config) ``` If you use this feature and find it useful, or if you encounter problems, let us know by creating an issue or a discussion on GitHub. This allows us to estimate the demand for this feature and add a public API if it is sufficiently high.
peft/docs/source/developer_guides/custom_models.md/0
{ "file_path": "peft/docs/source/developer_guides/custom_models.md", "repo_id": "peft", "token_count": 4859 }
227
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Configuration [`PeftConfigMixin`] is the base configuration class for storing the adapter configuration of a [`PeftModel`], and [`PromptLearningConfig`] is the base configuration class for soft prompt methods (p-tuning, prefix tuning, and prompt tuning). These base classes contain methods for saving and loading model configurations from the Hub, specifying the PEFT method to use, type of task to perform, and model configurations like number of layers and number of attention heads. ## PeftConfigMixin [[autodoc]] config.PeftConfigMixin - all ## PeftConfig [[autodoc]] PeftConfig - all ## PromptLearningConfig [[autodoc]] PromptLearningConfig - all
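As a brief illustration, any of these configuration classes can be saved locally (or pushed to the Hub) and reloaded later. This is a minimal sketch; `"my-lora-adapter"` is a placeholder directory:

```py
from peft import LoraConfig, PeftConfig

config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
config.save_pretrained("my-lora-adapter")

# `from_pretrained` resolves the correct config subclass from the saved adapter_config.json
loaded_config = PeftConfig.from_pretrained("my-lora-adapter")
```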
peft/docs/source/package_reference/config.md/0
{ "file_path": "peft/docs/source/package_reference/config.md", "repo_id": "peft", "token_count": 224 }
228
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # P-tuning [P-tuning](https://hf.co/papers/2103.10385) adds trainable prompt embeddings to the input that is optimized by a prompt encoder to find a better prompt, eliminating the need to manually design prompts. The prompt tokens can be added anywhere in the input sequence, and p-tuning also introduces anchor tokens for improving performance. The abstract from the paper is: *While GPTs with traditional fine-tuning fail to achieve strong results on natural language understanding (NLU), we show that GPTs can be better than or comparable to similar-sized BERTs on NLU tasks with a novel method P-tuning -- which employs trainable continuous prompt embeddings. On the knowledge probing (LAMA) benchmark, the best GPT recovers 64\% (P@1) of world knowledge without any additional text provided during test time, which substantially improves the previous best by 20+ percentage points. On the SuperGlue benchmark, GPTs achieve comparable and sometimes better performance to similar-sized BERTs in supervised learning. Importantly, we find that P-tuning also improves BERTs' performance in both few-shot and supervised settings while largely reducing the need for prompt engineering. Consequently, P-tuning outperforms the state-of-the-art approaches on the few-shot SuperGlue benchmark.*. ## PromptEncoderConfig [[autodoc]] tuners.p_tuning.config.PromptEncoderConfig ## PromptEncoder [[autodoc]] tuners.p_tuning.model.PromptEncoder
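As a quick, hedged illustration, p-tuning can be applied to a causal language model by passing a [`PromptEncoderConfig`] to [`get_peft_model`]; the base model and hyperparameters below are only examples:

```py
from transformers import AutoModelForCausalLM
from peft import PromptEncoderConfig, TaskType, get_peft_model

model = AutoModelForCausalLM.from_pretrained("gpt2")
peft_config = PromptEncoderConfig(
    task_type=TaskType.CAUSAL_LM,
    num_virtual_tokens=20,
    encoder_hidden_size=128,
)
peft_model = get_peft_model(model, peft_config)
peft_model.print_trainable_parameters()
```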
peft/docs/source/package_reference/p_tuning.md/0
{ "file_path": "peft/docs/source/package_reference/p_tuning.md", "repo_id": "peft", "token_count": 540 }
229
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LoRA methods A popular way to efficiently train large models is to insert (typically in the attention blocks) smaller trainable matrices that are a low-rank decomposition of the delta weight matrix to be learnt during finetuning. The pretrained model's original weight matrix is frozen and only the smaller matrices are updated during training. This reduces the number of trainable parameters, reducing memory usage and training time which can be very expensive for large models. There are several different ways to express the weight matrix as a low-rank decomposition, but [Low-Rank Adaptation (LoRA)](../conceptual_guides/adapter#low-rank-adaptation-lora) is the most common method. The PEFT library supports several other LoRA variants, such as [Low-Rank Hadamard Product (LoHa)](../conceptual_guides/adapter#low-rank-hadamard-product-loha), [Low-Rank Kronecker Product (LoKr)](../conceptual_guides/adapter#low-rank-kronecker-product-lokr), and [Adaptive Low-Rank Adaptation (AdaLoRA)](../conceptual_guides/adapter#adaptive-low-rank-adaptation-adalora). You can learn more about how these methods work conceptually in the [Adapters](../conceptual_guides/adapter) guide. If you're interested in applying these methods to other tasks and use cases like semantic segmentation, token classification, take a look at our [notebook collection](https://huggingface.co/collections/PEFT/notebooks-6573b28b33e5a4bf5b157fc1)! Additionally, PEFT supports the [X-LoRA](../conceptual_guides/adapter#mixture-of-lora-experts-x-lora) Mixture of LoRA Experts method. This guide will show you how to quickly train an image classification model - with a low-rank decomposition method - to identify the class of food shown in an image. <Tip> Some familiarity with the general process of training an image classification model would be really helpful and allow you to focus on the low-rank decomposition methods. If you're new, we recommend taking a look at the [Image classification](https://huggingface.co/docs/transformers/tasks/image_classification) guide first from the Transformers documentation. When you're ready, come back and see how easy it is to drop PEFT in to your training! </Tip> Before you begin, make sure you have all the necessary libraries installed. ```bash pip install -q peft transformers datasets ``` ## Dataset In this guide, you'll use the [Food-101](https://huggingface.co/datasets/food101) dataset which contains images of 101 food classes (take a look at the [dataset viewer](https://huggingface.co/datasets/food101/viewer/default/train) to get a better idea of what the dataset looks like). Load the dataset with the [`~datasets.load_dataset`] function. 
```py from datasets import load_dataset ds = load_dataset("food101") ``` Each food class is labeled with an integer, so to make it easier to understand what these integers represent, you'll create a `label2id` and `id2label` dictionary to map the integer to its class label. ```py labels = ds["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = i id2label[i] = label id2label[2] "baklava" ``` Load an image processor to properly resize and normalize the pixel values of the training and evaluation images. ```py from transformers import AutoImageProcessor image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") ``` You can also use the image processor to prepare some transformation functions for data augmentation and pixel scaling. ```py from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) train_transforms = Compose( [ RandomResizedCrop(image_processor.size["height"]), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(image_processor.size["height"]), CenterCrop(image_processor.size["height"]), ToTensor(), normalize, ] ) def preprocess_train(example_batch): example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch def preprocess_val(example_batch): example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch ``` Define the training and validation datasets, and use the [`~datasets.Dataset.set_transform`] function to apply the transformations on-the-fly. ```py train_ds = ds["train"] val_ds = ds["validation"] train_ds.set_transform(preprocess_train) val_ds.set_transform(preprocess_val) ``` Finally, you'll need a data collator to create a batch of training and evaluation data and convert the labels to `torch.tensor` objects. ```py import torch def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example["label"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels} ``` ## Model Now let's load a pretrained model to use as the base model. This guide uses the [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) model, but you can use any image classification model you want. Pass the `label2id` and `id2label` dictionaries to the model so it knows how to map the integer labels to their class labels, and you can optionally pass the `ignore_mismatched_sizes=True` parameter if you're finetuning a checkpoint that has already been finetuned. ```py from transformers import AutoModelForImageClassification, TrainingArguments, Trainer model = AutoModelForImageClassification.from_pretrained( "google/vit-base-patch16-224-in21k", label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, ) ``` ### PEFT configuration and model Every PEFT method requires a configuration that holds all the parameters specifying how the PEFT method should be applied. Once the configuration is setup, pass it to the [`~peft.get_peft_model`] function along with the base model to create a trainable [`PeftModel`]. 
<Tip> Call the [`~PeftModel.print_trainable_parameters`] method to compare the number of parameters of [`PeftModel`] versus the number of parameters in the base model! </Tip> <hfoptions id="loras"> <hfoption id="LoRA"> [LoRA](../conceptual_guides/adapter#low-rank-adaptation-lora) decomposes the weight update matrix into *two* smaller matrices. The size of these low-rank matrices is determined by its *rank* or `r`. A higher rank means the model has more parameters to train, but it also means the model has more learning capacity. You'll also want to specify the `target_modules` which determine where the smaller matrices are inserted. For this guide, you'll target the *query* and *value* matrices of the attention blocks. Other important parameters to set are `lora_alpha` (scaling factor), `bias` (whether `none`, `all` or only the LoRA bias parameters should be trained), and `modules_to_save` (the modules apart from the LoRA layers to be trained and saved). All of these parameters - and more - are found in the [`LoraConfig`]. ```py from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=16, target_modules=["query", "value"], lora_dropout=0.1, bias="none", modules_to_save=["classifier"], ) model = get_peft_model(model, config) model.print_trainable_parameters() "trainable params: 667,493 || all params: 86,543,818 || trainable%: 0.7712775047664294" ``` </hfoption> <hfoption id="LoHa"> [LoHa](../conceptual_guides/adapter#low-rank-hadamard-product-loha) decomposes the weight update matrix into *four* smaller matrices and each pair of smaller matrices is combined with the Hadamard product. This allows the weight update matrix to keep the same number of trainable parameters when compared to LoRA, but with a higher rank (`r^2` for LoHA when compared to `2*r` for LoRA). The size of the smaller matrices is determined by its *rank* or `r`. You'll also want to specify the `target_modules` which determines where the smaller matrices are inserted. For this guide, you'll target the *query* and *value* matrices of the attention blocks. Other important parameters to set are `alpha` (scaling factor), and `modules_to_save` (the modules apart from the LoHa layers to be trained and saved). All of these parameters - and more - are found in the [`LoHaConfig`]. ```py from peft import LoHaConfig, get_peft_model config = LoHaConfig( r=16, alpha=16, target_modules=["query", "value"], module_dropout=0.1, modules_to_save=["classifier"], ) model = get_peft_model(model, config) model.print_trainable_parameters() "trainable params: 1,257,317 || all params: 87,133,642 || trainable%: 1.4429753779831676" ``` </hfoption> <hfoption id="LoKr"> [LoKr](../conceptual_guides/adapter#low-rank-kronecker-product-lokr) expresses the weight update matrix as a decomposition of a Kronecker product, creating a block matrix that is able to preserve the rank of the original weight matrix. The size of the smaller matrices are determined by its *rank* or `r`. You'll also want to specify the `target_modules` which determines where the smaller matrices are inserted. For this guide, you'll target the *query* and *value* matrices of the attention blocks. Other important parameters to set are `alpha` (scaling factor), and `modules_to_save` (the modules apart from the LoKr layers to be trained and saved). All of these parameters - and more - are found in the [`LoKrConfig`]. 
```py
from peft import LoKrConfig, get_peft_model

config = LoKrConfig(
    r=16,
    alpha=16,
    target_modules=["query", "value"],
    module_dropout=0.1,
    modules_to_save=["classifier"],
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
"trainable params: 116,069 || all params: 87,172,042 || trainable%: 0.13314934162033282"
```

</hfoption>
<hfoption id="AdaLoRA">

[AdaLoRA](../conceptual_guides/adapter#adaptive-low-rank-adaptation-adalora) efficiently manages the LoRA parameter budget by assigning important weight matrices more parameters and pruning less important ones. In contrast, LoRA evenly distributes parameters across all modules. You can control the average desired *rank* or `r` of the matrices, and which modules to apply AdaLoRA to with `target_modules`. Other important parameters to set are `lora_alpha` (scaling factor), and `modules_to_save` (the modules apart from the AdaLoRA layers to be trained and saved). All of these parameters - and more - are found in the [`AdaLoraConfig`].

```py
from peft import AdaLoraConfig, get_peft_model

config = AdaLoraConfig(
    r=8,
    init_r=12,
    tinit=200,
    tfinal=1000,
    deltaT=10,
    target_modules=["query", "value"],
    modules_to_save=["classifier"],
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
"trainable params: 520,325 || all params: 87,614,722 || trainable%: 0.5938785036606062"
```

</hfoption>
</hfoptions>

### Training

For training, let's use the [`~transformers.Trainer`] class from Transformers. The [`Trainer`] contains a PyTorch training loop, and when you're ready, call [`~transformers.Trainer.train`] to start training. To customize the training run, configure the training hyperparameters in the [`~transformers.TrainingArguments`] class. With LoRA-like methods, you can afford to use a higher batch size and learning rate.

> [!WARNING]
> AdaLoRA has an [`~AdaLoraModel.update_and_allocate`] method that should be called at each training step to update the parameter budget and mask, otherwise the adaptation step is not performed. This requires writing a custom training loop or subclassing the [`~transformers.Trainer`] to incorporate this method. As an example, take a look at this [custom training loop](https://github.com/huggingface/peft/blob/912ad41e96e03652cabf47522cd876076f7a0c4f/examples/conditional_generation/peft_adalora_seq2seq.py#L120).

```py
from transformers import TrainingArguments, Trainer

account = "stevhliu"
peft_model_id = f"{account}/google/vit-base-patch16-224-in21k-lora"
batch_size = 128

args = TrainingArguments(
    peft_model_id,
    remove_unused_columns=False,
    eval_strategy="epoch",
    save_strategy="epoch",
    learning_rate=5e-3,
    per_device_train_batch_size=batch_size,
    gradient_accumulation_steps=4,
    per_device_eval_batch_size=batch_size,
    fp16=True,
    num_train_epochs=5,
    logging_steps=10,
    load_best_model_at_end=True,
    label_names=["labels"],
)
```

Begin training with [`~transformers.Trainer.train`].

```py
trainer = Trainer(
    model,
    args,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    processing_class=image_processor,
    data_collator=collate_fn,
)
trainer.train()
```

## Share your model

Once training is complete, you can upload your model to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method. You’ll need to log in to your Hugging Face account first and enter your token when prompted.

```py
from huggingface_hub import notebook_login

notebook_login()
```

Call [`~transformers.PreTrainedModel.push_to_hub`] to save your model to your repository.
```py model.push_to_hub(peft_model_id) ``` ## Inference Let's load the model from the Hub and test it out on a food image. ```py from peft import PeftConfig, PeftModel from transformers import AutoImageProcessor from PIL import Image import requests config = PeftConfig.from_pretrained("stevhliu/vit-base-patch16-224-in21k-lora") model = AutoModelForImageClassification.from_pretrained( config.base_model_name_or_path, label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, ) model = PeftModel.from_pretrained(model, "stevhliu/vit-base-patch16-224-in21k-lora") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg" image = Image.open(requests.get(url, stream=True).raw) image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg"> </div> Convert the image to RGB and return the underlying PyTorch tensors. ```py encoding = image_processor(image.convert("RGB"), return_tensors="pt") ``` Now run the model and return the predicted class! ```py with torch.no_grad(): outputs = model(**encoding) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) "Predicted class: beignets" ```
peft/docs/source/task_guides/lora_based_methods.md/0
{ "file_path": "peft/docs/source/task_guides/lora_based_methods.md", "repo_id": "peft", "token_count": 4896 }
230
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Union import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.modeling_utils import ModelMixin from diffusers.models.unets.unet_2d_blocks import ( CrossAttnDownBlock2D, DownBlock2D, ) from diffusers.utils import BaseOutput, logging from torch import nn from torch.nn import functional as F logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class ControlNetOutput(BaseOutput): down_block_res_samples: tuple[torch.Tensor] mid_block_res_sample: torch.Tensor class ControlNetConditioningEmbedding(nn.Module): """ Quoting from https://huggingface.co/papers/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full model) to encode image-space conditions ... into feature maps ..." 
""" def __init__( self, conditioning_embedding_channels: int, conditioning_channels: int = 3, block_out_channels: tuple[int] = (16, 32, 96, 256), ): super().__init__() self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) self.blocks = nn.ModuleList([]) for i in range(len(block_out_channels) - 1): channel_in = block_out_channels[i] channel_out = block_out_channels[i + 1] self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) self.conv_out = zero_module( nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) ) def forward(self, conditioning): embedding = self.conv_in(conditioning) embedding = F.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = F.silu(embedding) embedding = self.conv_out(embedding) return embedding class ControlNetModel(ModelMixin, ConfigMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 4, out_channels: int = 320, controlnet_conditioning_channel_order: str = "rgb", conditioning_embedding_out_channels: Optional[tuple[int]] = (16, 32, 96, 256), ): super().__init__() # for control image self.controlnet_cond_embedding = ControlNetConditioningEmbedding( conditioning_embedding_channels=out_channels, block_out_channels=conditioning_embedding_out_channels, ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, dict[str, AttentionProcessor]]): r""" Parameters: `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor of **all** `Attention` layers. In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.: """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module will split the input tensor in slices, to compute attention in several steps. This is useful to save some memory in exchange for a small speed decrease. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. 
# Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: list[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): module.gradient_checkpointing = value def forward( self, controlnet_cond: torch.FloatTensor, ) -> Union[ControlNetOutput, tuple]: # check channel order channel_order = self.config.controlnet_conditioning_channel_order if channel_order == "rgb": # in rgb order by default ... elif channel_order == "bgr": controlnet_cond = torch.flip(controlnet_cond, dims=[1]) else: raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") # 2. pre-process controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) return controlnet_cond def zero_module(module): for p in module.parameters(): nn.init.zeros_(p) return module
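# ---------------------------------------------------------------------------
# Minimal smoke-test sketch (not part of the original module): it shows how the
# lightweight ControlNetModel above maps an image-space condition into the
# latent feature space. With the default (16, 32, 96, 256) embedding channels,
# the three stride-2 convolutions downsample a 512x512 RGB condition by a
# factor of 8, and conv_out projects it to `out_channels` (320) channels.
# Because conv_out is zero-initialized via zero_module, the output starts out
# as all zeros.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    controlnet = ControlNetModel()
    cond = torch.randn(1, 3, 512, 512)  # one RGB conditioning image
    features = controlnet(cond)
    print(features.shape)  # expected: torch.Size([1, 320, 64, 64])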
peft/examples/boft_controlnet/utils/light_controlnet.py/0
{ "file_path": "peft/examples/boft_controlnet/utils/light_controlnet.py", "repo_id": "peft", "token_count": 4308 }
231
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Literal, Optional import torch from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser from trl import SFTConfig, SFTTrainer from peft import BoneConfig, get_peft_model @dataclass class ScriptArguments(SFTConfig): # model configs base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name or path of the fp32/16 base model."} ) bits: str = field(default="bf16", metadata={"help": "(`['bf16', 'fp16', fp32]`)"}) init_weights: Literal[True, "bat"] = field( default=True, metadata={ "help": ("True -> Bone; `bat` -> Bat"), }, ) bone_r: int = field(default=16) merge_and_save: bool = field(default=False) # dataset configs data_path: str = field(default="imdb", metadata={"help": "Path to the training data."}) dataset_split: str = field(default="train[:1%]", metadata={"help": "(`['train', 'test', 'eval']`):"}) dataset_field: list[str] = field(default=None, metadata={"help": "Fields of dataset input and output."}) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] print(script_args) print(f"Load pre-processed residual model in {script_args.bits} bits.") if script_args.bits in ["nf4", "fp4", "int8"]: print("Bone currently does not support quantization.") elif script_args.base_model_name_or_path is not None: print(f"No available pre-processed model, manually initialize a Bone using {script_args.base_model_name_or_path}.") model = AutoModelForCausalLM.from_pretrained( script_args.base_model_name_or_path, torch_dtype=( torch.float16 if script_args.bits == "fp16" else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32) ), device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path) tokenizer.pad_token_id = tokenizer.eos_token_id bone_config = BoneConfig( r=script_args.bone_r, target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"], bias="none", task_type="CAUSAL_LM", init_weights=script_args.init_weights, ) peft_model = get_peft_model(model, bone_config) print(peft_model) peft_model.print_trainable_parameters() print(f"Training Bone with trl on the {script_args.data_path}[{script_args.dataset_split}] dataset.") dataset = load_dataset(script_args.data_path, split=script_args.dataset_split) dataset = dataset.map( lambda example: { "text": f"### USER: {example[script_args.dataset_field[0]]}\n### ASSISTANT: {example[script_args.dataset_field[1]]}" } ) trainer = SFTTrainer( model=peft_model, args=script_args, train_dataset=dataset, processing_class=tokenizer, ) trainer.train() trainer.save_state() peft_model.save_pretrained( os.path.join(script_args.output_dir, "bone_ft"), ) if script_args.merge_and_save: model = peft_model.merge_and_unload() model.save_pretrained(os.path.join(script_args.output_dir, "bone_merged")) 
tokenizer.save_pretrained(os.path.join(script_args.output_dir, "bone_merged"))
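# ---------------------------------------------------------------------------
# Illustrative note, not part of the original script: the Bone adapter saved
# above under `<output_dir>/bone_ft` can later be re-attached to the base
# model for inference, roughly like this (re-using the names defined above):
#
#     from peft import PeftModel
#
#     base = AutoModelForCausalLM.from_pretrained(script_args.base_model_name_or_path)
#     model = PeftModel.from_pretrained(base, os.path.join(script_args.output_dir, "bone_ft"))
# ---------------------------------------------------------------------------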
peft/examples/bone_finetuning/bone_finetuning.py/0
{ "file_path": "peft/examples/bone_finetuning/bone_finetuning.py", "repo_id": "peft", "token_count": 1491 }
232
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch import torch.distributed as dist from datasets import load_dataset from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments from utils import DataCollator, TokenizerMetaMath from peft import EvaConfig, LoraConfig, get_eva_state_dict, get_peft_model, initialize_lora_eva_weights # run this script e.g. with: torchrun --nproc_per_node=4 eva_finetuning_multi_gpu.py # config model_name = "meta-llama/Llama-2-7b-hf" max_seq_len = 512 rank = 16 alpha = 1 rho = 2.0 target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"] svd_batch_size = 4 # can be different from the batch size used in finetuning batch_size = 4 learning_rate = 5e-4 gradient_accumulation_steps = 8 num_epochs = 1 output_dir = "outputs" bf16 = True # Initialize distributed environment if torch.cuda.is_available(): local_rank = int(os.environ.get("LOCAL_RANK", -1)) torch.cuda.set_device(local_rank) dist.init_process_group("nccl") world_size = dist.get_world_size() elif torch.xpu.is_available(): local_rank = int(os.environ.get("LOCAL_RANK", -1)) torch.xpu.set_device(local_rank) dist.init_process_group("xccl") world_size = dist.get_world_size() else: local_rank = -1 world_size = 1 # load model and tokenizer model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) # load dataset dataset = load_dataset("meta-math/MetaMathQA") dataset = dataset.map( TokenizerMetaMath(model_name), batched=True, remove_columns=dataset["train"].column_names, ) dataset.set_format(type="torch") # data collator data_collator = DataCollator(tokenizer.eos_token_id, max_length=max_seq_len) # Create sampler for distributed training sampler = DistributedSampler(dataset["train"], num_replicas=world_size, rank=local_rank) # dataloader dataloader = DataLoader( dataset["train"], batch_size=svd_batch_size, collate_fn=data_collator, sampler=sampler, shuffle=False, ) sampler.set_epoch(0) # Wrap model in DDP model = model.to(local_rank) model = DDP(model, device_ids=[local_rank], output_device=local_rank) # setup peft config eva_config = EvaConfig(rho=rho) peft_config = LoraConfig( r=rank, lora_alpha=alpha, target_modules=target_modules, init_lora_weights="eva", eva_config=eva_config ) # EVA initialization eva_state_dict = get_eva_state_dict(model, dataloader, peft_config) eva_state_dict = {".".join(["base_model.model"] + k.split(".")[1:]): v for k, v in eva_state_dict.items()} # cleanup ddp model = model.module # initialize peft model peft_model = get_peft_model(model, peft_config, low_cpu_mem_usage=True) initialize_lora_eva_weights(peft_model, eva_state_dict=eva_state_dict) # setup training arguments training_args = TrainingArguments( per_device_train_batch_size=batch_size, learning_rate=learning_rate, 
gradient_accumulation_steps=gradient_accumulation_steps, num_train_epochs=num_epochs, output_dir=output_dir, remove_unused_columns=False, bf16=bf16, ) # continue with standard finetuning trainer = Trainer( model=peft_model, args=training_args, train_dataset=dataset["train"], data_collator=data_collator, ) trainer.train()
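
# Sketch, not part of the original script: persist the EVA-initialized LoRA
# adapter after training so it can later be re-attached to the base model with
# PeftModel.from_pretrained(base_model, output_dir). Only the main process
# (local_rank 0, or -1 when not distributed) should write to disk.
if local_rank in (-1, 0):
    peft_model.save_pretrained(output_dir)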
peft/examples/eva_finetuning/eva_finetuning_multi_accelerator.py/0
{ "file_path": "peft/examples/eva_finetuning/eva_finetuning_multi_accelerator.py", "repo_id": "peft", "token_count": 1443 }
233
# adapted from [peft's boft_dreambooth](https://github.com/huggingface/peft/tree/main/examples/boft_dreambooth) import gc import threading import psutil import torch # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): self.device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" self.device_module = getattr(torch, self.device_type, torch.cuda) gc.collect() self.device_module.empty_cache() self.device_module.reset_peak_memory_stats() # reset the peak gauge to zero self.begin = self.device_module.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() self.device_module.empty_cache() self.end = self.device_module.memory_allocated() self.peak = self.device_module.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
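
# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original utility. It assumes an
# accelerator (CUDA or XPU) is available, since __enter__ queries device
# memory statistics. Wrap any block of work in TorchTracemalloc and read the
# deltas (in MB) afterwards.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    device = torch.accelerator.current_accelerator() if hasattr(torch, "accelerator") else torch.device("cuda")
    with TorchTracemalloc() as tracemalloc:
        x = torch.randn(2048, 2048, device=device)
        y = x @ x
        del x, y
    print(f"device used/peaked (MB): {tracemalloc.used}/{tracemalloc.peaked}")
    print(f"cpu used/peaked (MB): {tracemalloc.cpu_used}/{tracemalloc.cpu_peaked}")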
peft/examples/hra_dreambooth/utils/tracemalloc.py/0
{ "file_path": "peft/examples/hra_dreambooth/utils/tracemalloc.py", "repo_id": "peft", "token_count": 910 }
234
<jupyter_start><jupyter_code>!git clone https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth %cd "peft-lora-sd-dreambooth" !pip install -r requirements.txt !python colab.py<jupyter_output><empty_output>
peft/examples/lora_dreambooth/colab_notebook.ipynb/0
{ "file_path": "peft/examples/lora_dreambooth/colab_notebook.ipynb", "repo_id": "peft", "token_count": 91 }
235
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfApi from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import get_peft_model from peft.tuners.oft.config import OFTConfig # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) UNET_TARGET_MODULES = ["to_q", "to_v", "query", "value"] # , "ff.net.0.proj"] TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"] def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. 
If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") # oft args parser.add_argument("--use_oft", action="store_true", help="Whether to use OFT for parameter efficient tuning") parser.add_argument("--oft_r", type=int, default=0, help="OFT rank, only used if use_oft is True") parser.add_argument("--oft_block_size", type=int, default=32, help="OFT block size, only used if use_oft is True") parser.add_argument("--oft_dropout", type=float, default=0.0, help="OFT dropout, only used if use_oft is True") parser.add_argument( "--oft_use_coft", action="store_true", help="Using constrained OFT, only used if use_oft is True" ) parser.add_argument( "--oft_eps", type=float, default=0.0, help="The control strength of COFT. Only has an effect if `oft_use_coft` is set to True.", ) parser.add_argument( "--oft_text_encoder_r", type=int, default=0, help="OFT rank for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_block_size", type=int, default=32, help="OFT block size for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_dropout", type=float, default=0.0, help="OFT dropout for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_use_coft", action="store_true", help="Using constrained OFT on the text encoder, only used if use_oft is True", ) parser.add_argument( "--oft_text_encoder_eps", type=float, default=0.0, help="The control strength of COFT on the text encoder. Only has an effect if `oft_text_encoder_use_coft` is set to True.", ) parser.add_argument( "--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader." ) parser.add_argument( "--no_tracemalloc", default=False, action="store_true", help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." 
) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of accelerators, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU or Intel XPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU or Intel XPU. Default to fp16 if a GPU/XPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): self.device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" self.device_module = getattr(torch, self.device_type, torch.cuda) gc.collect() self.device_module.empty_cache() self.device_module.reset_peak_memory_stats() # reset the peak gauge to zero self.begin = self.device_module.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # 
time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() self.device_module.empty_cache() self.end = self.device_module.memory_allocated() self.peak = self.device_module.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt_ids"] = self.tokenizer( self.class_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def collate_fn(examples, with_prior_preservation=False): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. 
if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple accelerators." def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) if args.report_to == "wandb": import wandb wandb.login(key=args.wandb_key) wandb.init(project=args.wandb_project_name) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. 
if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type in ["cuda", "xpu"] else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() elif torch.xpu.is_available(): torch.xpu.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: api = HfApi(token=args.hub_token) # Create repo (repo_name from args or inferred) repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name repo_id = api.create_repo(repo_name, exist_ok=True).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, ) # DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if args.use_oft: config = OFTConfig( r=args.oft_r, oft_block_size=args.oft_block_size, target_modules=UNET_TARGET_MODULES, module_dropout=args.oft_dropout, init_weights=True, 
coft=args.oft_use_coft, eps=args.oft_eps, ) unet = get_peft_model(unet, config) unet.print_trainable_parameters() print(unet) vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) elif args.train_text_encoder and args.use_oft: config = OFTConfig( r=args.oft_text_encoder_r, oft_block_size=args.oft_text_encoder_block_size, target_modules=TEXT_ENCODER_TARGET_MODULES, module_dropout=args.oft_text_encoder_dropout, init_weights=True, coft=args.oft_text_encoder_use_coft, eps=args.oft_text_encoder_eps, ) text_encoder = get_peft_model(text_encoder, config) text_encoder.print_trainable_parameters() print(text_encoder) if args.enable_xformers_memory_efficient_attention: if accelerator.device.type == "xpu": logger.warn("XPU hasn't support xformers yet, ignore it.") elif is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # below fails when using oft so commenting it out if args.train_text_encoder and not args.use_oft: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32 and torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB accelerators if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.num_dataloader_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = resume_global_step // num_update_steps_per_epoch resume_step = resume_global_step % num_update_steps_per_epoch # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc: for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if ( args.validation_prompt is not None and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0 ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision, ) # set `keep_fp32_wrapper` to True because we do not want to remove # mixed precision hooks while we are still training pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference if args.seed is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) else: generator = None images = [] for _ in range(args.num_validation_images): image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": import wandb tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() elif torch.xpu.is_available(): torch.xpu.empty_cache() if global_step >= args.max_train_steps: break # Printing the accelerator memory usage details such as allocated memory, peak memory, and total memory usage if not args.no_tracemalloc: accelerator.print( f"{accelerator.device.type.upper()} Memory before entering the train : {b2mb(tracemalloc.begin)}" ) accelerator.print( f"{accelerator.device.type.upper()} Memory consumed at the end of the train (end-begin): {tracemalloc.used}" ) accelerator.print( f"{accelerator.device.type.upper()} Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}" ) accelerator.print( f"{accelerator.device.type.upper()} Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}") accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}") accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}") accelerator.print( f"CPU Total Peak Memory consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}" ) # Create the pipeline using using the trained modules and save it. 
accelerator.wait_for_everyone() if accelerator.is_main_process: if args.use_oft: unwarpped_unet = accelerator.unwrap_model(unet) unwarpped_unet.save_pretrained( os.path.join(args.output_dir, "unet"), state_dict=accelerator.get_state_dict(unet) ) if args.train_text_encoder: unwarpped_text_encoder = accelerator.unwrap_model(text_encoder) unwarpped_text_encoder.save_pretrained( os.path.join(args.output_dir, "text_encoder"), state_dict=accelerator.get_state_dict(text_encoder), ) else: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", run_as_future=True, ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
peft/examples/oft_dreambooth/train_dreambooth.py/0
{ "file_path": "peft/examples/oft_dreambooth/train_dreambooth.py", "repo_id": "peft", "token_count": 20454 }
236
<jupyter_start><jupyter_text>Using C3A for sequence classification In this example, we fine-tune Roberta (base) on a sequence classification task using C3A. Imports<jupyter_code># To run this notebook, please run `pip install evaluate` to install additional dependencies not covered by PEFT. import torch from torch.optim import AdamW from torch.utils.data import DataLoader from peft import ( get_peft_model, C3AConfig, PeftType, ) from peft.utils import infer_device import evaluate from datasets import load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, AutoConfig from tqdm import tqdm<jupyter_output><empty_output><jupyter_text>Parameters<jupyter_code>batch_size = 32 model_name_or_path = "roberta-base" task = "mrpc" peft_type = PeftType.C3A device = infer_device() num_epochs = 5 # for better results, increase this number block_size = 768 # for better results, increase this number max_length = 512 torch.manual_seed(0) peft_config = C3AConfig( task_type="SEQ_CLS", block_size=block_size, target_modules=["query", "value"], ) head_lr = 4e-6 # the learning rate for the classification head for NLU tasks ft_lr = 3e-1 # the learning rate for C3A parameters, a much larger LR than that is usually used, at least 1e-1<jupyter_output><empty_output><jupyter_text>Loading data<jupyter_code>if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")): padding_side = "left" else: padding_side = "right" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side) if getattr(tokenizer, "pad_token_id") is None: tokenizer.pad_token_id = tokenizer.eos_token_id datasets = load_dataset("glue", task) metric = evaluate.load("glue", task) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=max_length) return outputs tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )<jupyter_output><empty_output><jupyter_text>Preparing the C3A model<jupyter_code>model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None) model = get_peft_model(model, peft_config) model.print_trainable_parameters() head_param = list(map(id, model.classifier.parameters())) others_param = filter(lambda p: id(p) not in head_param, model.parameters()) optimizer = AdamW([ {"params": model.classifier.parameters(), "lr": head_lr}, {"params": others_param, "lr": ft_lr} ],weight_decay=0.) 
# Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs), num_training_steps=(len(train_dataloader) * num_epochs), )<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>model.to(device) for epoch in range(num_epochs): model.train() for step, batch in enumerate(tqdm(train_dataloader)): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/115 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 100%|██████████| 115/115 [00:04<00:00, 24.62it/s] 100%|██████████| 13/13 [00:00<00:00, 49.02it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>account_id = "Your-Hugging-Face-Hub-Account" token = "Your-Hugging-Face-Hub-Token" model.push_to_hub(f"{account_id}/roberta-base-mrpc-peft-c3a", token=token)<jupyter_output><empty_output><jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoTokenizer peft_model_id = f"{account_id}/roberta-base-mrpc-peft-c3a" config = PeftConfig.from_pretrained(peft_model_id) inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the FourierFT model inference_model = PeftModel.from_pretrained(inference_model, peft_model_id, config=config) inference_model.to(device) inference_model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = inference_model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(eval_metric)<jupyter_output>0%| | 0/13 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 100%|██████████| 13/13 [00:00<00:00, 51.18it/s]
peft/examples/sequence_classification/C3A.ipynb/0
{ "file_path": "peft/examples/sequence_classification/C3A.ipynb", "repo_id": "peft", "token_count": 2373 }
237
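Pushing to the Hub in the notebook above is optional; the same C3A adapter can be round-tripped through a local directory with the generic PEFT save/load API. The snippet below is a sketch mirroring the Hub-loading cell; the directory name is a placeholder.

```py
from transformers import AutoModelForSequenceClassification
from peft import PeftModel

# After training, `model.save_pretrained("c3a-mrpc-adapter")` writes only the C3A
# adapter weights and config to disk (directory name is a placeholder).
base = AutoModelForSequenceClassification.from_pretrained("roberta-base")
inference_model = PeftModel.from_pretrained(base, "c3a-mrpc-adapter")
inference_model.eval()
```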
compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_use_orig_params: false machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false
peft/examples/sft/configs/fsdp_config.yaml/0
{ "file_path": "peft/examples/sft/configs/fsdp_config.yaml", "repo_id": "peft", "token_count": 265 }
238
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Optional import torch import transformers from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed from peft import ( PeftModel, ShiraConfig, get_peft_model, ) def train( base_model: str = "path/to/model", data_path: str = "yahma/alpaca-cleaned", output_dir: str = "shira", batch_size: int = 16, num_epochs: int = 1, learning_rate: float = 3e-4, cutoff_len: int = 256, val_set_size: int = 16, eval_step: int = 100, save_step: int = 100, device_map: str = "auto", shira_r: int = 32, shira_target_modules: list[str] = None, torch_dtype: str = "float16", seed: Optional[int] = None, use_custom_random_mask_function_with_custom_kwargs: Optional[bool] = False, ): # Set device_map to the right place when enabling DDP. world_size = int(os.environ.get("WORLD_SIZE", 0)) or int(os.environ.get("PMI_SIZE", 0)) if world_size > 1 and device_map != "cpu": from accelerate import Accelerator device_map = {"": Accelerator().process_index} # Set seed if seed is not None: set_seed(seed) model_kwargs = {"torch_dtype": getattr(torch, torch_dtype), "device_map": device_map} model = AutoModelForCausalLM.from_pretrained(base_model, **model_kwargs) tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True) # For some tokenizer with no pad token like llama if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token def tokenize(prompt, add_eos_token=True): result = tokenizer( prompt, truncation=True, max_length=cutoff_len, padding=False, return_tensors=None, ) if ( result["input_ids"][-1] != tokenizer.eos_token_id and len(result["input_ids"]) < cutoff_len and add_eos_token ): result["input_ids"].append(tokenizer.eos_token_id) result["attention_mask"].append(1) result["labels"] = result["input_ids"].copy() return result def generate_and_tokenize_prompt(example): full_prompt = generate_prompt(example) tokenized_full_prompt = tokenize(full_prompt) return tokenized_full_prompt def custom_random_mask_function_with_custom_kwargs(custom_arg): def mask_fn(base_layer, r): """ This mask function is similar to the random_mask provided in src/peft/tuners/shira/mask_functions.py except the seed is derived from custom_kwargs. Please use this as an example to create your own custom sparse masks that may use custom_kwargs. Remember, for a pretrained weight with shape m, n, mask_fn must return only one mask (shape: m, n) which must be binary 0 or 1 with num_shira_parameters = r(m+n) for linear layers. Device and dtype of mask must be same as base layer's weight's device and dtype. 
""" new_seed = custom_arg shape = base_layer.weight.shape num_shira_weights = r * (shape[0] + shape[1]) random_generator = torch.Generator() random_generator.manual_seed(new_seed) idx = (torch.randperm(base_layer.weight.numel(), generator=random_generator)[:num_shira_weights]).to( base_layer.weight.device ) val = torch.ones_like(idx.type(base_layer.weight.dtype)) mask = torch.zeros_like(base_layer.weight.view(1, -1)) mask = mask.scatter_(1, idx.unsqueeze(0), val.unsqueeze(0)).view(shape) return mask return mask_fn mask_type = "random" if not use_custom_random_mask_function_with_custom_kwargs else "custom" config = ShiraConfig( r=shira_r, mask_type=mask_type, target_modules=shira_target_modules, task_type="CAUSAL_LM", ) if use_custom_random_mask_function_with_custom_kwargs: custom_arg = 120 custom_mask_fn = custom_random_mask_function_with_custom_kwargs(custom_arg) config.mask_fn = custom_mask_fn model = get_peft_model(model, config) data = load_dataset(data_path) train_val = data["train"].train_test_split(test_size=val_set_size, shuffle=True, seed=42) train_data = train_val["train"].shuffle().map(generate_and_tokenize_prompt) val_data = train_val["test"].shuffle().map(generate_and_tokenize_prompt) trainer = transformers.Trainer( model=model, train_dataset=train_data, eval_dataset=val_data, args=transformers.TrainingArguments( per_device_train_batch_size=batch_size, warmup_steps=100, num_train_epochs=num_epochs, learning_rate=learning_rate, logging_steps=100, optim="adamw_torch", eval_strategy="steps", save_strategy="steps", eval_steps=eval_step, save_steps=save_step, output_dir=output_dir, save_total_limit=3, load_best_model_at_end=True, ddp_find_unused_parameters=False if world_size > 1 else None, ), data_collator=transformers.DataCollatorForSeq2Seq( tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True ), ) trainer.train() model.save_pretrained(output_dir) # Delete the model and load it again from the checkpoint. del model model = AutoModelForCausalLM.from_pretrained(base_model, **model_kwargs) model = PeftModel.from_pretrained(model, output_dir) def generate_prompt(example): return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. 
### Instruction: {example["instruction"]} ### Response: {example["output"]}""" if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--base_model", type=str, default="path/to/model") parser.add_argument("--data_path", type=str, default="yahma/alpaca-cleaned") parser.add_argument("--output_dir", type=str, default="shira") parser.add_argument("--batch_size", type=int, default=16) parser.add_argument("--num_epochs", type=int, default=1) parser.add_argument("--learning_rate", type=float, default=3e-4) parser.add_argument("--cutoff_len", type=int, default=256) parser.add_argument("--val_set_size", type=int, default=16) parser.add_argument("--eval_step", type=int, default=100) parser.add_argument("--save_step", type=int, default=100) parser.add_argument("--device_map", type=str, default="auto") parser.add_argument("--shira_r", type=int, default=32) parser.add_argument("--shira_target_modules", type=str, default=None) parser.add_argument("--torch_dtype", type=str, default="float16") parser.add_argument("--seed", type=int, default=None) parser.add_argument("--use_custom_random_mask_function_with_custom_kwargs", action="store_true") args = parser.parse_args() train( base_model=args.base_model, data_path=args.data_path, output_dir=args.output_dir, batch_size=args.batch_size, num_epochs=args.num_epochs, learning_rate=args.learning_rate, cutoff_len=args.cutoff_len, val_set_size=args.val_set_size, eval_step=args.eval_step, save_step=args.save_step, device_map=args.device_map, shira_r=args.shira_r, shira_target_modules=args.shira_target_modules, torch_dtype=args.torch_dtype, seed=args.seed, use_custom_random_mask_function_with_custom_kwargs=args.use_custom_random_mask_function_with_custom_kwargs, )
peft/examples/shira_finetuning/shira_finetuning.py/0
{ "file_path": "peft/examples/shira_finetuning/shira_finetuning.py", "repo_id": "peft", "token_count": 3546 }
239
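The docstring of the custom mask function in the script above states the contract a SHiRA mask has to satisfy: a binary tensor with the same shape as the base weight, containing exactly r * (m + n) ones for a linear layer of shape (m, n). The toy check below illustrates that contract with made-up dimensions; it is not part of the training script.

```py
import torch

# Illustrative dimensions only.
m, n, r = 64, 32, 4
num_shira_weights = r * (m + n)

# Build a random binary mask the same way the script's mask_fn does.
idx = torch.randperm(m * n)[:num_shira_weights]
mask = torch.zeros(m * n)
mask[idx] = 1.0
mask = mask.view(m, n)

assert mask.shape == (m, n)
assert int(mask.sum()) == num_shira_weights        # exactly r * (m + n) trainable entries
assert set(mask.unique().tolist()) <= {0.0, 1.0}   # strictly binary
```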
{ "optimizer_kwargs": { "lr": 5e-4 } }
peft/method_comparison/MetaMathQA/experiments/adaptionprompt/llama-3.2-3B-lr_0.0005/training_params.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/adaptionprompt/llama-3.2-3B-lr_0.0005/training_params.json", "repo_id": "peft", "token_count": 28 }
240
{ "alpha_pattern": {}, "auto_mapping": null, "base_model_name_or_path": null, "bias": "none", "corda_config": null, "eva_config": null, "exclude_modules": null, "fan_in_fan_out": false, "inference_mode": false, "init_lora_weights": true, "layer_replication": null, "layers_pattern": null, "layers_to_transform": null, "loftq_config": {}, "lora_alpha": 64, "lora_bias": false, "lora_dropout": 0.0, "megatron_config": null, "megatron_core": "megatron.core", "modules_to_save": null, "peft_type": "LORA", "r": 32, "rank_pattern": {}, "revision": null, "target_modules": null, "task_type": "CAUSAL_LM", "use_dora": false, "use_rslora": false }
peft/method_comparison/MetaMathQA/experiments/lora/llama-3.2-3B-rank32-lorafa/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/lora/llama-3.2-3B-rank32-lorafa/adapter_config.json", "repo_id": "peft", "token_count": 304 }
241
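For reference, the JSON above maps one-to-one onto a `LoraConfig`; a sketch of the equivalent Python construction follows. The "lorafa" part of the experiment name presumably refers to the optimizer setup used by the benchmark, which is not captured by this config file.

```py
from peft import LoraConfig

# Fields mirror the adapter_config.json keys above; unspecified fields keep their defaults.
config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.0,
    bias="none",
    target_modules=None,   # None falls back to the architecture's default target modules
    task_type="CAUSAL_LM",
    use_rslora=False,
    use_dora=False,
)
```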
# Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ import os from datetime import datetime as dt from datetime import timezone from github import Github LABELS_TO_EXEMPT = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", "PRs welcome to address this", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/peft") open_issues = repo.get_issues(state="open") for issue in open_issues: comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if ( (last_comment is not None and last_comment.user.login == "github-actions[bot]") and (dt.now(timezone.utc) - issue.updated_at).days > 7 and (dt.now(timezone.utc) - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): issue.edit(state="closed") elif ( (dt.now(timezone.utc) - issue.updated_at).days > 23 and (dt.now(timezone.utc) - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\n" ) if __name__ == "__main__": main()
peft/scripts/stale.py/0
{ "file_path": "peft/scripts/stale.py", "repo_id": "peft", "token_count": 890 }
242
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .adalora import AdaLoraConfig, AdaLoraModel from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel from .boft import BOFTConfig, BOFTModel from .bone import BoneConfig, BoneModel from .c3a import C3AConfig, C3AModel from .cpt import CPTConfig, CPTEmbedding from .fourierft import FourierFTConfig, FourierFTModel from .hra import HRAConfig, HRAModel from .ia3 import IA3Config, IA3Model from .ln_tuning import LNTuningConfig, LNTuningModel from .loha import LoHaConfig, LoHaModel from .lokr import LoKrConfig, LoKrModel from .lora import ( EvaConfig, LoftQConfig, LoraConfig, LoraModel, LoraRuntimeConfig, get_eva_state_dict, initialize_lora_eva_weights, ) from .miss import MissConfig, MissModel from .mixed import MixedModel from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit from .oft import OFTConfig, OFTModel from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType from .poly import PolyConfig, PolyModel from .prefix_tuning import PrefixEncoder, PrefixTuningConfig from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit from .randlora import RandLoraConfig, RandLoraModel from .road import RoadConfig, RoadModel from .shira import ShiraConfig, ShiraModel from .trainable_tokens import TrainableTokensConfig, TrainableTokensModel from .vblora import VBLoRAConfig, VBLoRAModel from .vera import VeraConfig, VeraModel from .xlora import XLoraConfig, XLoraModel __all__ = [ "AdaLoraConfig", "AdaLoraModel", "AdaptionPromptConfig", "AdaptionPromptModel", "BOFTConfig", "BOFTModel", "BoneConfig", "BoneModel", "C3AConfig", "C3AModel", "CPTConfig", "CPTEmbedding", "EvaConfig", "FourierFTConfig", "FourierFTModel", "HRAConfig", "HRAModel", "IA3Config", "IA3Model", "LNTuningConfig", "LNTuningModel", "LoHaConfig", "LoHaModel", "LoKrConfig", "LoKrModel", "LoftQConfig", "LoraConfig", "LoraModel", "LoraRuntimeConfig", "MissConfig", "MissModel", "MixedModel", "MultitaskPromptEmbedding", "MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "OFTConfig", "OFTModel", "PolyConfig", "PolyModel", "PrefixEncoder", "PrefixTuningConfig", "PromptEmbedding", "PromptEncoder", "PromptEncoderConfig", "PromptEncoderReparameterizationType", "PromptTuningConfig", "PromptTuningInit", "RandLoraConfig", "RandLoraModel", "RoadConfig", "RoadModel", "ShiraConfig", "ShiraModel", "TrainableTokensConfig", "TrainableTokensModel", "VBLoRAConfig", "VBLoRAModel", "VeraConfig", "VeraModel", "XLoraConfig", "XLoraModel", "get_eva_state_dict", "initialize_lora_eva_weights", ]
peft/src/peft/tuners/__init__.py/0
{ "file_path": "peft/src/peft/tuners/__init__.py", "repo_id": "peft", "token_count": 1311 }
243
#include <torch/torch.h> #include <vector> #include <iostream> #include <torch/extension.h> std::vector<at::Tensor> forward_fast_block_diag_cuda( at::Tensor input); std::vector<at::Tensor> forward_fast_block_diag( at::Tensor input ) { return forward_fast_block_diag_cuda(input); } std::vector<at::Tensor> backward_fast_block_diag_cuda( at::Tensor grad_output, at::Tensor input); std::vector<at::Tensor> backward_fast_block_diag( at::Tensor grad_output, at::Tensor input ) { return backward_fast_block_diag_cuda(grad_output, input); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("forward", &forward_fast_block_diag, "FAST BLOCK DIAG (CUDA)"); m.def("backward", &backward_fast_block_diag, "FAST BLOCK DIAG backward (CUDA)"); }
peft/src/peft/tuners/boft/fbd/fbd_cuda.cpp/0
{ "file_path": "peft/src/peft/tuners/boft/fbd/fbd_cuda.cpp", "repo_id": "peft", "token_count": 370 }
244
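The C++ file above only declares the pybind11 bindings; the actual kernels live in a companion CUDA source. A rough sketch of how such an extension is typically JIT-compiled and called from Python follows; the kernel file name and the exact call signature are assumptions for illustration, not taken from this file.

```py
from torch.utils.cpp_extension import load

# JIT-compile the extension; "fbd_cuda_kernel.cu" is an assumed companion file name.
fbd_cuda = load(
    name="fbd_cuda",
    sources=["fbd_cuda.cpp", "fbd_cuda_kernel.cu"],
    verbose=True,
)

# Hypothetical usage: the bound functions return lists of tensors, matching the
# std::vector<at::Tensor> return types declared in the bindings above.
# out = fbd_cuda.forward(block_diag_input)[0]
```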
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Optional from torch import nn from torch.nn.modules import Module from tqdm import tqdm from peft.config import PeftConfig from peft.tuners.tuners_utils import BaseTuner, _get_submodules, check_target_module_exists from peft.utils import TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING, ModulesToSaveWrapper from .layer import LNTuningLayer class LNTuningModel(BaseTuner): """ Creates LayerNorm tuning from a pretrained transformer model. The method is described in detail in https://huggingface.co/papers/2312.11420. Args: model ([`torch.nn.Module`]): The model to be adapted. config ([`LNTuningConfig`]): The configuration of the Lora model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): This option has no effect on LN tuning but exists for consistency with other PEFT methods. Returns: 'torch.nn.Module': The adapted model with LayerNorm tuned on. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import get_peft_model, TaskType, LNTuningConfig >>> peft_config = LNTuningConfig( ... task_type=TaskType.CAUSAL_LM, ... ) >>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf") >>> model = get_peft_model(model, peft_config) >>> model.print_trainable_parameters() ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`LNTuningConfig`]): The configuration of the Lora model. 
""" prefix: str = "ln_tuning_" def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) # TODO: here need to handle the modules_to_save rather than the target_modules @staticmethod def _prepare_adapter_config(peft_config: PeftConfig, model_config: dict) -> PeftConfig: if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _create_and_replace( self, peft_config: PeftConfig, adapter_name: str, target: Module, target_name: str, parent: Module, current_key: str, ) -> None: # replace the original module with a same new module new_module = self._create_new_module(peft_config, target, adapter_name) if adapter_name != self.active_adapter: new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) def _create_new_module( self, peft_config: PeftConfig, target: Module, adapter_name: str, ) -> Module: if not isinstance(target, LNTuningLayer): new_module = LNTuningLayer(target, adapter_name) else: new_module = target new_module.update_layer(target.base_layer, adapter_name) return new_module def _replace_module(self, parent: Module, child_name: str, new_module: Module, child: Module) -> None: setattr(parent, child_name, new_module) if hasattr(child, "base_layer"): child = child.base_layer if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) for name, module in new_module.named_modules(): weight = child.qweight if hasattr(child, "qweight") else child.weight module.to(weight.device) def _mark_only_adapters_as_trainable(self, model: Module): for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False else: p.requires_grad = True def _check_target_module_exists(self, peft_config: PeftConfig, key: str) -> bool: return check_target_module_exists(peft_config, key) def _set_adapter_layers(self, enabled: bool) -> None: for module in self.model.modules(): if isinstance(module, (LNTuningLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. """ self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: str) -> None: for module in self.model.modules(): if isinstance(module, LNTuningLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. 
Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): self._unloading_checks(adapter_names) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading adapters " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) return self.model def unload(self): return self._unload_and_optionally_merge(merge=False) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> nn.Module: return self._unload_and_optionally_merge(merge=True) def _cast_adapter_dtype(self, adapter_name: str, autocast_adapter_dtype: bool = True) -> None: # Note: LN Tuning does not add adapter layers, instead it creates copies of the original layer. For this reason, # we need to skip adapter autocasting, otherwise we would change the dtype of copies of the original layer, # resulting in dtype errors down the line. pass
peft/src/peft/tuners/ln_tuning/model.py/0
{ "file_path": "peft/src/peft/tuners/ln_tuning/model.py", "repo_id": "peft", "token_count": 3309 }
245
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Optional import torch from peft.import_utils import is_eetq_available from peft.tuners.lora.layer import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer if is_eetq_available(): from eetq import EetqLinear class EetqLoraLinear(torch.nn.Module, LoraLayer): def __init__( self, base_layer, adapter_name, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, lora_bias: bool = False, **kwargs, ): if use_dora: raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") super().__init__() LoraLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, lora_bias=lora_bias, ) def forward(self, x: torch.Tensor): result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = self._cast_input_dtype(x, lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result = result + output return result def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise AttributeError("Merging LoRA layers is not supported for Eetq layers.") def unmerge(self) -> None: raise AttributeError("Unmerging LoRA layers is not supported for Eetq layers.") def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep def dispatch_eetq( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if is_eetq_available() and isinstance(target_base_layer, EetqLinear): new_module = EetqLoraLinear(target, adapter_name, **kwargs) target.weight = target_base_layer.weight if hasattr(target, "bias"): target.bias = target_base_layer.bias return new_module
peft/src/peft/tuners/lora/eetq.py/0
{ "file_path": "peft/src/peft/tuners/lora/eetq.py", "repo_id": "peft", "token_count": 1926 }
246
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Any, Optional, Union from torch import nn from tqdm import tqdm from peft.tuners import adalora, loha, lokr, lora, oft, shira from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, PeftType, _get_submodules, get_auto_gptq_quant_linear, ) # Collection of constants used for all tuners COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT, PeftType.SHIRA) PREFIXES = [ lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix, shira.ShiraModel.prefix, ] Configs = Union[ lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig, shira.ShiraConfig ] Layers = ( lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer, shira.ShiraLayer, ) class MixedModel(BaseTuner): """ A class that allows to mix different types of adapters in a single model. Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument `mixed=True`. Args: model (:obj:`nn.Module`): The model to be tuned. config (:obj:`PeftConfig`): The config of the model to be tuned. The adapter type must be compatible. adapter_name (:obj:`str`): The name of the first adapter. """ def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None: super().__init__(model, config, adapter_name) def _check_new_adapter_config(self, config: Configs) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ if not isinstance(config, Configs.__args__): raise ValueError( f"{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}." ) biases = (getattr(config, "bias", None) for config in self.peft_config) biases = [bias for bias in biases if bias not in (None, "none")] if len(biases) > 1: raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." 
) @staticmethod def _check_target_module_exists(config: Configs, key: str): return check_target_module_exists(config, key) def _create_and_replace( self, config: Configs, *args: Any, **kwargs: Any, ) -> None: if isinstance(config, adalora.AdaLoraConfig): adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, lora.LoraConfig): lora.LoraModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, loha.LoHaConfig): loha.LoHaModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, lokr.LoKrConfig): lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, oft.OFTConfig): oft.OFTModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, shira.ShiraConfig): shira.ShiraModel._create_and_replace(self, config, *args, **kwargs) else: raise ValueError(f"Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") def _replace_module(self, parent, child_name, new_module, child) -> None: setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.get_base_layer() elif hasattr(child, "quant_linear_module"): # TODO maybe not necessary to have special treatment? child = child.quant_linear_module if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if any(prefix in name for prefix in PREFIXES): module.to(child.weight.device) if "ranknum" in name: module.to(child.weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if not any(prefix in n for prefix in PREFIXES): p.requires_grad = False for active_adapter in self.active_adapters: bias = getattr(self.peft_config[active_adapter], "bias", "none") if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "lora_only": # TODO: check if this is needed for other supported types for m in model.modules(): if isinstance(m, Layers) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise ValueError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(config, adapter_name, target, **kwargs): gptq_quantization_config = kwargs.get("gptq_quantization_config", None) AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) if (gptq_quantization_config is not None) or (AutoGPTQQuantLinear is not None): raise ValueError(f"GPTQ quantization not supported for {config.peft_type.value} (yet).") loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) if loaded_in_8bit or loaded_in_4bit: raise ValueError(f"8bit and 4bit quantization not supported for {config.peft_type.value} (yet).") if isinstance(config, adalora.AdaLoraConfig): new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, lora.LoraConfig): new_module = lora.LoraModel._create_new_module(config, 
adapter_name, target, **kwargs) elif isinstance(config, loha.LoHaConfig): new_module = loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, lokr.LoKrConfig): new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, oft.OFTConfig): new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, shira.ShiraConfig): new_module = shira.ShiraModel._create_new_module(config, adapter_name, target, **kwargs) else: raise ValueError(f"Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self): self._set_adapter_layers(enabled=True) def disable_adapter_layers(self): for active_adapter in self.active_adapters: val = getattr(self.peft_config[active_adapter], "bias", "none") if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: for module in self.model.modules(): if isinstance(module, Layers): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. 
Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge layers when the model is gptq quantized") def merge_recursively(module): # helper function to recursively merge the base_layer of the target path = [] layer = module while hasattr(layer, "base_layer"): path.append(layer) layer = layer.base_layer for layer_before, layer_after in zip(path[:-1], path[1:]): layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names) layer_before.base_layer = layer_after.base_layer module.merge(safe_merge=safe_merge, adapter_names=adapter_names) key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: merge_recursively(target) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` new_module = target.modules_to_save[target.active_adapter] if hasattr(new_module, "base_layer"): # check if the module is itself a tuner layer if merge: new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) new_module = new_module.get_base_layer() setattr(parent, target_name, new_module) return self.model def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None: raise NotImplementedError(f"Weighted adapters are not supported for {self.__class__.__name__} (yet).") def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: """ Deletes an existing adapter. Args: adapter_name (Union[str, list[str]]): Name of the adapter(s) to delete. """ if isinstance(adapter_name, str): adapter_names = [adapter_name] else: adapter_names = adapter_name mismatched = set(adapter_names) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) for adapter_name in adapter_names: del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, BaseTunerLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] self._delete_auxiliary_adapter(adapter_name, new_active_adapters=new_adapter) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> nn.Module: r""" This method merges the layers into the base model. 
This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def generate(self, *args: Any, **kwargs: Any): return self.model.generate(*args, **kwargs)
peft/src/peft/tuners/mixed/model.py/0
{ "file_path": "peft/src/peft/tuners/mixed/model.py", "repo_id": "peft", "token_count": 6912 }
247
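The `MixedModel` docstring above says to create this class through `get_peft_model(..., mixed=True)` rather than instantiating it directly. A minimal sketch of mixing two compatible adapter types follows; the model name and target modules are placeholders.

```py
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # placeholder model

lora_config = LoraConfig(r=8, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
loha_config = LoHaConfig(r=8, target_modules=["q_proj", "v_proj"])

# mixed=True returns a model that can hold adapters of different (compatible) types.
peft_model = get_peft_model(base_model, lora_config, adapter_name="lora", mixed=True)
peft_model.add_adapter("loha", loha_config)
peft_model.set_adapter(["lora", "loha"])  # activate both adapters at the same time
```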
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Union from peft.config import PromptLearningConfig from peft.utils import PeftType class PromptEncoderReparameterizationType(str, enum.Enum): MLP = "MLP" LSTM = "LSTM" @dataclass class PromptEncoderConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEncoder`]. Args: encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]): The type of reparameterization to use. encoder_hidden_size (`int`): The hidden size of the prompt encoder. encoder_num_layers (`int`): The number of layers of the prompt encoder. encoder_dropout (`float`): The dropout probability of the prompt encoder. """ encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field( default=PromptEncoderReparameterizationType.MLP, metadata={"help": "How to reparameterize the prompt encoder"}, ) encoder_hidden_size: int = field( default=None, metadata={"help": "The hidden size of the prompt encoder"}, ) encoder_num_layers: int = field( default=2, metadata={"help": "The number of layers of the prompt encoder"}, ) encoder_dropout: float = field( default=0.0, metadata={"help": "The dropout of the prompt encoder"}, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.P_TUNING
peft/src/peft/tuners/p_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/p_tuning/config.py", "repo_id": "peft", "token_count": 748 }
248
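A short sketch of how this config class is typically used with `get_peft_model` follows; the model name and hyperparameter values are placeholders rather than recommendations.

```py
from transformers import AutoModelForCausalLM
from peft import PromptEncoderConfig, get_peft_model

peft_config = PromptEncoderConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=20,                  # inherited from PromptLearningConfig
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=128,
)

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # placeholder model
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```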
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.other import transpose from .._buffer_dict import BufferDict class UniqueBaseGrad(torch.autograd.Function): # Memory efficent for a unique base @staticmethod def forward(ctx, randlora_A, randlora_lambda, randlora_gamma): out = randlora_lambda[:, :, None] * randlora_A * randlora_gamma[None,] ctx.save_for_backward(randlora_A, randlora_lambda, randlora_gamma) return out @staticmethod def backward(ctx, grad_output): randlora_A, randlora_lambda, randlora_gamma = ctx.saved_tensors randlora_A, randlora_lambda, randlora_gamma = ( randlora_A.to(grad_output.dtype), randlora_lambda.to(grad_output.dtype), randlora_gamma.to(grad_output.dtype), ) grad_randlora_lambda = torch.einsum("kbj,kvj,bj->kb", grad_output, randlora_A, randlora_gamma) grad_randlora_gamma = torch.einsum("kbj,kvj,kb->bj", grad_output, randlora_A, randlora_lambda) return None, grad_randlora_lambda, grad_randlora_gamma class RandLoraLayer(BaseTunerLayer): # List all names of layers that may contain adapter weights adapter_layer_names = ("randlora_lambda", "randlora_gamma") other_param_names = ("randlora_A", "randlora_B") def __init__(self, base_layer: nn.Module, **kwargs): self.base_layer = base_layer self.r = {} self.scaling = {} self.randlora_dropout = nn.ModuleDict({}) # For storing vector scale self.randlora_lambda = nn.ParameterDict({}) self.randlora_gamma = nn.ParameterDict({}) # Stores a reference to the randlora_A/B BufferDict. 
# Set to `None` otherwise to avoid computation with random weights self.randlora_A: Optional[BufferDict] = None self.randlora_B: Optional[BufferDict] = None # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] # flag to enable/disable casting of input to weight dtype during forward call self.cast_input_dtype_enabled = True base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) self.in_features = in_features self.out_features = out_features self.kwargs = kwargs @property def merged(self) -> bool: return bool(self.merged_adapters) def update_layer( self, adapter_name, randlora_A: BufferDict, randlora_B: BufferDict, r, randlora_alpha, randlora_dropout, init_weights, ): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r if randlora_dropout > 0.0: randlora_dropout_layer = nn.Dropout(p=randlora_dropout) else: randlora_dropout_layer = nn.Identity() self.randlora_dropout.update(nn.ModuleDict({adapter_name: randlora_dropout_layer})) # Actual trainable parameters num_bases = min(self.in_features, self.out_features) / r self.num_bases = int(num_bases) if num_bases.is_integer() else int(num_bases) + 1 # Full rank self.randlora_lambda[adapter_name] = nn.Parameter(torch.randn(r, self.num_bases), requires_grad=True) self.randlora_gamma[adapter_name] = nn.Parameter( torch.ones(self.num_bases, min(self.out_features, self.in_features)) / max(self.out_features, self.in_features), requires_grad=True, ) self.scaling[adapter_name] = randlora_alpha / r # non trainable references to randlora_A/B buffers self.randlora_A = randlora_A self.randlora_B = randlora_B if adapter_name not in randlora_A: # This means that this is not the first RandLora adapter. We have to add an entry in the dict for this adapter. if len(self.randlora_A) < 1: raise ValueError( "The `randlora_A` and `randlora_B` buffers are empty. This should not happen. Please report this issue." ) # we can take any of the existing adapter's parameters, as they should all be identical randlora_A_param = list(self.randlora_A.values())[0] randlora_B_param = list(self.randlora_B.values())[0] error_tmpl = ( "{} has a size of {} but {} or greater is required; this probably happened because an additional RandLora " "adapter was added after the first one with incompatible shapes." ) max_dim, min_dim = max(self.in_features, self.out_features), min(self.in_features, self.out_features) # check input size if randlora_B_param.shape[0] < max_dim: raise ValueError(error_tmpl.format("randlora_B", randlora_B_param.shape[0], max_dim)) # check output size if randlora_A_param.shape[-1] < min_dim: raise ValueError(error_tmpl.format("randlora_A", randlora_A_param.shape[1], min_dim)) # check r error_tmpl = ( "{} has a size of {} but {} or greater is required; this probably happened because an additional RandLora " "adapter with a lower rank was added after the first one; loading the adapters " "in reverse order may solve this." 
) if randlora_A_param.shape[0] < self.r[adapter_name]: raise ValueError(error_tmpl.format("randlora_A", randlora_A_param.shape[0], self.r[adapter_name])) if randlora_B_param.shape[-1] < self.r[adapter_name]: raise ValueError(error_tmpl.format("randlora_B", randlora_B_param.shape[-1], self.r[adapter_name])) self.randlora_A[adapter_name] = randlora_A_param self.randlora_B[adapter_name] = randlora_B_param if init_weights: self.reset_randlora_parameters(adapter_name) self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def reset_randlora_parameters(self, adapter_name): if adapter_name in self.randlora_lambda.keys(): with torch.no_grad(): nn.init.zeros_(self.randlora_lambda[adapter_name]) nn.init.constant_(self.randlora_gamma[adapter_name], 1 / max(self.randlora_gamma[adapter_name].shape)) class Linear(nn.Linear, RandLoraLayer): # RandLora implemented in a dense layer def __init__( self, base_layer, randlora_A: BufferDict, randlora_B: BufferDict, adapter_name: str, r: int = 0, randlora_alpha: int = 0, randlora_dropout: float = 0.0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_target_conv_1d_layer: bool = False, init_weights: bool = True, **kwargs, ) -> None: # this gets the init from nn.Linear's super perspective, i.e. nn.Module.__init__, which should always be called super(nn.Linear, self).__init__() RandLoraLayer.__init__(self, base_layer, **kwargs) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer(adapter_name, randlora_A, randlora_B, r, randlora_alpha, randlora_dropout, init_weights) self.is_target_conv_1d_layer = is_target_conv_1d_layer def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.randlora_lambda.keys(): base_layer = self.get_base_layer() orig_dtype = base_layer.weight.dtype if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights.to(orig_dtype) else: delta_weight = self.get_delta_weight(active_adapter) base_layer.weight.data += delta_weight.to(orig_dtype) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: base_layer = self.get_base_layer() orig_dtype = base_layer.weight.dtype active_adapter = self.merged_adapters.pop() if active_adapter in self.randlora_lambda.keys(): delta_weight = self.get_delta_weight(active_adapter) base_layer.weight.data -= delta_weight.to(orig_dtype) def get_scaled_bases(self, adapter, device=None) -> tuple[torch.Tensor, torch.Tensor]: """ Performs scaling on the smallest random base (randlora_A) and returns randlora_A and randlora_B in the correct order to fit the target layers' dimensions Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ randlora_A = self.randlora_A[adapter] randlora_B = self.randlora_B[adapter] if device is None: device = randlora_B.device dtype = randlora_B.dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) randlora_lambda = self.randlora_lambda[adapter].to(device) randlora_gamma = self.randlora_gamma[adapter].to(device) if cast_to_fp32: randlora_A = randlora_A.float() randlora_B = randlora_B.float() randlora_lambda = randlora_lambda.float() randlora_gamma = randlora_gamma.float() # The trainable parameters are always applied to randlora_A, the smallest basis. min_dim, max_dim = min(self.out_features, self.in_features), max(self.out_features, self.in_features) # As adapted layers may have different shapes and RandLora contains a single shared pair of A and B matrices, # we initialize these matrices with the largest required size for each dimension. # During the forward pass, required submatrices are sliced out from the shared randlora_A and randlora_B. sliced_A = randlora_A[:, : self.num_bases, :min_dim].to(device) sliced_B = randlora_B[:max_dim, : self.num_bases, :].to(device) # Flattening the matrices over the rank and number of bases dimensions is more memory efficient update_B = sliced_B.flatten(start_dim=1) update_A = UniqueBaseGrad.apply(sliced_A, randlora_lambda, randlora_gamma).flatten(end_dim=1) # Since update_A is applied on the smallest dimension, test whether update_A or update_B should be applied first. This is done to reduce trainable parameters. if min_dim == self.in_features: return update_A, update_B return update_B.T, update_A.T def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. 
""" update_B, update_A = self.get_scaled_bases(adapter) update = (update_B.T @ update_A.T).T output_tensor = transpose(update, self.fan_in_fan_out) scaling = self.scaling[adapter] return output_tensor * scaling def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.randlora_lambda.keys(): continue dropout = self.randlora_dropout[active_adapter] update_B, update_A = self.get_scaled_bases(active_adapter, device=x.device) x = x.to(update_A.dtype) scaling = self.scaling[active_adapter] result = result + F.linear(F.linear(dropout(x), update_B), update_A) * scaling result = result.to(previous_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "randlora." + rep
peft/src/peft/tuners/randlora/layer.py/0
{ "file_path": "peft/src/peft/tuners/randlora/layer.py", "repo_id": "peft", "token_count": 6797 }
249
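The `UniqueBaseGrad` function at the top of the file above computes the same forward value as plain broadcasting, `randlora_lambda[:, :, None] * randlora_A * randlora_gamma[None]`; its purpose is a memory-friendly backward that returns gradients for `randlora_lambda` and `randlora_gamma` but none for the shared basis. The toy snippet below only illustrates the shapes involved; the dimensions are made up.

```py
import torch

r, num_bases, min_dim = 4, 3, 16   # illustrative sizes only

randlora_A = torch.randn(r, num_bases, min_dim)     # shared, non-trainable basis
randlora_lambda = torch.randn(r, num_bases)         # trainable, per adapter
randlora_gamma = torch.randn(num_bases, min_dim)    # trainable, per adapter

# Same value as UniqueBaseGrad.forward: scale the shared basis by the two
# trainable low-dimensional factors.
update = randlora_lambda[:, :, None] * randlora_A * randlora_gamma[None, :]
print(update.shape)  # torch.Size([4, 3, 16]); flattened over (r, num_bases) in get_scaled_bases
```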
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import os import re import textwrap import warnings from abc import ABC, abstractmethod from contextlib import contextmanager, nullcontext from typing import Any, Optional, Union, overload import torch from accelerate.hooks import AlignDevicesHook from accelerate.utils import named_module_tensors, offload_state_dict from torch import nn from transformers import PreTrainedModel from transformers.pytorch_utils import Conv1D from peft.mapping import PEFT_TYPE_TO_PREFIX_MAPPING from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from peft.utils.constants import ( DUMMY_MODEL_CONFIG, DUMMY_TARGET_MODULES, EMBEDDING_LAYER_NAMES, MIN_TARGET_MODULES_FOR_OPTIMIZATION, SEQ_CLS_HEAD_NAMES, ) from peft.utils.integrations import init_empty_weights from peft.utils.other import AuxiliaryTrainingWrapper, match_target_against_key, set_additional_trainable_modules from peft.utils.peft_types import PeftType, TaskType from ..config import PeftConfig from ..utils import _get_submodules from ._buffer_dict import BufferDict @contextmanager def onload_layer(layer): r""" A utility for modifying a module containing one or more tuners and a base layer, any of which are offloaded to the CPU or disk. Moves a module's sub-modules to the execution device before some action is performed, after that the base layer state dictionary is re-assigned (if that layer was offloaded to the disk) and finally the parameters are offloaded. If the module has no offloaded sub-modules, this function does nothing. 
Args: layer ('torch.nn.Module'): layer with tuners to be merged """ offloaded_modules = [] for name, module in layer.named_modules(): if name in ["", "base_layer"]: continue if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload: module._hf_hook.pre_forward(module) offloaded_modules.append(module) base_layer_offload = False if hasattr(layer, "base_layer") and ( hasattr(layer.base_layer, "_hf_hook") and isinstance(layer.base_layer._hf_hook, AlignDevicesHook) and layer.base_layer._hf_hook.offload ): # check if the base layer is disk-offloaded (must contain a 'dataset' and an offload index) if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values() and hasattr( layer.base_layer._hf_hook.weights_map, "dataset" ): # find the disk-offload index (maps modules to safetensors) from the `dataset` (OffloadedWeightsLoader object) index = layer.base_layer._hf_hook.weights_map.dataset.index module_name = list(dict(layer.base_layer._hf_hook.weights_map.dataset).keys())[0] # any module will do file_name = index[module_name]["safetensors_file"] base_name_arr = [] # get effective dir name for i in os.path.split(file_name): if "--" in i: base_name_arr.append(i) break base_name_arr.append(i) base_name = os.path.join(*base_name_arr) safetensors_filename = base_name + "-merged" layer.base_layer._hf_hook.pre_forward(layer.base_layer) base_layer_offload = True yield for module in offloaded_modules: module._hf_hook.post_forward(module, torch.tensor([])) if base_layer_offload: # re-make weights map (must be on cpu to send params to the disk via memmap if disk offload) layer.base_layer._hf_hook.weights_map = { name: param.to("cpu") for name, param in named_module_tensors(layer.base_layer) } # offload weights map to disk if original device is the disk if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values() and hasattr( layer.base_layer._hf_hook.weights_map, "dataset" ): # rewrite directory with merged weights offload_state_dict(safetensors_filename, layer.base_layer._hf_hook.weights_map) layer.base_layer._hf_hook.post_forward(layer.base_layer, torch.tensor([])) def _check_lora_target_modules_mamba(peft_config: PeftConfig, model: nn.Module, target_name: str): """ Prevent applying LoRA to incompatible modules in specific architectures (e.g., Mamba). """ lora_like_types = {"LORA", "ADALORA", "XLORA", "RANDLORA"} incompatible_modules = {"out_proj", "conv1d"} mamba_model_types = {"falcon_h1", "mamba", "mamba2", "falcon_mamba"} if ( peft_config.peft_type in lora_like_types and hasattr(model, "config") and getattr(model.config, "model_type", None) in mamba_model_types ): if target_name in incompatible_modules: raise ValueError( f"[PEFT:{peft_config.peft_type}] Module '{target_name}' is incompatible with Mamba-based models " f"(model_type='{model.config.model_type}'). Incompatible modules: {incompatible_modules}. " "Please remove it from `target_modules` to avoid compatibility issues." ) class BaseTuner(nn.Module, ABC): r""" A base tuner model that provides the common methods and attributes for all tuners that are injectable into a torch.nn.Module For adding a new Tuner class, one needs to overwrite the following methods: - **_prepare_adapter_config**: A private method to eventually prepare the adapter config, for example in case the field `target_modules` is missing. - **_create_and_replace**: A private method to create and replace the target module with the adapter module. 
- **_check_target_module_exists**: A private helper method to check if the passed module's key name matches any of the target modules in the adapter_config. The easiest is to check what is done in the `peft.tuners.lora.LoraModel` class. Attributes: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. forward (`Callable`): The forward method of the model. peft_config (`Union[`PeftConfig`, dict[str, PeftConfig]]`): The adapter configuration object, it should be a dictionary of `str` to `PeftConfig` objects. One can also pass a PeftConfig object and a new adapter will be created with the default name `adapter` or create a new dictionary with a key `adapter_name` and a value of that peft config. config (`dict[str, Any]`): The model configuration object, it should be a dictionary of `str` to `Any` objects. targeted_module_names (`list[str]`): The list of module names that were actually adapted. Can be useful to inspect if you want to quickly double-check that the `config.target_modules` were specified correctly. targeted_parameter_names (`list[str]`): The list of parameter names that were actually adapted. Can be useful to inspect if you want to quickly double-check that the `config.target_parameters` were specified correctly. """ def __init__( self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str, low_cpu_mem_usage: bool = False, state_dict: Optional[dict[str, torch.Tensor]] = None, ) -> None: super().__init__() self.model = model self.targeted_module_names: list[str] = [] self.targeted_parameter_names: list[str] = [] # For advanced developers, if you want to attach multiple adapters to your # model, just add a `peft_config` dict attribute to your model. if not hasattr(self, "peft_config"): self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config else: warnings.warn( "Already found a `peft_config` attribute in the model. This will lead to having multiple adapters" " in the model. Make sure to know what you are doing!" ) if isinstance(peft_config, PeftConfig): self.peft_config[adapter_name] = peft_config else: # user is adding a dict of PeftConfigs self.peft_config.update(peft_config) self.active_adapter: str | list[str] = adapter_name self._pre_injection_hook(self.model, self.peft_config[adapter_name], adapter_name) if peft_config != PeftType.XLORA or peft_config[adapter_name] != PeftType.XLORA: self.inject_adapter(self.model, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage, state_dict=state_dict) # Copy the peft_config in the injected model. self.model.peft_config = self.peft_config @property def active_adapters(self) -> list[str]: if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def forward(self, *args: Any, **kwargs: Any): return self.model.forward(*args, **kwargs) def _pre_injection_hook(self, model: nn.Module, config: PeftConfig, adapter_name: str) -> None: r""" A hook to be called before the adapter is injected into the model. This method can be overridden by child classes to perform any pre-injection operations. Args: model (`nn.Module`): The model to be adapted. config (`PeftConfig`): The adapter config. adapter_name (`str`): The adapter name. """ pass @abstractmethod def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig: r""" A private method to eventually prepare the adapter config. 
For transformers based models, if `peft_config.target_modules` is None, we can automatically infer the target modules from the `TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to automatically infer it for all tuner models. Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example. Args: peft_config (`PeftConfig`): The adapter config. model_config (`dict`): The transformers model config, that config should contain the `model_type` key. """ ... def _prepare_model(self, peft_config: PeftConfig, model: nn.Module): r""" A private method to modify the model structure before adapter is applied. See `peft.tuner.lora.LoraModel._prepare_model` for an example. Args: peft_config (`PeftConfig`): The prepared adapter config. model (`nn.Module`): The model that is going to be adapted. """ pass @abstractmethod def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool: r""" A helper private method to check if the passed module's key name matches any of the target modules in the `peft_config.target_modules` list. If it does, return `True`, else return `False`. Args: peft_config (`PeftConfig`): The adapter config. key (`str`): The module's key name. """ ... @abstractmethod def _create_and_replace( self, peft_config: PeftConfig, adapter_name: str, target: nn.Module, target_name: str, parent: nn.Module, current_key: str, parameter_name: Optional[str] = None, ) -> None: r""" Inplace replacement of the target module with the adapter layer. This method needs to be overridden by all the tuner classes. Check `peft.tuners.lora.LoraModel._create_and_replace` for an example. Args: peft_config (`PeftConfig`): The adapter config. adapter_name (`str`): The adapter name. target (`nn.Module`): The target module. target_name (`str`): The target module's name. parent (`nn.Module`): The parent module. current_key (`str`): The key of the current target being adapted. parameter_name (`str`, *optional*) If, and only if, an `nn.Parameter` is being targeted, this is the name of the parameter. """ ... @abstractmethod def _mark_only_adapters_as_trainable(self, model: nn.Module): r""" A helper method to mark only the adapter layers as trainable (i.e. module.requires_grad = False) This needs to be overridden for all tuner classes to match the correct key names. Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example. """ ... @abstractmethod def disable_adapter_layers(self) -> None: """ Disable all adapters in-place. """ ... @abstractmethod def enable_adapter_layers(self) -> None: """ Enable all adapters in-place """ ... def _check_new_adapter_config(self, config: PeftConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ pass def _cast_adapter_dtype(self, adapter_name: str, autocast_adapter_dtype: bool = True) -> None: """ A helper method to cast the adapter weights to the correct dtype. Currently, this only upcasts float16 and bfloat16 to float32. Args: adapter_name (`str`): The adapter name. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. 
""" if not autocast_adapter_dtype: return dtypes_to_convert_to_fp32 = {torch.float16, torch.bfloat16} for module in self.model.modules(): if not isinstance(module, BaseTunerLayer): continue for submodule in module.modules(): if not isinstance(submodule, (nn.ModuleDict, nn.ParameterDict, BufferDict)): continue if adapter_name not in submodule: continue if isinstance(submodule[adapter_name], nn.Parameter): if submodule[adapter_name].dtype in dtypes_to_convert_to_fp32: submodule[adapter_name].data = submodule[adapter_name].data.to(torch.float32) continue if isinstance(submodule[adapter_name], torch.Tensor): # e.g. from a BufferDict if submodule[adapter_name].dtype in dtypes_to_convert_to_fp32: submodule[adapter_name] = submodule[adapter_name].to(torch.float32) continue for param in submodule[adapter_name].parameters(): if param.dtype in dtypes_to_convert_to_fp32: param.data = param.data.to(torch.float32) def _check_merge_allowed(self): """Helper method to check whether the adapter can be merged. Raise a ValueError if it is not possible to merge the adapter with the given configuration. """ example_code = textwrap.dedent( """ ```python from transformers import AutoModelForCausalLM # Load original tied model model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", tie_word_embeddings=False) # Set the randomly initialized lm_head to the previously tied embeddings model.lm_head.weight.data = model.model.embed_tokens.weight.data.clone() # Save the untied model untied_model_dir = "dir/for/untied/model" model.save_pretrained(untied_model_dir) model.config.save_pretrained(untied_model_dir) # Now use the original model but in untied format model = AutoModelForCausalLM.from_pretrained(untied_model_dir) ``` """ ) tied_target_modules = self._get_tied_target_modules(self.model) if tied_target_modules: warnings.warn( f"Model with `tie_word_embeddings=True` and the {tied_target_modules=} are part of the adapter. " "This can lead to complications. " "You can opt to merge the adapter after cloning the weights (to untie the embeddings). " "You can untie the embeddings by loading the model with `tie_word_embeddings=False`. For example:" + example_code ) def _check_target_module_compatiblity(self, peft_config: PeftConfig, model: nn.Module, target_name: str): """ Prevent applying LoRA to incompatible modules in specific architectures (e.g., Mamba). """ _check_lora_target_modules_mamba(peft_config, model, target_name) def _create_and_replace_parameter( self, peft_config, adapter_name, target, target_name, parent, current_key ) -> None: raise NotImplementedError(f"{self.__class__.__name__} does not support targeting nn.Parameter.") def inject_adapter( self, model: nn.Module, adapter_name: str, autocast_adapter_dtype: bool = True, low_cpu_mem_usage: bool = False, state_dict: Optional[dict[str, torch.Tensor]] = None, ) -> None: r""" Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed. The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class. Args: model (`nn.Module`): The model to be tuned. adapter_name (`str`): The adapter name. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. 
state_dict (`dict`, *optional*, defaults to `None`) If a state_dict is passed here, the adapters will be injected based on the entries of the state_dict. This can be useful when the exact `target_modules` of the PEFT method is unknown, for instance because the checkpoint was created without meta data. Note that the values from the state_dict are not used, only the keys are used to determine the correct layers that should be adapted. """ ################################### # PREPARATION OF MODEL AND CONFIG # ################################### peft_config = self.peft_config[adapter_name] excluded_modules = [] unmatched_modules = [] targeted_modules_from_peft_config: list[str] = [] # only relevant if state_dict is passed # Note: If possible, all checks should be performed *at the start of this method*. # This way, we can raise early if something goes wrong, without leaving the model # in a bad (half-initialized) state. self._check_new_adapter_config(peft_config) model_config = self.get_model_config(model) peft_config = self._prepare_adapter_config(peft_config, model_config) self._prepare_model(peft_config, model) if getattr(peft_config, "target_parameters", []) and state_dict: raise ValueError( "Trying to inject a PEFT adapter from a state_dict but the PEFT config uses `target_parameters`. This " "is not supported -- when using `target_parameters`, please inject the adapter without the state_dict." ) named_modules = list(model.named_modules()) key_list = [key for key, _ in named_modules] uses_dummy_target_modules = getattr(peft_config, "target_modules", None) == DUMMY_TARGET_MODULES if uses_dummy_target_modules: # dummy adapter, we allow not matching any module named_modules = [] key_list = [] # update peft_config.target_modules if required peft_config = _maybe_include_all_linear_layers(peft_config, model) # This is an optimization to reduce the number of entries in the target_modules list. The reason is that in some # circumstances, target_modules can contain hundreds of entries. Since each target module is checked against # each module of the net (which can be thousands), this can become quite expensive when many adapters are being # added. Often, the target_modules can be condensed in such a case, which speeds up the process. # A context in which this can happen is when diffusers loads non-PEFT LoRAs. As there is no meta info on # target_modules in that case, they are just inferred by listing all keys from the state_dict, which can be # quite a lot. See: https://github.com/huggingface/diffusers/issues/9297 # As there is a small chance for undiscovered bugs, we apply this optimization only if the list of # target_modules is sufficiently big. # We also exclude IA³ from this optimization. This is because IA³ has both target_modules and # feedforward_modules, which are coupled (the latter must be a subset). It would be possible to change the logic # to keep both in sync, but it's not quite trivial and probably not worth the effort. See #2429. if ( isinstance(peft_config.target_modules, (list, set)) and (len(peft_config.target_modules) >= MIN_TARGET_MODULES_FOR_OPTIMIZATION) and (peft_config.peft_type != PeftType.IA3) ): names_no_target = [ name for name in key_list if not any((name == suffix) or name.endswith("." 
+ suffix) for suffix in peft_config.target_modules) ] new_target_modules = _find_minimal_target_modules(peft_config.target_modules, names_no_target) if len(new_target_modules) < len(peft_config.target_modules): peft_config.target_modules = new_target_modules ############################### # MATCHING & CREATING MODULES # ############################### existing_adapter_map = {} for key, module in named_modules: if isinstance(module, BaseTunerLayer): existing_adapter_map[key] = module # TODO: check if this the most robust way module_names: set[str] = set() if state_dict is not None: prefix = PEFT_TYPE_TO_PREFIX_MAPPING[peft_config.peft_type] module_names = {k.rsplit("." + prefix, 1)[0] for k in state_dict} for key, module in named_modules: if not key: continue # It is possible that we're adding an additional adapter, so if we encounter a key that clearly belongs to a # previous adapter we can skip here since we don't want to interfere with adapter internals. for adapter_key in existing_adapter_map: if key.startswith(adapter_key + "."): excluded_modules.append(key) break if excluded_modules and excluded_modules[-1] == key: continue if state_dict is None: # normal mechanism: match the modules using the peft_config result = self._check_target_module_exists(peft_config, key) if isinstance(result, _ExcludedModule): excluded_modules.append(key) elif not result: unmatched_modules.append(key) else: self.targeted_module_names.append(key) parent, target, target_name = _get_submodules(model, key) self._check_target_module_compatiblity(peft_config, model, target_name) ctx = init_empty_weights if low_cpu_mem_usage else nullcontext with ctx(): self._create_and_replace( peft_config, adapter_name, target, target_name, parent, current_key=key ) else: # use the state_dict to match modules instead if key not in module_names: unmatched_modules.append(key) else: self.targeted_module_names.append(key) parent, target, target_name = _get_submodules(model, key) self._check_target_module_compatiblity(peft_config, model, target_name) ctx = init_empty_weights if low_cpu_mem_usage else nullcontext with ctx(): self._create_and_replace( peft_config, adapter_name, target, target_name, parent, current_key=key ) # still record what would have been matched via the config so that the two results can be compared if self._check_target_module_exists(peft_config, key): targeted_modules_from_peft_config.append(key) if getattr(peft_config, "target_parameters", []): # Note: We don't need to check for no state_dict being passed, since we already checked this earlier. self._inject_parameters( peft_config=peft_config, model=model, adapter_name=adapter_name, low_cpu_mem_usage=low_cpu_mem_usage ) #################### # CHECK FOR ERRORS # #################### if state_dict is not None: # in case that the state_dict was used as source of truth and it resulted in different outcomes than what # would have been matched with the PEFT config, warn the user about that. targeted_set_from_peft_config = set(targeted_modules_from_peft_config) targeted_set_from_state_dict = set(self.targeted_module_names) diff_peft_config = targeted_set_from_peft_config - targeted_set_from_state_dict diff_state_dict = targeted_set_from_state_dict - targeted_set_from_peft_config warning_msg = "" if diff_peft_config or diff_state_dict: warning_msg = ( "While injecting the PEFT adapters, an inconsistency was discovered between the PEFT config and " "the provided state_dict. This is not necessarily an issue and can be ignored if this was the " "intent. 
" ) if diff_peft_config: warning_msg += ( f"The PEFT config contained these additional target modules: {sorted(diff_peft_config)}. " ) if diff_state_dict: warning_msg += f"The state_dict contained these additional target modules: {sorted(diff_state_dict)}. " if warning_msg: warnings.warn(warning_msg, RuntimeWarning) if not self.targeted_module_names and not self.targeted_parameter_names and not uses_dummy_target_modules: if excluded_modules and not unmatched_modules: # All targeted modules were excluded raise ValueError( "All modules were excluded. This is likely unintended. " "Check your `target_modules`, `exclude_modules` and `modules_to_save` configuration." ) elif not excluded_modules and unmatched_modules and not peft_config.target_modules: raise ValueError( "No `target_modules` passed but also no `target_parameters` found. Please check the values for " "these arguments." ) elif not excluded_modules and unmatched_modules: # None of the targeted modules matched error_msg = ( f"Target modules {peft_config.target_modules} not found in the base model. " f"Please check the target modules and try again." ) if getattr(peft_config, "layers_to_transform", None) is not None: error_msg += f" Note: You specified 'layers_to_transform': {peft_config.layers_to_transform}." if getattr(peft_config, "layers_pattern", None) is not None: error_msg += f" You also specified 'layers_pattern': {peft_config.layers_pattern}." raise ValueError(error_msg) else: # Some modules did not match and some matched but were excluded error_msg = ( "No modules were targeted for adaptation. " "This might be caused by a combination of mismatched target modules and excluded modules. " "Please check your `target_modules` and `exclude_modules` configuration. You may also have " "only targeted modules that are marked to be saved (`modules_to_save`)." ) if getattr(peft_config, "layers_to_transform", None) is not None: error_msg += f" Note: You specified 'layers_to_transform': {peft_config.layers_to_transform}." if getattr(peft_config, "layers_pattern", None) is not None: error_msg += f" You also specified 'layers_pattern': {peft_config.layers_pattern}." raise ValueError(error_msg) elif hasattr(peft_config, "exclude_modules") and peft_config.exclude_modules and not excluded_modules: # exclude_modules was passed but was not used warnings.warn( f"You have passed exclude_modules={peft_config.exclude_modules} but no modules were excluded. " "Please check that exclude_modules was set correctly." ) elif not uses_dummy_target_modules: # If we landed here, it means that at least one module or parameter was adapted, so let's not raise an # error. However, let's warn the user if it seems like # - they wanted to match a module but there was no match # - they wanted to match a parameter but there was no match if peft_config.target_modules and not self.targeted_module_names: warnings.warn( f"target_modules={peft_config.target_modules} were set but no module was matched.", RuntimeWarning ) elif getattr(peft_config, "target_parameters", []) and not self.targeted_parameter_names: warnings.warn( f"target_parameters={peft_config.target_parameters} were set but no parameter was matched.", RuntimeWarning, ) tied_target_modules = self._get_tied_target_modules(model=model) if tied_target_modules: warnings.warn( f"Model with `tie_word_embeddings=True` and the {tied_target_modules=} are part of the adapter. " "This can lead to complications, for example when merging the adapter " "or converting your model to formats other than safetensors. 
" "See for example https://github.com/huggingface/peft/issues/2018." ) ################ # HOUSEKEEPING # ################ # It's important to set the adapter here (again), because otherwise it can happen that if a 2nd adapter is # added, and it targets different layer(s) than the first adapter (which is active), then those different # layers will be activated, which we don't want. self.set_adapter(self.active_adapters) self._mark_only_adapters_as_trainable(model) if self.peft_config[adapter_name].inference_mode: for n, p in model.named_parameters(): if adapter_name in n: p.requires_grad = False set_additional_trainable_modules( model=model, peft_config=peft_config, model_config=BaseTuner.get_model_config(self), adapter_name=adapter_name, ) def _inject_parameters( self, peft_config: PeftConfig, model: nn.Module, adapter_name: str, low_cpu_mem_usage: bool ) -> None: """Inject layers based on peft_config.target_modules""" def strip_base_layer_from_name(module_name): # It is possible that the layer is already a PEFT layer and needs updating with a new adapter. In this case, # the name of parameter would be something like `model.layers.0.experts.base_layer.weight`, i.e. there is a # "base_layer" inserted in the name. We need to remove that, otherwise we won't be able to match correctly # (in this case, "experts.weight" would not match). name = ".base_layer" while name in module_name: prefix, _, suffix = module_name.rpartition(name) module_name = prefix + suffix return module_name def create_and_replace_param(module_name, key, param_name): # helper function to avoid duplication parent, target, target_name = _get_submodules(model, module_name) unwrapped_module_name = strip_base_layer_from_name(module_name) unwrapped_module = model.get_submodule(unwrapped_module_name) # use the class name for checking to avoid circular import if isinstance(unwrapped_module, BaseTunerLayer) and unwrapped_module.__class__.__name__ != "ParamWrapper": raise ValueError( f"Trying to wrap an `nn.Parameter` of layer '{unwrapped_module_name}' of type " f"{type(target).__name__}, which is not a valid target. Make sure that this layer is not " "also targeted with `target_modules`. For some models, PEFT will do this automatically, " "try setting `target_modules=[]` to prevent it." ) self._check_target_module_compatiblity(peft_config, model, target_name) ctx = init_empty_weights if low_cpu_mem_usage else nullcontext with ctx(): self._create_and_replace( peft_config, adapter_name, target, target_name, parent, current_key=key, parameter_name=param_name.rpartition(".")[-1], ) # TODO very simple matching, might not cover all use cases unsorted_target_names = set(peft_config.target_parameters) # As the order of matching can influence the nesting of multiple params on the same module, ensure determinism # by sorting. target_names = sorted(unsorted_target_names) for module_name, module in model.named_modules(): if hasattr(module, "parametrizations"): # Deal with the case that the parameter is already parametrized. The issue is that we would not be able # to match `f"{module_name}.{param_name}"`, as the parameter is now something like # `module.parametrization.weight`. for key in target_names: target_module_name, _, param_name = key.rpartition(".") if target_module_name != module_name: continue if getattr(module, param_name, None) is None: continue create_and_replace_param(module_name, key, param_name) self.targeted_parameter_names.append(key) else: # Standard case: the parameter is not already parametrized. 
Note, however, that the model could already # be nested with lora.ParamWrapper, as this is how we allow targeting multiple Parameters on the same # module. unwrapped_module_name = strip_base_layer_from_name(module_name) # we're interested in finding the "lowest" module that contains the parameter, hence recurse=False for param_name, param in module.named_parameters(recurse=False): key = f"{unwrapped_module_name}.{param_name}" if (key in target_names) or any(key.endswith(f".{target_key}") for target_key in target_names): # Note: We use the unwrapped_module_name to check if the key matches, but we use the module_name for # replacement, since we want to replace the wrapped module. create_and_replace_param(module_name, key, param_name) self.targeted_parameter_names.append(key) def merge_adapter(self, adapter_names: Optional[list[str]] = None, safe_merge: bool = False) -> None: """ This method merges the adapter layers into the base model. Merging adapters can lead to a speed up of the forward pass. A copy of the adapter weights is still kept in memory, which is required to unmerge the adapters. In order to merge the adapter weights without keeping them in memory, please call `merge_and_unload`. Args: adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If `None`, all active adapters will be merged. Defaults to `None`. safe_merge (`bool`, *optional*): If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. """ # Note: The order of arguments here is: # adapter_names, safe_merge # For layer.merge, the order is: # safe_merge, adapter_names # This is not so nice but this method here started with only adapter_names, thus putting safe_merge first would # be a backwards incompatible change. self._check_merge_allowed() for module in self.model.modules(): if isinstance(module, BaseTunerLayer): with onload_layer(module): module.merge(adapter_names=adapter_names, safe_merge=safe_merge) def unmerge_adapter(self): """ This method unmerges all merged adapter layers from the base model. """ for module in self.model.modules(): if isinstance(module, BaseTunerLayer): with onload_layer(module): module.unmerge() def _delete_auxiliary_adapter(self, adapter_name: str, new_active_adapters: Optional[list[str]]) -> None: for module in self.modules(): if isinstance(module, AuxiliaryTrainingWrapper): module.delete_adapter(adapter_name, new_active_adapters=new_active_adapters) def _unloading_checks(self, adapter_names: Optional[list[str]]): adapters_to_consider = adapter_names or self.active_adapters is_modules_to_save_available = any( self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider ) if is_modules_to_save_available and len(adapters_to_consider) > 1: raise ValueError("Cannot unload multiple adapters that specify `modules_to_save`.") @staticmethod def get_model_config(model: nn.Module) -> dict: """ This method gets the config from a model in dictionary form. If model has not attribute config, then this method returns a default config. Args: model (`nn.Module`): Model to get the config from. default (`dict|None`, *optional*):: What to return if model does not have a config attribute. 
""" model_config = getattr(model, "config", DUMMY_MODEL_CONFIG) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() return model_config def _get_tied_target_modules(self, model: nn.Module) -> list[str]: tied_target_modules = [] model_config = self.get_model_config(model) if model_config.get("tie_word_embeddings"): for target_module in self.targeted_module_names: # This potentially yields false positives since we're just looking at the layer names. So if we use a # model that uses weight-tying of lm_head and embed_tokens, a third, unrelated, layer which is # unfortunately named so that it is in EMBEDDING_LAYER_NAMES will be falsely reported here as well. if target_module.split(".")[-1] in EMBEDDING_LAYER_NAMES: tied_target_modules.append(target_module) return tied_target_modules class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str, ...] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str, ...] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer @property def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. 
# https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight @property def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError @property def merged(self) -> bool: return bool(self.merged_adapters) @property def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters @property def active_adapter(self) -> str | list[str]: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def _get_available_adapters(self) -> set[str]: """Return all adapter names that can be found on this module.""" adapters = set() for layer_name in self.adapter_layer_names: module = getattr(self, layer_name) if not isinstance(module, (nn.ModuleDict, nn.ParameterDict)): continue adapters.update(set(module.keys())) return adapters @property def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. 
layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def _move_adapter_to_device_of_base_layer(self, adapter_name: str, device: Optional[torch.device] = None) -> None: """ Move the adapter of the given name to the device of the base layer. """ if device is None: base_layer = self.get_base_layer() if isinstance(base_layer, nn.MultiheadAttention): base_layer = base_layer.out_proj # check weight and qweight (for GPTQ) for weight_name in ("weight", "qweight"): weight = getattr(base_layer, weight_name, None) if weight is not None: device = weight.device dtype = weight.dtype break else: # no break encountered: could not determine the device return meta = torch.device("meta") # loop through all potential adapter layers and move them to the device of the base layer; be careful to only # move this specific adapter to the device, as the other adapters could be on different devices # see #1639 for adapter_layer_name in self.adapter_layer_names + self.other_param_names: adapter_layer = getattr(self, adapter_layer_name, None) if not isinstance(adapter_layer, (nn.ModuleDict, nn.ParameterDict, BufferDict)): continue if adapter_name not in adapter_layer: continue if any(p.device == meta for p in adapter_layer.parameters()): continue # TODO: weight is not necessarily defined here, leading to a NameError, fix that if weight.dtype.is_floating_point or weight.dtype.is_complex: adapter_layer[adapter_name] = adapter_layer[adapter_name].to(device, dtype=dtype) else: adapter_layer[adapter_name] = adapter_layer[adapter_name].to(device) @overload def _cast_input_dtype(self, x: None, dtype: torch.dtype) -> None: ... @overload def _cast_input_dtype(self, x: torch.Tensor, dtype: torch.dtype) -> torch.Tensor: ... 
def _cast_input_dtype(self, x, dtype: torch.dtype): """ Whether to cast the dtype of the input of the forward method. Usually, we want to enable this to align the input dtype with the dtype of the weight, but by setting layer.cast_input_dtype=False, this can be disabled if necessary. Enabling or disabling can be managed via the peft.helpers.disable_lora_input_dtype_casting context manager. """ if x is None: # useful e.g. if x is the bias, which can be None return None cast_input_dtype_enabled = getattr(self, "cast_input_dtype_enabled", True) if (not cast_input_dtype_enabled) or (x.dtype == dtype): return x return x.to(dtype=dtype) def _find_minimal_target_modules( target_modules: list[str] | set[str], other_module_names: list[str] | set[str] ) -> set[str]: """Find the minimal set of target modules that is sufficient to separate them from the other modules. Sometimes, a very large list of target_modules could be passed, which can slow down loading of adapters (e.g. when loaded from diffusers). It may be possible to condense this list from hundreds of items to just a handful of suffixes that are sufficient to distinguish the target modules from the other modules. Example: ```py >>> from peft.tuners.tuners_utils import _find_minimal_target_modules >>> target_modules = [f"model.decoder.layers.{i}.self_attn.q_proj" for i in range(100)] >>> target_modules += [f"model.decoder.layers.{i}.self_attn.v_proj" for i in range(100)] >>> other_module_names = [f"model.encoder.layers.{i}.self_attn.k_proj" for i in range(100)] >>> _find_minimal_target_modules(target_modules, other_module_names) {"q_proj", "v_proj"} ``` Args: target_modules (`list[str]` | `set[str]`): The list of target modules. other_module_names (`list[str]` | `set[str]`): The list of other module names. They must not overlap with the target modules. Returns: `set[str]`: The minimal set of target modules that is sufficient to separate them from the other modules. Raises: ValueError: If `target_modules` is not a list or set of strings or if it contains an empty string. Also raises an error if `target_modules` and `other_module_names` contain common elements. """ if isinstance(target_modules, str) or not target_modules: raise ValueError("target_modules should be a list or set of strings.") target_modules = set(target_modules) if "" in target_modules: raise ValueError("target_modules should not contain an empty string.") other_module_names = set(other_module_names) if not target_modules.isdisjoint(other_module_names): msg = ( "target_modules and other_module_names contain common elements, this should not happen, please " "open a GitHub issue at https://github.com/huggingface/peft/issues with the code to reproduce this issue" ) raise ValueError(msg) # it is assumed that module name parts are separated by a "." def generate_suffixes(s): parts = s.split(".") return [".".join(parts[i:]) for i in range(len(parts))][::-1] # Create a reverse lookup for other_module_names to quickly check suffix matches other_module_suffixes = {suffix for item in other_module_names for suffix in generate_suffixes(item)} # Find all potential suffixes from target_modules target_modules_suffix_map = {item: generate_suffixes(item) for item in target_modules} # Initialize a set for required suffixes required_suffixes = set() # We sort the target_modules_suffix_map simply to get deterministic behavior, since sets have no order. In theory # the order should not matter but in case there is a bug, it's better for the bug to be deterministic. 
for item, suffixes in sorted(target_modules_suffix_map.items(), key=lambda tup: tup[1]): # Go through target_modules items, shortest suffixes first for suffix in suffixes: # If the suffix is already in required_suffixes or matches other_module_names, skip it if suffix in required_suffixes or suffix in other_module_suffixes: continue # Check if adding this suffix covers the item if not any(item.endswith("." + req_suffix) for req_suffix in required_suffixes): required_suffixes.add(suffix) break if not required_suffixes: return set(target_modules) return required_suffixes class _ExcludedModule: """ A private helper method used to represent excluded modules in the check_target_module_exists function. """ def __bool__(self): return False def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None: """A helper method to check if the passed module's key name matches any of the target modules in the adapter_config. Args: config (`LoraConfig` | `LycorisConfig`): A config to match target modules from key (`str`): A key to search any matches in config Returns: `bool` | `re.Match[str]` | `None`: True of match object if key matches any target modules from config, False or None if no match found """ if hasattr(config, "exclude_modules") and config.exclude_modules: if isinstance(config.exclude_modules, str): if re.fullmatch(config.exclude_modules, key): return _ExcludedModule() elif key in config.exclude_modules: return _ExcludedModule() elif any(key.endswith(f".{exclude_key}") for exclude_key in config.exclude_modules): return _ExcludedModule() # Adapters should never match on modules to save modules as it is a guarantee for conflicts of behavior # between `ModulesToSaveWrapper` internals and the potential adapter. modules_to_save = getattr(config, "modules_to_save", None) if modules_to_save: if any(re.match(rf"(^|.*\.){m}($|\..*)", key) for m in modules_to_save): return _ExcludedModule() if (config.target_modules is None) and (config.target_parameters is not None): # this is allowed if config.target_parameters are specified return False if isinstance(config.target_modules, str): target_module_found = match_target_against_key(config.target_modules, key) elif key in config.target_modules: # this module is specified directly in target_modules target_module_found = True else: target_module_found = any(key.endswith(f".{target_key}") for target_key in config.target_modules) layer_indexes = getattr(config, "layers_to_transform", None) layers_pattern = getattr(config, "layers_pattern", None) is_using_layer_indexes = layer_indexes is not None and ( len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True ) if is_using_layer_indexes and target_module_found: layer_index = None # TODO: It's still unclear how empty layers_pattern (None, [], or "") should behave # For now, empty layers_pattern means any layer pattern is ok if layers_pattern is None or len(layers_pattern) == 0: layer_index = re.match(r".*\.[^.]*\.(\d+)\.", key) else: layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern for pattern in layers_pattern: layer_index = re.match(rf".*\.{pattern}\.(\d+)\.", key) if layer_index is not None: break if layer_index is None: target_module_found = False else: layer_index = int(layer_index.group(1)) if isinstance(layer_indexes, int): target_module_found = layer_index == layer_indexes else: target_module_found = layer_index in layer_indexes return target_module_found def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> 
dict: """ A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter. """ config = tuner.peft_config[adapter_name] key_list = [key for key, _ in tuner.model.named_modules()] module_dict = {"matched": [], "unmatched": []} for key in key_list: if tuner._check_target_module_exists(config, key): module_dict["matched"].append(key) else: module_dict["unmatched"].append(key) return module_dict def _maybe_include_all_linear_layers(peft_config: PeftConfig, model: nn.Module) -> PeftConfig: """ Helper function to update `target_modules` to all linear/Conv1D layers if provided as 'all-linear'. Adapted from the QLoRA repository: https://github.com/artidoro/qlora/blob/main/qlora.py """ if not hasattr(peft_config, "target_modules"): return peft_config # if `target_modules` is a string, convert to lower case and check if it matches "all-linear" if not ( isinstance(peft_config.target_modules, str) and peft_config.target_modules.lower() == INCLUDE_LINEAR_LAYERS_SHORTHAND ): return peft_config linear_classes = (torch.nn.Linear, Conv1D) linear_names = ("Linear",) linear_module_names = set() for name, module in model.named_modules(): # match with all linear classes. if isinstance(module, linear_classes): linear_module_names.add(name) elif isinstance(module, BaseTunerLayer) and any(n in type(module).__name__ for n in linear_names): # If the model already has adapter layers applied, then the "linear" layer is actually an adapter layer, # e.g. lora.Linear, and not nn.Linear. To target this layer, we don't want to check the layer type, as there # are many possible layer types (one for each PEFT method) and the list would quickly get out of date. Thus # we rely on the name of the layer class, which by convention is something like "Linear", "Linear4bit", # "HqqLoraLinear", ... in PEFT. It's not pretty but should generally work. # See 2390 linear_module_names.add(name) # Try to remove linear layers that should not be targeted as best as possible. We have to rely on convention as # there are no hard rules to detect these modules. module_names_to_exclude = set() if isinstance(model, PreTrainedModel): output_emb = model.get_output_embeddings() if output_emb is not None: # ignore the last classification head for text generation models last_module_name = [name for name, module in model.named_modules() if module is output_emb][0] module_names_to_exclude.add(last_module_name) elif peft_config.task_type == TaskType.SEQ_CLS: # ignore classifier head for classification models (issue 2027) # there is no fix name for the classifier head, so check the common ones for name in SEQ_CLS_HEAD_NAMES: cls_head = getattr(model, name, None) if cls_head is not None: last_module_name = [name for name, module in model.named_modules() if module is cls_head][0] module_names_to_exclude.add(last_module_name) break # we don't want nested LoRA layers, i.e. LoRA being applied to possibly existing lora_A, lora_B, etc. # see 2390 for prefix, module in model.named_modules(): if isinstance(module, BaseTunerLayer): for suffix, child in module.named_modules(): if suffix: module_names_to_exclude.add(f"{prefix}.{suffix}") linear_module_names -= module_names_to_exclude peft_config.target_modules = linear_module_names return peft_config def check_adapters_to_merge(module: BaseTunerLayer, adapter_names: Optional[list[str]] = None) -> list[str]: """ Helper function to check which adapters should be merged. Only return those adapters that are not already merged. 
Give a warning if some or all of the adapters are already merged. """ if adapter_names is None: adapter_names = module.active_adapters if isinstance(adapter_names, str): raise ValueError(f"adapter_names should be a list of strings, got {adapter_names!r}.") if module.merged: merged_adapters = set(module.merged_adapters) adapter_names = [name for name in adapter_names if name not in merged_adapters] if adapter_names: warnings.warn( f"Already following adapters were merged {','.join(module.merged_adapters)}. " f"You are now additionally merging {','.join(adapter_names)}." ) else: warnings.warn("All adapters are already merged, nothing to do.") return adapter_names def clone_module(module: nn.Module, share_weights=False): """Clone a module in a pytorch model. Clones a module of a model, optionally sharing all the parameters between the original and the clone. Simplifies reusing a module when manipulating the architecture of a model. """ clone = copy.deepcopy(module) def _share_weights(src: nn.Module, dst: nn.Module): for name, param in src.named_parameters(recurse=False): dst.register_parameter(name, param) if share_weights: for name, submodule in module.named_modules(): _share_weights(submodule, clone.get_submodule(name)) return clone def replicate_layers(model: nn.Module, layer_map: list[tuple[int, int]]): """Replicate layers in a transfomer model with weight sharing. This function looks for a module list attribute at model[(.model)*].layers and replicates the layers in the module list according to the layer map. For example the map `[[0, 4], [2, 5]]` will take the set of layers `[0, 1, 2, 3, 4]` and replace them with a module list containing `[0, 1, 2, 3, 2, 3, 4]`. """ while hasattr(model, "model"): model = model.model # Some variants of the bert model nest the main model under the bert attribute. if hasattr(model, "bert"): model = model.bert model_type = None layers: nn.ModuleList = None if hasattr(model, "layers"): model_type = "llama" layers = model.layers elif hasattr(model, "encoder") and hasattr(model.encoder, "layer"): model_type = "bert" layers = model.encoder.layer elif hasattr(model, "h"): model_type = "falcon" layers = model.h if not model_type or not isinstance(layers, nn.ModuleList): raise ValueError( "Could not locate the layers attribute in the model. " "Expected Llama, Bert or Falcon compatible architectures." ) new_layers = [] for start, end in layer_map: for i in range(start, end): current_idx = len(new_layers) new_layers.append(clone_module(layers[i], share_weights=True)) # This is a hack needed to work around the layer_idx introduced in HF transformers. for submodule in new_layers[-1].modules(): if hasattr(submodule, "layer_idx"): submodule.layer_idx = current_idx layers = nn.ModuleList(new_layers) if model_type == "llama": model.layers = layers elif model_type == "bert": model.encoder.layer = layers elif model_type == "falcon": model.h = layers else: raise ValueError("Unexpected model type, need to handle post-processing of layers.") if hasattr(model.config, "num_hidden_layers"): # Common to Llama, Bert, Falcon. model.config.num_hidden_layers = len(new_layers)
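# Illustrative sketch (not part of PEFT): the index arithmetic behind the `layer_map`
# argument of `replicate_layers` above. A map of [start, end) ranges is expanded into
# the new layer order; e.g. [[0, 4], [2, 5]] turns layers [0, 1, 2, 3, 4] into
# [0, 1, 2, 3, 2, 3, 4], where repeated indices become weight-shared clones.
def expand_layer_map(layer_map: list[tuple[int, int]]) -> list[int]:
    new_order: list[int] = []
    for start, end in layer_map:
        new_order.extend(range(start, end))
    return new_order


print(expand_layer_map([(0, 4), (2, 5)]))  # [0, 1, 2, 3, 2, 3, 4]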
peft/src/peft/tuners/tuners_utils.py/0
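# Illustrative sketch (not part of PEFT): the core suffix-matching rule used by
# `check_target_module_exists` in the module above. A module key matches when it equals
# a target entry, ends with "." + entry, or -- for a string config -- matches it as a
# regex. The per-architecture defaults listed in the constants module below are
# consumed by this same rule; extras such as `exclude_modules` and
# `layers_to_transform` are omitted here for brevity.
import re


def matches_target(target_modules, key: str) -> bool:
    if isinstance(target_modules, str):
        return re.fullmatch(target_modules, key) is not None
    return key in target_modules or any(key.endswith(f".{t}") for t in target_modules)


print(matches_target(["q_proj", "v_proj"], "model.layers.0.self_attn.q_proj"))        # True
print(matches_target(["q_proj", "v_proj"], "model.layers.0.mlp.down_proj"))           # False
print(matches_target(r".*self_attn\.(q|v)_proj", "model.layers.3.self_attn.v_proj"))  # True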
{ "file_path": "peft/src/peft/tuners/tuners_utils.py", "repo_id": "peft", "token_count": 28362 }
250
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import packaging.version import torch import transformers from transformers import BloomPreTrainedModel # needed for prefix-tuning of bloom model def bloom_model_postprocess_past_key_value(past_key_values): past_key_values = torch.cat(past_key_values) total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape keys = past_key_values[: total_layers // 2] keys = keys.transpose(2, 3).reshape( total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens ) values = past_key_values[total_layers // 2 :] values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim) return tuple(zip(keys, values)) # needed for prefix-tuning of StarCoder models def starcoder_model_postprocess_past_key_value(past_key_values): result = [] for k in past_key_values: k = k[:, :, 0] k = k.permute([1, 2, 0, 3]) k = k.reshape(*k.shape[:-2], -1) result.append(k) return tuple(result) # TODO: remove this once transformers 4.53 is no longer supported TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {} transformers_le_4_53 = packaging.version.parse(transformers.__version__) < packaging.version.parse("4.54.0.dev0") if transformers_le_4_53: TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING["gpt_bigcode"] = ( starcoder_model_postprocess_past_key_value ) if hasattr(BloomPreTrainedModel, "_convert_to_standard_cache"): # special handling for bloom architecture was fixed in: # https://github.com/huggingface/transformers/pull/31445 # the _convert_to_standard_cache method is removed in the PR and thus serves as an indicator TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING["bloom"] = bloom_model_postprocess_past_key_value TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING = { "llama": ["input_layernorm", "post_attention_layernorm", "norm"], "bloom": ["input_layernorm", "post_attention_layernorm", "ln_f"], "llava": [ "multi_modal_projector", "input_layernorm", "post_attention_layernorm", "norm", "embed_tokens", "lm_head", ], "t5": ["layer_norm", "final_layer_norm"], "mt5": ["layer_norm", "final_layer_norm"], "bart": ["self_attn_layer_norm", "encoder_attn_layer_norm", "final_layer_norm"], "gpt2": ["ln_1", "ln_2", "ln_f"], "blip-2": ["layernorm", "LayerNorm", "final_layer_norm", "self_attn_layer_norm"], "gptj": ["ln_1", "ln_f"], "falcon": ["input_layernorm", "post_attention_layernorm", "ln_f"], "mistral": ["input_layernorm", "post_attention_layernorm", "norm"], "phi": ["input_layernorm", "final_layernorm"], "gemma": ["input_layernorm", "post_attention_layernorm", "norm"], "gemma2": [ "input_layernorm", "post_attention_layernorm", "pre_feedforward_layernorm", "post_feedforward_layernorm", "norm", ], "gemma3_text": [ "input_layernorm", "post_attention_layernorm", "pre_feedforward_layernorm", "post_feedforward_layernorm", "norm", ], "qwen2": ["post_attention_layernorm"], "qwen3": ["post_attention_layernorm"], } 
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": ["query", "value"], "llama": ["q_proj", "v_proj"], "llama4": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["c_attn"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "btlm": ["c_proj", "c_attn"], "codegen": ["qkv_proj"], "mistral": ["q_proj", "v_proj"], "mixtral": ["q_proj", "v_proj"], "stablelm": ["q_proj", "v_proj"], "phi": ["q_proj", "v_proj", "fc1", "fc2"], "gemma": ["q_proj", "v_proj"], "gemma2": ["q_proj", "v_proj"], "gemma3_text": ["q_proj", "v_proj"], "qwen2": ["q_proj", "v_proj"], "qwen3": ["q_proj", "v_proj"], } TRANSFORMERS_MODELS_TO_LOKR_TARGET_MODULES_MAPPING = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING.copy() TRANSFORMERS_MODELS_TO_LOHA_TARGET_MODULES_MAPPING = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING.copy() TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING = { "t5": ["k", "v", "wo"], "mt5": ["k", "v", "wi_1"], "gpt2": ["c_attn", "mlp.c_proj"], "bloom": ["query_key_value", "mlp.dense_4h_to_h"], "roberta": ["key", "value", "output.dense"], "opt": ["q_proj", "k_proj", "fc2"], "gptj": ["q_proj", "v_proj", "fc_out"], "gpt_neox": ["query_key_value", "dense_4h_to_h"], "gpt_neo": ["q_proj", "v_proj", "c_proj"], "bart": ["q_proj", "v_proj", "fc2"], "gpt_bigcode": ["c_attn", "mlp.c_proj"], "llama": ["k_proj", "v_proj", "down_proj"], "llama4": ["q_proj", "v_proj", "down_proj"], "mistral": ["k_proj", "v_proj", "down_proj"], "mixtral": ["k_proj", "v_proj", "w2"], "bert": ["key", "value", "output.dense"], "deberta-v2": ["key_proj", "value_proj", "output.dense"], "deberta": ["in_proj", "output.dense"], "RefinedWebModel": ["query_key_value", "dense_4h_to_h"], "RefinedWeb": ["query_key_value", "dense_4h_to_h"], "falcon": ["query_key_value", "dense_4h_to_h"], "phi": ["q_proj", "v_proj", "fc2"], "gemma": ["q_proj", "v_proj", "down_proj"], "gemma2": ["q_proj", "v_proj", "down_proj"], "gemma3_text": ["q_proj", "v_proj", "down_proj"], "qwen2": ["q_proj", "v_proj", "down_proj"], "qwen3": ["q_proj", "v_proj", "down_proj"], } TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING = { "t5": ["wo"], "mt5": [], "gpt2": ["mlp.c_proj"], "bloom": ["mlp.dense_4h_to_h"], "roberta": ["output.dense"], "opt": ["fc2"], "gptj": ["fc_out"], "gpt_neox": ["dense_4h_to_h"], "gpt_neo": ["c_proj"], "bart": ["fc2"], "gpt_bigcode": ["mlp.c_proj"], "llama": ["down_proj"], "llama4": ["down_proj"], "mistral": ["down_proj"], "mixtral": ["w2"], "bert": ["output.dense"], "deberta-v2": ["output.dense"], "deberta": ["output.dense"], "RefinedWeb": ["dense_4h_to_h"], "RefinedWebModel": ["dense_4h_to_h"], "falcon": ["dense_4h_to_h"], "phi": ["fc2"], "gemma": ["down_proj"], "gemma2": ["down_proj"], "gemma3_text": ["down_proj"], "qwen2": ["down_proj"], "qwen3": ["down_proj"], } TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = { "t5": ["q", "k", "v", "o", "wi", "wo"], "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"], "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", 
"fc2"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "llama": ["q_proj", "v_proj"], "llama4": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "key", "value", "dense"], # "xlm-roberta": ["query", "value"], # "electra": ["query", "value"], "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"], "gpt_bigcode": ["c_attn"], "deberta": ["in_proj"], # "layoutlm": ["query", "value"], "gemma": ["q_proj", "v_proj"], "gemma2": ["q_proj", "v_proj"], "gemma3_text": ["q_proj", "v_proj"], "qwen2": ["q_proj", "v_proj"], "qwen3": ["q_proj", "v_proj"], } TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": ["query", "value"], "llama": ["q_proj", "v_proj"], "llama4": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["c_attn"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "btlm": ["c_proj", "c_attn"], "codegen": ["qkv_proj"], "mistral": ["q_proj", "v_proj"], "mixtral": ["q_proj", "v_proj"], "stablelm": ["q_proj", "v_proj"], "phi": ["q_proj", "v_proj"], "gemma": ["q_proj", "v_proj"], "gemma2": ["q_proj", "v_proj"], "gemma3_text": ["q_proj", "v_proj"], "qwen2": ["q_proj", "v_proj"], "qwen3": ["q_proj", "v_proj"], } TRANSFORMERS_MODELS_TO_SHIRA_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": ["query", "value"], "llama": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["c_attn"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "btlm": ["c_proj", "c_attn"], "codegen": ["qkv_proj"], "mistral": ["q_proj", "v_proj"], "mixtral": ["q_proj", "v_proj"], "stablelm": ["q_proj", "v_proj"], "phi": ["q_proj", "v_proj"], "gemma": ["q_proj", "v_proj"], "gemma2": ["q_proj", "v_proj"], "gemma3_text": ["q_proj", "v_proj"], "qwen2": ["q_proj", "v_proj"], } TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["mlp.c_proj"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": 
["query", "value"], "llama": ["q_proj", "v_proj"], "llama4": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["mlp.c_proj"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "codegen": ["qkv_proj"], "mistral": ["q_proj", "v_proj"], "mixtral": ["q_proj", "v_proj"], "stablelm": ["q_proj", "v_proj"], "phi": ["q_proj", "v_proj", "fc1", "fc2"], "gemma": ["q_proj", "v_proj"], "gemma2": ["q_proj", "v_proj"], "gemma3_text": ["q_proj", "v_proj"], "qwen2": ["q_proj", "v_proj"], "qwen3": ["q_proj", "v_proj"], } TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING = { "t5": ["q", "k", "v", "o", "wi", "wo"], "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"], "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "llama": ["q_proj", "v_proj"], "llama4": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"], "gpt_bigcode": ["c_attn"], "deberta": ["in_proj"], "gemma": ["q_proj", "v_proj"], "gemma2": ["q_proj", "v_proj"], "gemma3_text": ["q_proj", "v_proj"], "qwen2": ["q_proj", "v_proj"], "qwen3": ["q_proj", "v_proj"], } TRANSFORMERS_MODELS_TO_C3A_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["mlp.c_proj"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": ["query", "value"], "llama": ["q_proj", "v_proj"], "llama4": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["mlp.c_proj"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "codegen": ["qkv_proj"], "mistral": ["q_proj", "v_proj"], "mixtral": ["q_proj", "v_proj"], "stablelm": ["q_proj", "v_proj"], "phi": ["q_proj", "v_proj", "fc1", "fc2"], "gemma": ["q_proj", "v_proj"], "gemma2": ["q_proj", "v_proj"], "gemma3_text": ["q_proj", "v_proj"], "qwen2": ["q_proj", "v_proj"], "qwen3": ["q_proj", "v_proj"], } TRANSFORMERS_MODELS_TO_RANDLORA_TARGET_MODULES_MAPPING = ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING # Leaving this for now but RandLoRA is flexible ) WEIGHTS_NAME = "adapter_model.bin" SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors" CONFIG_NAME = "adapter_config.json" EMBEDDING_LAYER_NAMES = ["embed_tokens", "lm_head"] SEQ_CLS_HEAD_NAMES = ["score", "classifier"] INCLUDE_LINEAR_LAYERS_SHORTHAND = "all-linear" TOKENIZER_CONFIG_NAME = "tokenizer_config.json" DUMMY_TARGET_MODULES = "dummy-target-modules" DUMMY_MODEL_CONFIG = {"model_type": "custom"} # If users specify more than this number of target modules, we apply an optimization to try to reduce the target modules # to a minimal set of suffixes, which makes loading faster. We only apply this when exceeding a certain size since # otherwise there is no point in optimizing and there is a small chance of bugs in the optimization algorithm, so no # point in taking unnecessary risks. 
# See #2045 for more context.
MIN_TARGET_MODULES_FOR_OPTIMIZATION = 20
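The mappings above are the per-architecture defaults that PEFT falls back to when a config does not specify `target_modules`. A minimal sketch of that lookup, assuming only what is defined in this file (the model id is illustrative):

```python
from transformers import AutoModelForCausalLM

from peft import LoraConfig, get_peft_model
from peft.utils.constants import TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING

# "opt" has an entry in the LoRA mapping above: ["q_proj", "v_proj"]
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
print(TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model.config.model_type])

# With target_modules omitted, PEFT resolves it from the same per-architecture default.
peft_model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM"))
peft_model.print_trainable_parameters()
```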
peft/src/peft/utils/constants.py/0
{ "file_path": "peft/src/peft/utils/constants.py", "repo_id": "peft", "token_count": 7641 }
251
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import torch from transformers import AutoModelForCausalLM, AutoTokenizer from peft import ( AutoPeftModel, AutoPeftModelForCausalLM, AutoPeftModelForFeatureExtraction, AutoPeftModelForQuestionAnswering, AutoPeftModelForSeq2SeqLM, AutoPeftModelForSequenceClassification, AutoPeftModelForTokenClassification, LoraConfig, PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, get_peft_model, ) from peft.utils import infer_device class TestPeftAutoModel: dtype = torch.float16 if infer_device() == "mps" else torch.bfloat16 def test_peft_causal_lm(self): model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora" model = AutoPeftModelForCausalLM.from_pretrained(model_id) assert isinstance(model, PeftModelForCausalLM) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForCausalLM.from_pretrained(tmp_dirname) assert isinstance(model, PeftModelForCausalLM) # check if kwargs are passed correctly model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModelForCausalLM) assert model.base_model.lm_head.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype) def test_peft_causal_lm_extended_vocab(self): model_id = "peft-internal-testing/tiny-random-OPTForCausalLM-extended-vocab" model = AutoPeftModelForCausalLM.from_pretrained(model_id) assert isinstance(model, PeftModelForCausalLM) # check if kwargs are passed correctly model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModelForCausalLM) assert model.base_model.lm_head.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype) def test_peft_seq2seq_lm(self): model_id = "peft-internal-testing/tiny_T5ForSeq2SeqLM-lora" model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id) assert isinstance(model, PeftModelForSeq2SeqLM) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSeq2SeqLM.from_pretrained(tmp_dirname) assert isinstance(model, PeftModelForSeq2SeqLM) # check if kwargs are passed correctly model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModelForSeq2SeqLM) assert model.base_model.lm_head.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype) def test_peft_sequence_cls(self): model_id = 
"peft-internal-testing/tiny_OPTForSequenceClassification-lora" model = AutoPeftModelForSequenceClassification.from_pretrained(model_id) assert isinstance(model, PeftModelForSequenceClassification) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSequenceClassification.from_pretrained(tmp_dirname) assert isinstance(model, PeftModelForSequenceClassification) # check if kwargs are passed correctly model = AutoPeftModelForSequenceClassification.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModelForSequenceClassification) assert model.score.original_module.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSequenceClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=self.dtype ) def test_peft_token_classification(self): model_id = "peft-internal-testing/tiny_GPT2ForTokenClassification-lora" model = AutoPeftModelForTokenClassification.from_pretrained(model_id) assert isinstance(model, PeftModelForTokenClassification) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForTokenClassification.from_pretrained(tmp_dirname) assert isinstance(model, PeftModelForTokenClassification) # check if kwargs are passed correctly model = AutoPeftModelForTokenClassification.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModelForTokenClassification) assert model.base_model.classifier.original_module.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForTokenClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=self.dtype ) def test_peft_question_answering(self): model_id = "peft-internal-testing/tiny_OPTForQuestionAnswering-lora" model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id) assert isinstance(model, PeftModelForQuestionAnswering) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForQuestionAnswering.from_pretrained(tmp_dirname) assert isinstance(model, PeftModelForQuestionAnswering) # check if kwargs are passed correctly model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModelForQuestionAnswering) assert model.base_model.qa_outputs.original_module.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForQuestionAnswering.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=self.dtype ) def test_peft_feature_extraction(self): model_id = "peft-internal-testing/tiny_OPTForFeatureExtraction-lora" model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id) assert isinstance(model, PeftModelForFeatureExtraction) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForFeatureExtraction.from_pretrained(tmp_dirname) assert isinstance(model, PeftModelForFeatureExtraction) # check if kwargs are passed correctly model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModelForFeatureExtraction) assert model.base_model.model.decoder.embed_tokens.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForFeatureExtraction.from_pretrained( model_id, adapter_name, is_trainable, 
torch_dtype=self.dtype ) def test_peft_whisper(self): model_id = "peft-internal-testing/tiny_WhisperForConditionalGeneration-lora" model = AutoPeftModel.from_pretrained(model_id) assert isinstance(model, PeftModel) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModel.from_pretrained(tmp_dirname) assert isinstance(model, PeftModel) # check if kwargs are passed correctly model = AutoPeftModel.from_pretrained(model_id, torch_dtype=self.dtype) assert isinstance(model, PeftModel) assert model.base_model.model.model.encoder.embed_positions.weight.dtype == self.dtype adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModel.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=self.dtype) def test_embedding_size_not_reduced_if_greater_vocab_size(self, tmp_path): # See 2415 # There was a bug in AutoPeftModels where the embedding was always resized to the vocab size of the tokenizer # when the tokenizer was found. This makes sense if the vocabulary was extended, but some models like Qwen # already start out with "spare" embeddings, i.e. the embedding size is larger than the vocab size. This could # result in the embedding being shrunk, which in turn resulted in an error when loading the weights. # first create a checkpoint; it is important that the tokenizer is also saved in the same location model_id = "Qwen/Qwen2-0.5B" model = AutoModelForCausalLM.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) model = get_peft_model(model, LoraConfig(modules_to_save=["lm_head", "embed_token"])) model.save_pretrained(tmp_path) tokenizer.save_pretrained(tmp_path) # does not raise; without the fix, it raises: # > size mismatch for base_model.model.lm_head.modules_to_save.default.weight: copying a param with shape # torch.Size([151936, 896]) from checkpoint, the shape in current model is torch.Size([151646, 896]). AutoPeftModelForCausalLM.from_pretrained(tmp_path)
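A condensed sketch of the round trip these tests exercise (the tiny model id comes from the test above; extra kwargs such as `torch_dtype` are forwarded to the underlying base model):

```python
import tempfile

import torch

from peft import AutoPeftModelForCausalLM, PeftModelForCausalLM

model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora"
model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
assert isinstance(model, PeftModelForCausalLM)

# Saving and re-loading through the same Auto class keeps the adapter attached.
with tempfile.TemporaryDirectory() as tmp_dirname:
    model.save_pretrained(tmp_dirname)
    reloaded = AutoPeftModelForCausalLM.from_pretrained(tmp_dirname)
    assert isinstance(reloaded, PeftModelForCausalLM)
```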
peft/tests/test_auto.py/0
{ "file_path": "peft/tests/test_auto.py", "repo_id": "peft", "token_count": 4124 }
252
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from peft.utils.integrations import init_empty_weights, skip_init_on_device class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.drop = nn.Dropout(0.5) self.lin1 = nn.Linear(20, 2, bias=bias) def get_mlp(): return MLP() class TestInitEmptyWeights: def test_init_empty_weights_works(self): # this is a very rudimentary test, as init_empty_weights is copied almost 1:1 from accelerate and is tested # there with init_empty_weights(): mlp = get_mlp() expected = torch.device("meta") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_works(self): # when a function is decorated with skip_init_on_device, the parameters are not moved to meta device, even when # inside the context decorated_fn = skip_init_on_device(get_mlp) with init_empty_weights(): mlp = decorated_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_works_outside_context(self): # same as before, but ensure that skip_init_on_device does not break when no init_empty_weights context is used decorated_fn = skip_init_on_device(get_mlp) mlp = decorated_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_not_permanent(self): # ensure that after skip_init_on_device has been used, init_empty_weights reverts to its original functionality # with decorator => cpu decorated_fn = skip_init_on_device(get_mlp) with init_empty_weights(): mlp = decorated_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp.parameters()) # without decorator => meta with init_empty_weights(): mlp = get_mlp() expected = torch.device("meta") assert all(p.device == expected for p in mlp.parameters()) def test_skip_init_on_device_nested(self): # ensure that skip_init_on_device works even if the decorated function is nested inside another decorated # function @skip_init_on_device def outer_fn(): @skip_init_on_device def inner_fn(): return get_mlp() mlp0 = inner_fn() mlp1 = get_mlp() return mlp0, mlp1 with init_empty_weights(): mlp0, mlp1 = outer_fn() expected = torch.device("cpu") assert all(p.device == expected for p in mlp0.parameters()) assert all(p.device == expected for p in mlp1.parameters())
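A short sketch of the behavior these tests pin down: inside `init_empty_weights()` new parameters land on the meta device, unless the constructor call is routed through `skip_init_on_device` (the helper mirrors `get_mlp` above):

```python
from torch import nn

from peft.utils.integrations import init_empty_weights, skip_init_on_device


def make_linear():
    return nn.Linear(4, 4)


with init_empty_weights():
    on_meta = make_linear()                      # parameters created on the meta device
    on_cpu = skip_init_on_device(make_linear)()  # decorated call keeps real (cpu) weights

print(on_meta.weight.device)  # meta
print(on_cpu.weight.device)   # cpu
```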
peft/tests/test_integrations.py/0
{ "file_path": "peft/tests/test_integrations.py", "repo_id": "peft", "token_count": 1367 }
253
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The intent of the tests contained in this file is to check as many PEFT features as possible with torch.compile. This # is thus a document on how well torch.compile is supported by PEFT. Currently, we know that certain features do not # work with torch.compile. The corresponding tests should be marked with `@pytest.mark.xfail(strict=True)`. # # When adding a new test that fails with torch.compile, please make sure first that it does NOT fail without # torch.compile. import gc import os import pytest import torch from accelerate.utils.memory import clear_device_cache from transformers import ( AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, DataCollatorForLanguageModeling, Trainer, TrainerCallback, TrainingArguments, ) from peft import ( AdaLoraConfig, BOFTConfig, BoneConfig, HRAConfig, IA3Config, LNTuningConfig, LoHaConfig, LoKrConfig, LoraConfig, MissConfig, OFTConfig, PeftModel, TaskType, VBLoRAConfig, VeraConfig, get_peft_model, ) from .testing_utils import load_dataset_english_quotes, require_bitsandbytes # only run (very slow) torch.compile tests when explicitly asked to if os.environ.get("PEFT_DEBUG_WITH_TORCH_COMPILE") != "1": pytest.skip(allow_module_level=True) # Mapping: name of the setting -> (Peft config instance, torch.compile kwargs) SETTINGS = { "adalora": (AdaLoraConfig(task_type=TaskType.CAUSAL_LM, total_step=5), {}), "boft": (BOFTConfig(task_type=TaskType.CAUSAL_LM), {}), "dora": (LoraConfig(task_type=TaskType.CAUSAL_LM, use_dora=True), {}), "ia3": (IA3Config(task_type=TaskType.CAUSAL_LM), {}), "ln_tuning": (LNTuningConfig(task_type=TaskType.CAUSAL_LM, target_modules=["final_layer_norm"]), {}), "loha": (LoHaConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}), "lokr": pytest.param( (LoKrConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}), ), "lora": (LoraConfig(task_type=TaskType.CAUSAL_LM), {}), "lora-target-embeddings": pytest.param( (LoraConfig(task_type=TaskType.CAUSAL_LM, target_modules=["embed_tokens"]), {}), ), "lora-with-modules-to-save": (LoraConfig(task_type=TaskType.CAUSAL_LM, modules_to_save=["embed_tokens"]), {}), "oft": (OFTConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}), "vblora": (VBLoRAConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], vector_length=2), {}), "vera": (VeraConfig(task_type=TaskType.CAUSAL_LM), {}), "hra": (HRAConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}), "bone": (BoneConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], r=2), {}), "bone-bat": ( BoneConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], r=2, init_weights="bat"), {}, ), "miss": (MissConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], r=2), {}), "miss-bat": ( MissConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], r=2, init_weights="bat"), {}, ), "miss-mini": ( 
MissConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"], r=2, init_weights="mini"), {}, ), } @pytest.mark.single_gpu_tests class TestTorchCompileCausalLM: """ Tests for using torch.compile with causal LM. Tip: When adding a new test, set `fake_compile = True` below. With this setting, torch.compile is being skipped. This is useful for two reasons: - compile is slow, so to quickly iterate on the test, it's best to disable it and only enable it at the very end - even if you expect the test to fail with compile, as compile does not work with every PEFT feature, it still MUST succeed without compile, otherwise the test is incorrect. Before creating the PR, disable `fake_compile`. """ fake_compile = False model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" max_train_loss = 15.0 # generous threshold for maximum loss after training @pytest.fixture(autouse=True) def teardown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ clear_device_cache(garbage_collection=True) gc.collect() @pytest.fixture(scope="class") def tokenizer(self): return AutoTokenizer.from_pretrained(self.model_id) @pytest.fixture(scope="class") def data(self, tokenizer): def tokenize(samples): # For some reason, the max sequence length is not honored by the tokenizer, resulting in IndexErrors. Thus, # manually ensure that sequences are not too long. tokenized = tokenizer(samples["quote"]) tokenized["input_ids"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized["input_ids"]] tokenized["attention_mask"] = [ input_ids[: tokenizer.model_max_length] for input_ids in tokenized["attention_mask"] ] return tokenized data = load_dataset_english_quotes() data = data.map(tokenize, batched=True) # We need to manually remove unused columns. This is because we cannot use remove_unused_columns=True in the # Trainer, as this leads to errors with torch.compile. We also cannot just leave them in, as they contain # strings. Therefore, manually remove all unused columns. 
data = data.remove_columns(["quote", "author", "tags"]) return data def compile(self, model, compile_kwargs): compile_kwargs = compile_kwargs.copy() # those are only for the Trainer arguments compile_kwargs.pop("torch_compile_backend", None) compile_kwargs.pop("torch_compile_mode", None) if self.fake_compile: return model return torch.compile(model, **compile_kwargs) @pytest.mark.parametrize("settings", SETTINGS.values(), ids=SETTINGS.keys()) def test_causal_lm_training_trainer_compile(self, settings, tokenizer, data, tmp_path): r"""Train a PEFT model with torch.compile using Trainer""" tmp_dir = tmp_path / "model" config, compile_kwargs = settings torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", ) model = get_peft_model(model, config) # record outputs before training model.eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_before = model(sample) model.train() train_kwargs = { "per_device_train_batch_size": 4, "max_steps": 5, "learning_rate": 1e-3, "logging_steps": 1, "output_dir": tmp_dir, "seed": 0, } if isinstance(config, AdaLoraConfig): train_kwargs["learning_rate"] = 1e-2 training_args = TrainingArguments( torch_compile=not self.fake_compile, torch_compile_backend=compile_kwargs.get("torch_compile_backend", None), torch_compile_mode=compile_kwargs.get("torch_compile_mode", None), **train_kwargs, ) trainer = Trainer( model=model, train_dataset=data["train"], args=training_args, data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False if isinstance(config, AdaLoraConfig): class OptimizerStepCallback(TrainerCallback): def on_optimizer_step(self, args, state, control, **kwargs): model.update_and_allocate(state.global_step) trainer.add_callback(OptimizerStepCallback()) trainer.train() model.eval() atol, rtol = 1e-4, 1e-4 with torch.inference_mode(): output_after = model(sample) tokens_after = model.generate(sample) assert torch.isfinite(output_after.logits).all() # sanity check: model was updated assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol) assert trainer.state.log_history[-1]["train_loss"] < self.max_train_loss # check saving the model and loading it without compile model.save_pretrained(tmp_path) del model torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map="auto") model = PeftModel.from_pretrained(model, tmp_path) with torch.inference_mode(): output_loaded = model(sample) tokens_loaded = model.generate(sample) assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol) assert (tokens_after == tokens_loaded).all() @pytest.mark.parametrize("settings", SETTINGS.values(), ids=SETTINGS.keys()) def test_causal_lm_training_pytorch_compile(self, settings, tokenizer, data, tmp_path): r"""Train a PEFT model with torch.compile using PyTorch training loop""" torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", ) config, compile_kwargs = settings model = get_peft_model(model, config) if isinstance(config, AdaLoraConfig): model.base_model.peft_config["default"].total_step = 5 model = self.compile(model, compile_kwargs) # record outputs before training model.eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_before = model(sample) model.train() model.config.use_cache = False optimizer = torch.optim.AdamW(model.parameters(), 
lr=1e-3) batch_size = 4 losses = [] max_steps = 5 * batch_size for i in range(0, max_steps, batch_size): batch = tokenizer.pad(data["train"][i : i + batch_size], return_tensors="pt").to(model.device) # add targets batch["labels"] = batch["input_ids"].clone() optimizer.zero_grad() outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() losses.append(loss.item()) if isinstance(config, AdaLoraConfig): model.base_model.update_and_allocate(i) model.eval() with torch.inference_mode(): output_after = model(sample) tokens_after = model.generate(sample) assert torch.isfinite(output_after.logits).all() atol, rtol = 1e-4, 1e-4 # sanity check: model was updated assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol) assert losses[-1] < self.max_train_loss # check saving the model and loading it without compile model.save_pretrained(tmp_path) del model torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map="auto") model = PeftModel.from_pretrained(model, tmp_path) with torch.inference_mode(): output_loaded = model(sample) tokens_loaded = model.generate(sample) assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol) assert (tokens_after == tokens_loaded).all() @require_bitsandbytes def test_causal_lm_training_lora_bnb_compile(self, tokenizer, data, tmp_path): r"""Train a bnb quantized LoRA model with torch.compile using PyTorch training loop""" torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ) config = LoraConfig(task_type=TaskType.CAUSAL_LM) model = get_peft_model(model, config) model = self.compile(model, {}) # record outputs before training model.eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_before = model(sample) model.train() model.config.use_cache = False optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3) batch_size = 4 losses = [] max_steps = 5 * batch_size for i in range(0, max_steps, batch_size): batch = tokenizer.pad(data["train"][i : i + batch_size], return_tensors="pt").to(model.device) # add targets batch["labels"] = batch["input_ids"].clone() optimizer.zero_grad() outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() losses.append(loss.item()) model.eval() with torch.inference_mode(): output_after = model(sample) assert torch.isfinite(output_after.logits).all() atol, rtol = 5e-4, 5e-4 # sanity check: model was updated assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol) assert losses[-1] < self.max_train_loss # check saving the model and loading it without compile model.save_pretrained(tmp_path) del model torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True) ) model = PeftModel.from_pretrained(model, tmp_path) with torch.inference_mode(): # after loading, outputs are float32 for some reason output_loaded = model(sample) assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol) @require_bitsandbytes def test_causal_lm_multiple_lora_adapter_compile(self, tokenizer, data): torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ).eval() sample = 
torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_base = model(sample) config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False) model = get_peft_model(model, config) model.add_adapter("other", config) model = self.compile(model, {}) model.eval() with torch.inference_mode(): output_default_adapter = model(sample) model.set_adapter("other") with torch.inference_mode(): output_other_adapter = model(sample) atol, rtol = 1e-4, 1e-4 # outputs of the base model != output of default adapter != output of other adapter assert not torch.allclose(output_base.logits, output_default_adapter.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_base.logits, output_other_adapter.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_default_adapter.logits, output_other_adapter.logits, atol=atol, rtol=rtol) # now delete the other adapter model.delete_adapter("other") model.set_adapter("default") with torch.inference_mode(): output_after_delete = model(sample) # outputs after delete == output of default adapter assert torch.allclose(output_default_adapter.logits, output_after_delete.logits, atol=atol, rtol=rtol) def test_causal_lm_disable_lora_adapter_compile(self, tokenizer, data): torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ).eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_base = model(sample) config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False) model = get_peft_model(model, config).eval() model = self.compile(model, {}) output_lora = model(sample) with model.disable_adapter(): with torch.inference_mode(): output_disabled = model(sample) atol, rtol = 5e-4, 5e-4 # outputs of the base model == output disabled adapter != output of lora adapter assert torch.allclose(output_base.logits, output_disabled.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol) @require_bitsandbytes def test_causal_lm_merging_lora_adapter_compile(self, tokenizer, data): # merge the adapter torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ).eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_base = model(sample) config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False) model = get_peft_model(model, config).eval() with torch.inference_mode(): output_lora = model(sample) model.merge_adapter() with torch.inference_mode(): output_merged = model(sample) # merging is less precise, be more tolerant atol, rtol = 1e-1, 1e-1 # outputs of the base model != output of lora adapter == output of merged adapter assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol) assert torch.allclose(output_lora.logits, output_merged.logits, atol=atol, rtol=rtol) @require_bitsandbytes def test_causal_lm_merging_multiple_lora_adapters_compile(self, tokenizer, data): # merge multiple adapters at once torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ).eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_base = model(sample) config = 
LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False) model = get_peft_model(model, config).eval() model.add_adapter("other", config) with torch.inference_mode(): output_default = model(sample) model.set_adapter("other") with torch.inference_mode(): output_other = model(sample) model.base_model.merge_adapter(["default", "other"]) with torch.inference_mode(): output_merged = model(sample) # merging is less precise, be more tolerant atol, rtol = 1e-1, 1e-1 # outputs of the base model != output of default adapter != output of other adapter assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_base.logits, output_other.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol) # outputs of merged adapter != all others assert not torch.allclose(output_base.logits, output_merged.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_default.logits, output_merged.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_other.logits, output_merged.logits, atol=atol, rtol=rtol) @require_bitsandbytes def test_causal_lm_merge_and_unload_lora_adapter_compile(self, tokenizer, data): torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ).eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_base = model(sample) config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False) model = get_peft_model(model, config).eval() model = self.compile(model, {}) with torch.inference_mode(): output_lora = model(sample) unloaded = model.merge_and_unload() with torch.inference_mode(): output_unloaded = unloaded(sample) # merging is less precise, be more tolerant atol, rtol = 1e-1, 1e-1 # outputs of the base model != output of lora adapter == output of unloaded adapter assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol) assert torch.allclose(output_lora.logits, output_unloaded.logits, atol=atol, rtol=rtol) @require_bitsandbytes def test_causal_lm_mixed_batch_lora_adapter_compile(self, tokenizer, data): torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ).eval() # we need at least 3 samples for this to work! 
sample = { "input_ids": torch.arange(12).reshape(3, 4).to("cuda"), "attention_mask": torch.ones(3, 4).long().to("cuda"), } with torch.inference_mode(): output_base = model(**sample) config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False) model = get_peft_model(model, config).eval() with torch.inference_mode(): output_default = model(**sample) model.add_adapter("other", config) model.set_adapter("other") with torch.inference_mode(): output_other = model(**sample) model = self.compile(model, {}) # set adapter_indices so that it alternates between 0 (base), lora 1, and lora 2 adapter_names = ["__base__", "default", "other"] with torch.inference_mode(): output_mixed = model(**sample, adapter_names=adapter_names) atol, rtol = 5e-4, 5e-4 # outputs of the base model != output of lora adapter 1 != output of other adapter assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_other.logits, output_mixed.logits, atol=atol, rtol=rtol) # outputs of mixed adapter is mix of all 3 assert torch.allclose(output_base.logits[0], output_mixed.logits[0], atol=atol, rtol=rtol) assert torch.allclose(output_default.logits[1], output_mixed.logits[1], atol=atol, rtol=rtol) assert torch.allclose(output_other.logits[2], output_mixed.logits[2], atol=atol, rtol=rtol) @require_bitsandbytes def test_causal_lm_add_weighted_adapter_lora_adapter_compile(self, tokenizer, data): torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ).eval() sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device) with torch.inference_mode(): output_base = model(sample) config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False) model = get_peft_model(model, config).eval() model.add_adapter("other", config) with torch.inference_mode(): output_default = model(sample) model.set_adapter("other") with torch.inference_mode(): output_other = model(sample) model.add_weighted_adapter(["default", "other"], [0.5, 0.5], adapter_name="combined") model.set_adapter("combined") with torch.inference_mode(): output_combined = model(sample) atol, rtol = 1e-4, 1e-4 # outputs of the base model != output of default adapter != output of other adapter assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_base.logits, output_other.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol) # outputs of combined adapter != all others assert not torch.allclose(output_base.logits, output_combined.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_default.logits, output_combined.logits, atol=atol, rtol=rtol) assert not torch.allclose(output_other.logits, output_combined.logits, atol=atol, rtol=rtol)
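A minimal sketch of the setup the tests above compile, reduced to its core (the model id and LoRA config are taken from the tests; the single toy batch is illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from peft import LoraConfig, TaskType, get_peft_model

model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = get_peft_model(
    AutoModelForCausalLM.from_pretrained(model_id),
    LoraConfig(task_type=TaskType.CAUSAL_LM),
)
model.config.use_cache = False   # as in the tests, disable the cache while training
compiled = torch.compile(model)  # wrap with PEFT first, then compile

batch = tokenizer(["hello world"], return_tensors="pt")
batch["labels"] = batch["input_ids"].clone()
loss = compiled(**batch).loss
loss.backward()
```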
peft/tests/test_torch_compile.py/0
{ "file_path": "peft/tests/test_torch_compile.py", "repo_id": "peft", "token_count": 11218 }
254
message: "If you use this software, please cite it as below." title: "PyTorch Image Models" version: "1.2.2" doi: "10.5281/zenodo.4414861" authors: - family-names: Wightman given-names: Ross version: 1.0.11 year: "2019" url: "https://github.com/huggingface/pytorch-image-models" license: "Apache 2.0"
pytorch-image-models/CITATION.cff/0
{ "file_path": "pytorch-image-models/CITATION.cff", "repo_id": "pytorch-image-models", "token_count": 122 }
255
# Changelog

## Jan 19, 2025
* Fix loading of LeViT safetensor weights, remove conversion code which should have been deactivated
* Add 'SO150M' ViT weights trained with SBB recipes, decent results, but not optimal shape for ImageNet-12k/1k pretrain/ft
  * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k_ft_in1k` - 86.7% top-1
  * `vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k` - 87.4% top-1
  * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k`
* Misc typing, typo, etc. cleanup
* 1.0.14 release to get above LeViT fix out

## Jan 9, 2025
* Add support to train and validate in pure `bfloat16` or `float16`
* `wandb` project name arg added by https://github.com/caojiaolong, use arg.experiment for name
* Fix old issue w/ checkpoint saving not working on filesystem w/o hard-link support (e.g. FUSE fs mounts)
* 1.0.13 release

## Jan 6, 2025
* Add `torch.utils.checkpoint.checkpoint()` wrapper in `timm.models` that defaults `use_reentrant=False`, unless `TIMM_REENTRANT_CKPT=1` is set in env.

## Dec 31, 2024
* `convnext_nano` 384x384 ImageNet-12k pretrain & fine-tune. https://huggingface.co/models?search=convnext_nano%20r384
* Add AIM-v2 encoders from https://github.com/apple/ml-aim, see on Hub: https://huggingface.co/models?search=timm%20aimv2
* Add PaliGemma2 encoders from https://github.com/google-research/big_vision to existing PaliGemma, see on Hub: https://huggingface.co/models?search=timm%20pali2
* Add missing L/14 DFN2B 39B CLIP ViT, `vit_large_patch14_clip_224.dfn2b_s39b`
* Fix existing `RmsNorm` layer & fn to match standard formulation, use PT 2.5 impl when possible. Move old impl to `SimpleNorm` layer, it's LN w/o centering or bias. There were only two `timm` models using it, and they have been updated.
* Allow override of `cache_dir` arg for model creation
* Pass through `trust_remote_code` for HF datasets wrapper
* `inception_next_atto` model added by creator
* Adan optimizer caution, and Lamb decoupled weight decay options
* Some feature_info metadata fixed by https://github.com/brianhou0208
* All OpenCLIP and JAX (CLIP, SigLIP, Pali, etc) model weights that used load time remapping were given their own HF Hub instances so that they work with `hf-hub:` based loading, and thus will work with new Transformers `TimmWrapperModel`

## Nov 28, 2024
* More optimizers
  * Add MARS optimizer (https://arxiv.org/abs/2411.10438, https://github.com/AGI-Arena/MARS)
  * Add LaProp optimizer (https://arxiv.org/abs/2002.04839, https://github.com/Z-T-WANG/LaProp-Optimizer)
  * Add masking from 'Cautious Optimizers' (https://arxiv.org/abs/2411.16085, https://github.com/kyleliang919/C-Optim) to Adafactor, Adafactor Big Vision, AdamW (legacy), Adopt, Lamb, LaProp, Lion, NadamW, RMSPropTF, SGDW
  * Cleanup some docstrings and type annotations re optimizers and factory
* Add MobileNet-V4 Conv Medium models pretrained on in12k and fine-tuned in1k @ 384x384
  * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k
  * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k
  * https://huggingface.co/timm/mobilenetv4_conv_medium.e180_ad_r384_in12k
  * https://huggingface.co/timm/mobilenetv4_conv_medium.e180_r384_in12k
* Add small cs3darknet, quite good for the speed
  * https://huggingface.co/timm/cs3darknet_focus_s.ra4_e3600_r256_in1k

## Nov 12, 2024
* Optimizer factory refactor
  * New factory works by registering optimizers using an OptimInfo dataclass w/ some key traits
  * Add `list_optimizers`, `get_optimizer_class`, `get_optimizer_info` to reworked `create_optimizer_v2` fn to explore optimizers, get info or class
  * deprecate `optim.optim_factory`, move fns to `optim/_optim_factory.py` and `optim/_param_groups.py` and encourage import via `timm.optim`
* Add Adopt (https://github.com/iShohei220/adopt) optimizer
* Add 'Big Vision' variant of Adafactor (https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) optimizer
* Fix original Adafactor to pick better factorization dims for convolutions
* Tweak LAMB optimizer with some improvements in torch.where functionality since original, refactor clipping a bit
* dynamic img size support in vit, deit, eva improved to support resize from non-square patch grids, thanks https://github.com/wojtke

## Oct 31, 2024
Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weights. See https://huggingface.co/blog/rwightman/resnet-trick-or-treat

## Oct 19, 2024
* Cleanup torch amp usage to avoid cuda specific calls, merge support for Ascend (NPU) devices from [MengqingCao](https://github.com/MengqingCao) that should work now in PyTorch 2.5 w/ new device extension autoloading feature. Tested Intel Arc (XPU) in Pytorch 2.5 too and it (mostly) worked.

## Oct 16, 2024
* Fix error on importing from deprecated path `timm.models.registry`, increased priority of existing deprecation warnings to be visible
* Port weights of InternViT-300M (https://huggingface.co/OpenGVLab/InternViT-300M-448px) to `timm` as `vit_intern300m_patch14_448`

### Oct 14, 2024
* Pre-activation (ResNetV2) version of 18/18d/34/34d ResNet model defs added by request (weights pending)
* Release 1.0.10

### Oct 11, 2024
* MambaOut (https://github.com/yuweihao/MambaOut) model & weights added. A cheeky take on SSM vision models w/o the SSM (essentially ConvNeXt w/ gating). A mix of original weights + custom variations & weights.
|model |img_size|top1 |top5 |param_count| |---------------------------------------------------------------------------------------------------------------------|--------|------|------|-----------| |[mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k)|384 |87.506|98.428|101.66 | |[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|288 |86.912|98.236|101.66 | |[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|224 |86.632|98.156|101.66 | |[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |288 |84.974|97.332|86.48 | |[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |288 |84.962|97.208|94.45 | |[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |288 |84.832|97.27 |88.83 | |[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |288 |84.72 |96.93 |84.81 | |[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |288 |84.598|97.098|48.5 | |[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |288 |84.5 |96.974|48.49 | |[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |224 |84.454|96.864|94.45 | |[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |224 |84.434|96.958|86.48 | |[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |224 |84.362|96.952|88.83 | |[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |224 |84.168|96.68 |84.81 | |[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |224 |84.086|96.63 |48.49 | |[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |224 |84.024|96.752|48.5 | |[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |288 |83.448|96.538|26.55 | |[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |224 |82.736|96.1 |26.55 | |[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |288 |81.054|95.718|9.14 | |[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |224 |79.986|94.986|9.14 | |[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |288 |79.848|95.14 |7.3 | |[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |224 |78.87 |94.408|7.3 | * SigLIP SO400M ViT fine-tunes on ImageNet-1k @ 378x378, added 378x378 option for existing SigLIP 384x384 models * [vit_so400m_patch14_siglip_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_378.webli_ft_in1k) - 89.42 top-1 * [vit_so400m_patch14_siglip_gap_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_gap_378.webli_ft_in1k) - 89.03 * SigLIP SO400M ViT encoder from recent multi-lingual (i18n) variant, patch16 @ 256x256 (https://huggingface.co/timm/ViT-SO400M-16-SigLIP-i18n-256). OpenCLIP update pending. * Add two ConvNeXt 'Zepto' models & weights (one w/ overlapped stem and one w/ patch stem). Uses RMSNorm, smaller than previous 'Atto', 2.2M params. 
* [convnext_zepto_rms_ols.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms_ols.ra4_e3600_r224_in1k) - 73.20 top-1 @ 224 * [convnext_zepto_rms.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms.ra4_e3600_r224_in1k) - 72.81 @ 224 ### Sept 2024 * Add a suite of tiny test models for improved unit tests and niche low-resource applications (https://huggingface.co/blog/rwightman/timm-tiny-test) * Add MobileNetV4-Conv-Small (0.5x) model (https://huggingface.co/posts/rwightman/793053396198664) * [mobilenetv4_conv_small_050.e3000_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k) - 65.81 top-1 @ 256, 64.76 @ 224 * Add MobileNetV3-Large variants trained with MNV4 Small recipe * [mobilenetv3_large_150d.ra4_e3600_r256_in1k](http://hf.co/timm/mobilenetv3_large_150d.ra4_e3600_r256_in1k) - 81.81 @ 320, 80.94 @ 256 * [mobilenetv3_large_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv3_large_100.ra4_e3600_r224_in1k) - 77.16 @ 256, 76.31 @ 224 ### Aug 21, 2024 * Updated SBB ViT models trained on ImageNet-12k and fine-tuned on ImageNet-1k, challenging quite a number of much larger, slower models | model | top1 | top5 | param_count | img_size | | -------------------------------------------------- | ------ | ------ | ----------- | -------- | | [vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 87.438 | 98.256 | 64.11 | 384 | | [vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 86.608 | 97.934 | 64.11 | 256 | | [vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 86.594 | 98.02 | 60.4 | 384 | | [vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 85.734 | 97.61 | 60.4 | 256 | * MobileNet-V1 1.25, EfficientNet-B1, & ResNet50-D weights w/ MNV4 baseline challenge recipe | model | top1 | top5 | param_count | img_size | |--------------------------------------------------------------------------------------------------------------------------|--------|--------|-------------|----------| | [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 81.838 | 95.922 | 25.58 | 288 | | [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 81.440 | 95.700 | 7.79 | 288 | | [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 80.952 | 95.384 | 25.58 | 224 | | [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 80.406 | 95.152 | 7.79 | 240 | | [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 77.600 | 93.804 | 6.27 | 256 | | [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 76.924 | 93.234 | 6.27 | 224 | * Add SAM2 (HieraDet) backbone arch & weight loading support * Add Hiera Small weights trained w/ abswin pos embed on in12k & fine-tuned on 1k |model |top1 |top5 |param_count| |---------------------------------|------|------|-----------| |hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k |84.912|97.260|35.01 | |hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k |84.560|97.106|35.01 | ### Aug 8, 2024 * Add RDNet ('DenseNets Reloaded', https://arxiv.org/abs/2403.19588), thanks 
[Donghyun Kim](https://github.com/dhkim0225) ### July 28, 2024 * Add `mobilenet_edgetpu_v2_m` weights w/ `ra4` mnv4-small based recipe. 80.1% top-1 @ 224 and 80.7 @ 256. * Release 1.0.8 ### July 26, 2024 * More MobileNet-v4 weights, ImageNet-12k pretrain w/ fine-tunes, and anti-aliased ConvLarge models | model |top1 |top1_err|top5 |top5_err|param_count|img_size| |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------| | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.99 |15.01 |97.294|2.706 |32.59 |544 | | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.772|15.228 |97.344|2.656 |32.59 |480 | | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.64 |15.36 |97.114|2.886 |32.59 |448 | | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.314|15.686 |97.102|2.898 |32.59 |384 | | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.824|16.176 |96.734|3.266 |32.59 |480 | | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.244|16.756 |96.392|3.608 |32.59 |384 | | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.99 |17.01 |96.67 |3.33 |11.07 |320 | | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.364|17.636 |96.256|3.744 |11.07 |256 | * Impressive MobileNet-V1 and EfficientNet-B0 baseline challenges (https://huggingface.co/blog/rwightman/mobilenet-baselines) | model |top1 |top1_err|top5 |top5_err|param_count|img_size| |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------| | [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |79.364|20.636 |94.754|5.246 |5.29 |256 | | [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |78.584|21.416 |94.338|5.662 |5.29 |224 | | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |76.596|23.404 |93.272|6.728 |5.28 |256 | | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |76.094|23.906 |93.004|6.996 |4.23 |256 | | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |75.662|24.338 |92.504|7.496 |5.28 |224 | | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |75.382|24.618 |92.312|7.688 |4.23 |224 | * Prototype of `set_input_size()` added to vit and swin v1/v2 models to allow changing image size, patch size, window size after model creation. * Improved support in swin for different size handling, in addition to `set_input_size`, `always_partition` and `strict_img_size` args have been added to `__init__` to allow more flexible input size constraints * Fix out of order indices info for intermediate 'Getter' feature wrapper, check out or range indices for same. 
* Add several `tiny` < .5M param models for testing that are actually trained on ImageNet-1k

|model |top1 |top1_err|top5 |top5_err|param_count|img_size|crop_pct|
|----------------------------|------|--------|------|--------|-----------|--------|--------|
|test_efficientnet.r160_in1k |47.156|52.844 |71.726|28.274 |0.36 |192 |1.0 |
|test_byobnet.r160_in1k |46.698|53.302 |71.674|28.326 |0.46 |192 |1.0 |
|test_efficientnet.r160_in1k |46.426|53.574 |70.928|29.072 |0.36 |160 |0.875 |
|test_byobnet.r160_in1k |45.378|54.622 |70.572|29.428 |0.46 |160 |0.875 |
|test_vit.r160_in1k|42.0 |58.0 |68.664|31.336 |0.37 |192 |1.0 |
|test_vit.r160_in1k|40.822|59.178 |67.212|32.788 |0.37 |160 |0.875 |

* Fix vit reg token init, thanks [Promisery](https://github.com/Promisery)
* Other misc fixes

### June 24, 2024
* 3 more MobileNetV4 hybrid weights with different MQA weight init scheme

| model |top1 |top1_err|top5 |top5_err|param_count|img_size|
|--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
| [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |84.356|15.644 |96.892 |3.108 |37.76 |448 |
| [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |83.990|16.010 |96.702 |3.298 |37.76 |384 |
| [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |83.394|16.606 |96.760|3.240 |11.07 |448 |
| [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |82.968|17.032 |96.474|3.526 |11.07 |384 |
| [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |82.492|17.508 |96.278|3.722 |11.07 |320 |
| [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |81.446|18.554 |95.704|4.296 |11.07 |256 |

* florence2 weight loading in DaViT model

### June 12, 2024
* MobileNetV4 models and initial set of `timm` trained weights added:

| model |top1 |top1_err|top5 |top5_err|param_count|img_size|
|--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------|
| [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |84.266|15.734 |96.936 |3.064 |37.76 |448 |
| [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |83.800|16.200 |96.770 |3.230 |37.76 |384 |
| [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |83.392|16.608 |96.622 |3.378 |32.59 |448 |
| [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |82.952|17.048 |96.266 |3.734 |32.59 |384 |
| [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |82.674|17.326 |96.31 |3.69 |32.59 |320 |
| [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |81.862|18.138 |95.69 |4.31 |32.59 |256 |
| [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |81.276|18.724 |95.742|4.258 |11.07 |256 |
| [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |80.858|19.142 |95.768|4.232 |9.72 |320 |
| [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |80.442|19.558 |95.38 |4.62 |11.07 |224 |
| [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |80.142|19.858 |95.298|4.702 |9.72 |256 |
| [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |79.928|20.072 |95.184|4.816 |9.72 |256 |
| [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.808|20.192 |95.186|4.814 |9.72 |256 |
| [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |79.438|20.562 |94.932|5.068 |9.72 |224 |
| [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.094|20.906 |94.77 |5.23 |9.72 |224 |
| [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |74.616|25.384 |92.072|7.928 |3.77 |256 |
| [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |74.292|25.708 |92.116|7.884 |3.77 |256 |
| [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |73.756|26.244 |91.422|8.578 |3.77 |224 |
| [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |73.454|26.546 |91.34 |8.66 |3.77 |224 |

* Apple MobileCLIP (https://arxiv.org/pdf/2311.17049, FastViT and ViT-B) image tower model support & weights added (part of OpenCLIP support).
* ViTamin (https://arxiv.org/abs/2404.02132) CLIP image tower model & weights added (part of OpenCLIP support).
* OpenAI CLIP Modified ResNet image tower modelling & weight support (via ByobNet). Refactor AttentionPool2d.

### May 14, 2024
* Support loading PaliGemma jax weights into SigLIP ViT models with average pooling.
* Add Hiera models from Meta (https://github.com/facebookresearch/hiera).
* Add `normalize=` flag for transforms, return non-normalized torch.Tensor with original dtype (for `chug`)
* Version 1.0.3 release

### May 11, 2024
* `Searching for Better ViT Baselines (For the GPU Poor)` weights and vit variants released. Exploring model shapes between Tiny and Base.
| model | top1 | top5 | param_count | img_size | | -------------------------------------------------- | ------ | ------ | ----------- | -------- | | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 86.202 | 97.874 | 64.11 | 256 | | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 85.418 | 97.48 | 60.4 | 256 | | [vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k) | 84.322 | 96.812 | 63.95 | 256 | | [vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k) | 83.906 | 96.684 | 60.23 | 256 | | [vit_base_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_base_patch16_rope_reg1_gap_256.sbb_in1k) | 83.866 | 96.67 | 86.43 | 256 | | [vit_medium_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_rope_reg1_gap_256.sbb_in1k) | 83.81 | 96.824 | 38.74 | 256 | | [vit_betwixt_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in1k) | 83.706 | 96.616 | 60.4 | 256 | | [vit_betwixt_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg1_gap_256.sbb_in1k) | 83.628 | 96.544 | 60.4 | 256 | | [vit_medium_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg4_gap_256.sbb_in1k) | 83.47 | 96.622 | 38.88 | 256 | | [vit_medium_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg1_gap_256.sbb_in1k) | 83.462 | 96.548 | 38.88 | 256 | | [vit_little_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_little_patch16_reg4_gap_256.sbb_in1k) | 82.514 | 96.262 | 22.52 | 256 | | [vit_wee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_wee_patch16_reg1_gap_256.sbb_in1k) | 80.256 | 95.360 | 13.42 | 256 | | [vit_pwee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_pwee_patch16_reg1_gap_256.sbb_in1k) | 80.072 | 95.136 | 15.25 | 256 | | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 64.11 | 256 | | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 60.4 | 256 | * AttentionExtract helper added to extract attention maps from `timm` models. See example in https://github.com/huggingface/pytorch-image-models/discussions/1232#discussioncomment-9320949 * `forward_intermediates()` API refined and added to more models including some ConvNets that have other extraction methods. * 1017 of 1047 model architectures support `features_only=True` feature extraction. Remaining 34 architectures can be supported but based on priority requests. * Remove torch.jit.script annotated functions including old JIT activations. Conflict with dynamo and dynamo does a much better job when used. ### April 11, 2024 * Prepping for a long overdue 1.0 release, things have been stable for a while now. * Significant feature that's been missing for a while, `features_only=True` support for ViT models with flat hidden states or non-std module layouts (so far covering `'vit_*', 'twins_*', 'deit*', 'beit*', 'mvitv2*', 'eva*', 'samvit_*', 'flexivit*'`) * Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or directly. 
```python
import torch
import timm

model = timm.create_model('vit_base_patch16_224')

x = torch.randn(2, 3, 224, 224)
final_feat, intermediates = model.forward_intermediates(x)
output = model.forward_head(final_feat)  # pooling + classifier head

print(final_feat.shape)  # torch.Size([2, 197, 768])

for f in intermediates:
    print(f.shape)  # torch.Size([2, 768, 14, 14]) -- one per block, 12 for ViT-Base

print(output.shape)  # torch.Size([2, 1000])
```

```python
import torch
import timm

model = timm.create_model(
    'eva02_base_patch16_clip_224',
    pretrained=True,
    img_size=512,
    features_only=True,
    out_indices=(-3, -2,),
)
output = model(torch.randn(2, 3, 512, 512))

for o in output:
    print(o.shape)  # torch.Size([2, 768, 32, 32]) for both selected indices
```

* TinyCLIP vision tower weights added, thx [Thien Tran](https://github.com/gau-nernst)

### Feb 19, 2024
* Next-ViT models added. Adapted from https://github.com/bytedance/Next-ViT
* HGNet and PP-HGNetV2 models added. Adapted from https://github.com/PaddlePaddle/PaddleClas by [SeeFun](https://github.com/seefun)
* Removed setup.py, moved to pyproject.toml based build supported by PDM
* Add updated model EMA impl using _for_each for less overhead
* Support device args in train script for non GPU devices
* Other misc fixes and small additions
* Min supported Python version increased to 3.8
* Release 0.9.16

### Jan 8, 2024
Datasets & transform refactoring
* HuggingFace streaming (iterable) dataset support (`--dataset hfids:org/dataset`)
* Webdataset wrapper tweaks for improved split info fetching, can auto fetch splits from supported HF hub webdataset
* Tested HF `datasets` and webdataset wrapper streaming from HF hub with recent `timm` ImageNet uploads to https://huggingface.co/timm
* Make input & target column/field keys consistent across datasets and pass via args
* Full monochrome support when using e.g. `--input-size 1 224 224` or `--in-chans 1`, sets PIL image conversion appropriately in dataset
* Improved several alternate crop & resize transforms (ResizeKeepRatio, RandomCropOrPad, etc) for use in PixParse document AI project
* Add SimCLR style color jitter prob along with grayscale and gaussian blur options to augmentations and args
* Allow train without validation set (`--val-split ''`) in train script
* Add `--bce-sum` (sum over class dim) and `--bce-pos-weight` (positive weighting) args for training as they're common BCE loss tweaks I was often hard coding

### Nov 23, 2023
* Added EfficientViT-Large models, thanks [SeeFun](https://github.com/seefun)
* Fix Python 3.7 compat, will be dropping support for it soon
* Other misc fixes
* Release 0.9.12

### Nov 20, 2023
* Added significant flexibility for Hugging Face Hub based timm models via `model_args` config entry. `model_args` will be passed as kwargs through to models on creation.
* See example at https://huggingface.co/gaunernst/vit_base_patch16_1024_128.audiomae_as2m_ft_as20k/blob/main/config.json * Usage: https://github.com/huggingface/pytorch-image-models/discussions/2035 * Updated imagenet eval and test set csv files with latest models * `vision_transformer.py` typing and doc cleanup by [Laureηt](https://github.com/Laurent2916) * 0.9.11 release ### Nov 3, 2023 * [DFN (Data Filtering Networks)](https://huggingface.co/papers/2309.17425) and [MetaCLIP](https://huggingface.co/papers/2309.16671) ViT weights added * DINOv2 'register' ViT model weights added (https://huggingface.co/papers/2309.16588, https://huggingface.co/papers/2304.07193) * Add `quickgelu` ViT variants for OpenAI, DFN, MetaCLIP weights that use it (less efficient) * Improved typing added to ResNet, MobileNet-v3 thanks to [Aryan](https://github.com/a-r-r-o-w) * ImageNet-12k fine-tuned (from LAION-2B CLIP) `convnext_xxlarge` * 0.9.9 release ### Oct 20, 2023 * [SigLIP](https://huggingface.co/papers/2303.15343) image tower weights supported in `vision_transformer.py`. * Great potential for fine-tune and downstream feature use. * Experimental 'register' support in vit models as per [Vision Transformers Need Registers](https://huggingface.co/papers/2309.16588) * Updated RepViT with new weight release. Thanks [wangao](https://github.com/jameslahm) * Add patch resizing support (on pretrained weight load) to Swin models * 0.9.8 release pending ### Sep 1, 2023 * TinyViT added by [SeeFun](https://github.com/seefun) * Fix EfficientViT (MIT) to use torch.autocast so it works back to PT 1.10 * 0.9.7 release ### Aug 28, 2023 * Add dynamic img size support to models in `vision_transformer.py`, `vision_transformer_hybrid.py`, `deit.py`, and `eva.py` w/o breaking backward compat. * Add `dynamic_img_size=True` to args at model creation time to allow changing the grid size (interpolate abs and/or ROPE pos embed each forward pass). * Add `dynamic_img_pad=True` to allow image sizes that aren't divisible by patch size (pad bottom right to patch size each forward pass). * Enabling either dynamic mode will break FX tracing unless PatchEmbed module added as leaf. * Existing method of resizing position embedding by passing different `img_size` (interpolate pretrained embed weights once) on creation still works. * Existing method of changing `patch_size` (resize pretrained patch_embed weights once) on creation still works. 
* Example validation cmd `python validate.py --data-dir /imagenet --model vit_base_patch16_224 --amp --amp-dtype bfloat16 --img-size 255 --crop-pct 1.0 --model-kwargs dynamic_img_size=True dynamic_img_pad=True`

### Aug 25, 2023
* Many new models since last release
* FastViT - https://arxiv.org/abs/2303.14189
* MobileOne - https://arxiv.org/abs/2206.04040
* InceptionNeXt - https://arxiv.org/abs/2303.16900
* RepGhostNet - https://arxiv.org/abs/2211.06088 (thanks https://github.com/ChengpengChen)
* GhostNetV2 - https://arxiv.org/abs/2211.12905 (thanks https://github.com/yehuitang)
* EfficientViT (MSRA) - https://arxiv.org/abs/2305.07027 (thanks https://github.com/seefun)
* EfficientViT (MIT) - https://arxiv.org/abs/2205.14756 (thanks https://github.com/seefun)
* Add `--reparam` arg to `benchmark.py`, `onnx_export.py`, and `validate.py` to trigger layer reparameterization / fusion for models with any one of `reparameterize()`, `switch_to_deploy()` or `fuse()`
* Including FastViT, MobileOne, RepGhostNet, EfficientViT (MSRA), RepViT, RepVGG, and LeViT
* Preparing 0.9.6 'back to school' release

### Aug 11, 2023
* Swin, MaxViT, CoAtNet, and BEiT models support resizing of image/window size on creation with adaptation of pretrained weights
* Example validation cmd to test w/ non-square resize `python validate.py --data-dir /imagenet --model swin_base_patch4_window7_224.ms_in22k_ft_in1k --amp --amp-dtype bfloat16 --input-size 3 256 320 --model-kwargs window_size=8,10 img_size=256,320`

### Aug 3, 2023
* Add GluonCV weights for HRNet w18_small and w18_small_v2. Converted by [SeeFun](https://github.com/seefun)
* Fix `selecsls*` model naming regression
* Patch and position embedding for ViT/EVA work for bfloat16/float16 weights on load (or activations for on-the-fly resize)
* v0.9.5 release prep

### July 27, 2023
* Added timm trained `seresnextaa201d_32x8d.sw_in12k_ft_in1k_384` weights (and `.sw_in12k` pretrain) with 87.3% top-1 on ImageNet-1k, best ImageNet ResNet family model I'm aware of.
* RepViT model and weights (https://arxiv.org/abs/2307.09283) added by [wangao](https://github.com/jameslahm)
* I-JEPA ViT feature weights (no classifier) added by [SeeFun](https://github.com/seefun)
* SAM-ViT (segment anything) feature weights (no classifier) added by [SeeFun](https://github.com/seefun)
* Add support for alternative feat extraction methods and -ve indices to EfficientNet
* Add NAdamW optimizer
* Misc fixes

### May 11, 2023
* `timm` 0.9 released, transition from 0.8.xdev releases

### May 10, 2023
* Hugging Face Hub downloading is now default, 1132 models on https://huggingface.co/timm, 1163 weights in `timm`
* DINOv2 vit feature backbone weights added thanks to [Leng Yue](https://github.com/leng-yue)
* FB MAE vit feature backbone weights added
* OpenCLIP DataComp-XL L/14 feat backbone weights added
* MetaFormer (poolformer-v2, caformer, convformer, updated poolformer (v1)) w/ weights added by [Fredo Guan](https://github.com/fffffgggg54)
* Experimental `get_intermediate_layers` function on vit/deit models for grabbing hidden states (inspired by DINO impl). This is WIP and may change significantly... feedback welcome.
* Model creation throws an error if `pretrained=True` and no weights exist (instead of continuing with random initialization)
* Fix regression with inception / nasnet TF sourced weights with 1001 classes in original classifiers
* bitsandbytes (https://github.com/TimDettmers/bitsandbytes) optimizers added to factory, use `bnb` prefix, ie `bnbadam8bit`
* Misc cleanup and fixes
* Final testing before switching to a 0.9 and bringing `timm` out of pre-release state

### April 27, 2023
* 97% of `timm` models uploaded to HF Hub and almost all updated to support multi-weight pretrained configs
* Minor cleanup and refactoring of another batch of models as multi-weight added. More fused_attn (F.sdpa) and features_only support, and torchscript fixes.

### April 21, 2023
* Gradient accumulation support added to train script and tested (`--grad-accum-steps`), thanks [Taeksang Kim](https://github.com/voidbag)
* More weights on HF Hub (cspnet, cait, volo, xcit, tresnet, hardcorenas, densenet, dpn, vovnet, xception_aligned)
* Added `--head-init-scale` and `--head-init-bias` to train.py to scale classifier head and set fixed bias for fine-tune
* Remove all InplaceABN (`inplace_abn`) use, replaced use in tresnet with standard BatchNorm (modified weights accordingly).

### April 12, 2023
* Add ONNX export script, validate script, helpers that I've had kicking around for a long time. Tweak 'same' padding for better export w/ recent ONNX + pytorch.
* Refactor dropout args for vit and vit-like models, separate drop_rate into `drop_rate` (classifier dropout), `proj_drop_rate` (block mlp / out projections), `pos_drop_rate` (position embedding drop), `attn_drop_rate` (attention dropout). Also add patch dropout (FLIP) to vit and eva models.
* Fused F.scaled_dot_product_attention support added to more vit models, add env var (TIMM_FUSED_ATTN) to control, and config interface to enable/disable
* Add EVA-CLIP backbones w/ image tower weights, all the way up to 4B param 'enormous' model, and 336x336 OpenAI ViT mode that was missed.

### April 5, 2023
* ALL ResNet models pushed to Hugging Face Hub with multi-weight support
* All past `timm` trained weights added with recipe based tags to differentiate
* All ResNet strikes back A1/A2/A3 (seed 0) and R50 example B/C1/C2/D weights available
* Add torchvision v2 recipe weights to existing torchvision originals
* See comparison table in https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k_288#model-comparison
* New ImageNet-12k + ImageNet-1k fine-tunes available for a few anti-aliased ResNet models
* `resnetaa50d.sw_in12k_ft_in1k` - 81.7 @ 224, 82.6 @ 288
* `resnetaa101d.sw_in12k_ft_in1k` - 83.5 @ 224, 84.1 @ 288
* `seresnextaa101d_32x8d.sw_in12k_ft_in1k` - 86.0 @ 224, 86.5 @ 288
* `seresnextaa101d_32x8d.sw_in12k_ft_in1k_288` - 86.5 @ 288, 86.7 @ 320

### March 31, 2023
* Add first ConvNext-XXLarge CLIP -> IN-1k fine-tune and IN-12k intermediate fine-tunes for convnext-base/large CLIP models.
| model |top1 |top5 |img_size|param_count|gmacs |macts | |----------------------------------------------------------------------------------------------------------------------|------|------|--------|-----------|------|------| | [convnext_xxlarge.clip_laion2b_soup_ft_in1k](https://huggingface.co/timm/convnext_xxlarge.clip_laion2b_soup_ft_in1k) |88.612|98.704|256 |846.47 |198.09|124.45| | convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384 |88.312|98.578|384 |200.13 |101.11|126.74| | convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320 |87.968|98.47 |320 |200.13 |70.21 |88.02 | | convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384 |87.138|98.212|384 |88.59 |45.21 |84.49 | | convnext_base.clip_laion2b_augreg_ft_in12k_in1k |86.344|97.97 |256 |88.59 |20.09 |37.55 | * Add EVA-02 MIM pretrained and fine-tuned weights, push to HF hub and update model cards for all EVA models. First model over 90% top-1 (99% top-5)! Check out the original code & weights at https://github.com/baaivision/EVA for more details on their work blending MIM, CLIP w/ many model, dataset, and train recipe tweaks. | model |top1 |top5 |param_count|img_size| |----------------------------------------------------|------|------|-----------|--------| | [eva02_large_patch14_448.mim_m38m_ft_in22k_in1k](https://huggingface.co/timm/eva02_large_patch14_448.mim_m38m_ft_in1k) |90.054|99.042|305.08 |448 | | eva02_large_patch14_448.mim_in22k_ft_in22k_in1k |89.946|99.01 |305.08 |448 | | eva_giant_patch14_560.m30m_ft_in22k_in1k |89.792|98.992|1014.45 |560 | | eva02_large_patch14_448.mim_in22k_ft_in1k |89.626|98.954|305.08 |448 | | eva02_large_patch14_448.mim_m38m_ft_in1k |89.57 |98.918|305.08 |448 | | eva_giant_patch14_336.m30m_ft_in22k_in1k |89.56 |98.956|1013.01 |336 | | eva_giant_patch14_336.clip_ft_in1k |89.466|98.82 |1013.01 |336 | | eva_large_patch14_336.in22k_ft_in22k_in1k |89.214|98.854|304.53 |336 | | eva_giant_patch14_224.clip_ft_in1k |88.882|98.678|1012.56 |224 | | eva02_base_patch14_448.mim_in22k_ft_in22k_in1k |88.692|98.722|87.12 |448 | | eva_large_patch14_336.in22k_ft_in1k |88.652|98.722|304.53 |336 | | eva_large_patch14_196.in22k_ft_in22k_in1k |88.592|98.656|304.14 |196 | | eva02_base_patch14_448.mim_in22k_ft_in1k |88.23 |98.564|87.12 |448 | | eva_large_patch14_196.in22k_ft_in1k |87.934|98.504|304.14 |196 | | eva02_small_patch14_336.mim_in22k_ft_in1k |85.74 |97.614|22.13 |336 | | eva02_tiny_patch14_336.mim_in22k_ft_in1k |80.658|95.524|5.76 |336 | * Multi-weight and HF hub for DeiT and MLP-Mixer based models ### March 22, 2023 * More weights pushed to HF hub along with multi-weight support, including: `regnet.py`, `rexnet.py`, `byobnet.py`, `resnetv2.py`, `swin_transformer.py`, `swin_transformer_v2.py`, `swin_transformer_v2_cr.py` * Swin Transformer models support feature extraction (NCHW feat maps for `swinv2_cr_*`, and NHWC for all others) and spatial embedding outputs. * FocalNet (from https://github.com/microsoft/FocalNet) models and weights added with significant refactoring, feature extraction, no fixed resolution / sizing constraint * RegNet weights increased with HF hub push, SWAG, SEER, and torchvision v2 weights. SEER is pretty poor wrt to performance for model size, but possibly useful. 
* More ImageNet-12k pretrained and 1k fine-tuned `timm` weights:
* `rexnetr_200.sw_in12k_ft_in1k` - 82.6 @ 224, 83.2 @ 288
* `rexnetr_300.sw_in12k_ft_in1k` - 84.0 @ 224, 84.5 @ 288
* `regnety_120.sw_in12k_ft_in1k` - 85.0 @ 224, 85.4 @ 288
* `regnety_160.lion_in12k_ft_in1k` - 85.6 @ 224, 86.0 @ 288
* `regnety_160.sw_in12k_ft_in1k` - 85.6 @ 224, 86.0 @ 288 (compare to SWAG PT + 1k FT this is same BUT much lower res, blows SEER FT away)
* Model name deprecation + remapping functionality added (a milestone for bringing 0.8.x out of pre-release). Mappings being added...
* Minor bug fixes and improvements.

### Feb 26, 2023
* Add ConvNeXt-XXLarge CLIP pretrained image tower weights for fine-tune & features (fine-tuning TBD) -- see [model card](https://huggingface.co/laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup)
* Update `convnext_xxlarge` default LayerNorm eps to 1e-5 (for CLIP weights, improved stability)
* 0.8.15dev0

### Feb 20, 2023
* Add 320x320 `convnext_large_mlp.clip_laion2b_ft_320` and `convnext_large_mlp.clip_laion2b_ft_soup_320` CLIP image tower weights for features & fine-tune
* 0.8.13dev0 pypi release for latest changes w/ move to huggingface org

### Feb 16, 2023
* `safetensors` checkpoint support added
* Add ideas from 'Scaling Vision Transformers to 22 B. Params' (https://arxiv.org/abs/2302.05442) -- qk norm, RmsNorm, parallel block
* Add F.scaled_dot_product_attention support (PyTorch 2.0 only) to `vit_*`, `vit_relpos*`, `coatnet` / `maxxvit` (to start)
* Lion optimizer (w/ multi-tensor option) added (https://arxiv.org/abs/2302.06675)
* gradient checkpointing works with `features_only=True`

### Feb 7, 2023
* New inference benchmark numbers added in [results](results/) folder.
* Add convnext LAION CLIP trained weights and initial set of in1k fine-tunes
* `convnext_base.clip_laion2b_augreg_ft_in1k` - 86.2% @ 256x256
* `convnext_base.clip_laiona_augreg_ft_in1k_384` - 86.5% @ 384x384
* `convnext_large_mlp.clip_laion2b_augreg_ft_in1k` - 87.3% @ 256x256
* `convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384` - 87.9% @ 384x384
* Add DaViT models. Supports `features_only=True` (see the usage sketch below). Adapted from https://github.com/dingmyu/davit by [Fredo](https://github.com/fffffgggg54).
* Use a common NormMlpClassifierHead across MaxViT, ConvNeXt, DaViT
* Add EfficientFormer-V2 model, update EfficientFormer, and refactor LeViT (closely related architectures). Weights on HF hub.
* New EfficientFormer-V2 arch, significant refactor from original at (https://github.com/snap-research/EfficientFormer). Supports `features_only=True`.
* Minor updates to EfficientFormer.
* Refactor LeViT models to stages, add `features_only=True` support to new `conv` variants, weight remap required.
* Move ImageNet meta-data (synsets, indices) from `/results` to [`timm/data/_info`](timm/data/_info/).
* Add ImageNetInfo / DatasetInfo classes to provide labelling for various ImageNet classifier layouts in `timm`
* Update `inference.py` to use, try: `python inference.py --data-dir /folder/to/images --model convnext_small.in12k --label-type detail --topk 5`
* Ready for 0.8.10 pypi pre-release (final testing).
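A minimal sketch of the `features_only=True` usage noted for the models above (`efficientformerv2_s0` is used purely as an example; any of the models listed with feature extraction support work the same way):

```python
import torch
import timm

# Create the model as a feature backbone: forward() returns a list of feature
# maps (one per selected stage) instead of classification logits.
model = timm.create_model('efficientformerv2_s0', pretrained=False, features_only=True)
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))

print(model.feature_info.channels())  # channel count of each returned feature map
for f in feats:
    print(f.shape)
```

The same pattern applies to the DaViT models and the refactored LeViT `conv` variants mentioned above.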
### Jan 20, 2023 * Add two convnext 12k -> 1k fine-tunes at 384x384 * `convnext_tiny.in12k_ft_in1k_384` - 85.1 @ 384 * `convnext_small.in12k_ft_in1k_384` - 86.2 @ 384 * Push all MaxxViT weights to HF hub, and add new ImageNet-12k -> 1k fine-tunes for `rw` base MaxViT and CoAtNet 1/2 models |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| 
|[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### Jan 11, 2023 * Update ConvNeXt ImageNet-12k pretrain series w/ two new fine-tuned weights (and pre FT `.in12k` tags) * `convnext_nano.in12k_ft_in1k` - 82.3 @ 224, 82.9 @ 288 (previously released) * `convnext_tiny.in12k_ft_in1k` - 84.2 @ 224, 84.5 @ 288 * `convnext_small.in12k_ft_in1k` - 85.2 @ 224, 85.3 @ 288 ### Jan 6, 2023 * Finally got around to adding `--model-kwargs` and `--opt-kwargs` to scripts to pass through rare args directly to model classes from cmd line * `train.py --data-dir /imagenet --model resnet50 --amp --model-kwargs output_stride=16 act_layer=silu` * `train.py --data-dir /imagenet 
--model vit_base_patch16_clip_224 --img-size 240 --amp --model-kwargs img_size=240 patch_size=12`
* Cleanup some popular models to better support arg passthrough / merge with model configs, more to go.

### Jan 5, 2023
* ConvNeXt-V2 models and weights added to existing `convnext.py`
* Paper: [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808)
* Reference impl: https://github.com/facebookresearch/ConvNeXt-V2 (NOTE: weights currently CC-BY-NC)

### Dec 23, 2022 🎄☃
* Add FlexiViT models and weights from https://github.com/google-research/big_vision (check out paper at https://arxiv.org/abs/2212.08013)
* NOTE currently resizing is static on model creation, on-the-fly dynamic / train patch size sampling is a WIP
* Many more models updated to multi-weight and downloadable via HF hub now (convnext, efficientnet, mobilenet, vision_transformer*, beit)
* More model pretrained tag and adjustments, some model names changed (working on deprecation translations, consider main branch DEV branch right now, use 0.6.x for stable use)
* More ImageNet-12k (subset of 22k) pretrain models popping up:
* `efficientnet_b5.in12k_ft_in1k` - 85.9 @ 448x448
* `vit_medium_patch16_gap_384.in12k_ft_in1k` - 85.5 @ 384x384
* `vit_medium_patch16_gap_256.in12k_ft_in1k` - 84.5 @ 256x256
* `convnext_nano.in12k_ft_in1k` - 82.9 @ 288x288

### Dec 8, 2022
* Add 'EVA l' to `vision_transformer.py`, MAE style ViT-L/14 MIM pretrain w/ EVA-CLIP targets, FT on ImageNet-1k (w/ ImageNet-22k intermediate for some)
* original source: https://github.com/baaivision/EVA

| model | top1 | param_count | gmac | macts | hub |
|:------------------------------------------|-----:|------------:|------:|------:|:----------------------------------------|
| eva_large_patch14_336.in22k_ft_in22k_in1k | 89.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_336.in22k_ft_in1k | 88.7 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in22k_in1k | 88.6 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in1k | 87.9 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |

### Dec 6, 2022
* Add 'EVA g', BEiT style ViT-g/14 model weights w/ both MIM pretrain and CLIP pretrain to `beit.py`.
fn will merge), still sorting out deprecation handling * bugs are likely, but I need feedback so please try it out * if stability is needed, please use 0.6.x pypi releases or clone from [0.6.x branch](https://github.com/rwightman/pytorch-image-models/tree/0.6.x) * Support for PyTorch 2.0 compile is added in train/validate/inference/benchmark, use `--torchcompile` argument * Inference script allows more control over output, select k for top-class index + prob json, csv or parquet output * Add a full set of fine-tuned CLIP image tower weights from both LAION-2B and original OpenAI CLIP models | model | top1 | param_count | gmac | macts | hub | |:-------------------------------------------------|-------:|--------------:|-------:|--------:|:-------------------------------------------------------------------------------------| | vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k | 88.6 | 632.5 | 391 | 407.5 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_336.openai_ft_in12k_in1k | 88.3 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.openai_ft_in12k_in1k) | | vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k | 88.2 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_336.laion2b_ft_in12k_in1k | 88.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_224.openai_ft_in12k_in1k | 88.2 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k) | | vit_large_patch14_clip_224.laion2b_ft_in12k_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_224.openai_ft_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in1k) | | vit_large_patch14_clip_336.laion2b_ft_in1k | 87.9 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in1k) | | vit_huge_patch14_clip_224.laion2b_ft_in1k | 87.6 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in1k) | | vit_large_patch14_clip_224.laion2b_ft_in1k | 87.3 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in1k) | | vit_base_patch16_clip_384.laion2b_ft_in12k_in1k | 87.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_384.openai_ft_in12k_in1k | 87 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in12k_in1k) | | vit_base_patch16_clip_384.laion2b_ft_in1k | 86.6 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in1k) | | vit_base_patch16_clip_384.openai_ft_in1k | 86.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in1k) | | vit_base_patch16_clip_224.laion2b_ft_in12k_in1k | 86.2 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_224.openai_ft_in12k_in1k | 85.9 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k) | | vit_base_patch32_clip_448.laion2b_ft_in12k_in1k | 85.8 | 88.3 | 17.9 | 23.9 | 
[link](https://huggingface.co/timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_224.laion2b_ft_in1k | 85.5 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in1k) | | vit_base_patch32_clip_384.laion2b_ft_in12k_in1k | 85.4 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_224.openai_ft_in1k | 85.3 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in1k) | | vit_base_patch32_clip_384.openai_ft_in12k_in1k | 85.2 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.openai_ft_in12k_in1k) | | vit_base_patch32_clip_224.laion2b_ft_in12k_in1k | 83.3 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k) | | vit_base_patch32_clip_224.laion2b_ft_in1k | 82.6 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in1k) | | vit_base_patch32_clip_224.openai_ft_in1k | 81.9 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.openai_ft_in1k) | * Port of MaxViT Tensorflow Weights from official impl at https://github.com/google-research/maxvit * There was larger than expected drops for the upscaled 384/512 in21k fine-tune weights, possible detail missing, but the 21k FT did seem sensitive to small preprocessing | model | top1 | param_count | gmac | macts | hub | |:-----------------------------------|-------:|--------------:|-------:|--------:|:-----------------------------------------------------------------------| | maxvit_xlarge_tf_512.in21k_ft_in1k | 88.5 | 475.8 | 534.1 | 1413.2 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) | | maxvit_xlarge_tf_384.in21k_ft_in1k | 88.3 | 475.3 | 292.8 | 668.8 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) | | maxvit_base_tf_512.in21k_ft_in1k | 88.2 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) | | maxvit_large_tf_512.in21k_ft_in1k | 88 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) | | maxvit_large_tf_384.in21k_ft_in1k | 88 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) | | maxvit_base_tf_384.in21k_ft_in1k | 87.9 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) | | maxvit_base_tf_512.in1k | 86.6 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in1k) | | maxvit_large_tf_512.in1k | 86.5 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in1k) | | maxvit_base_tf_384.in1k | 86.3 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in1k) | | maxvit_large_tf_384.in1k | 86.2 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in1k) | | maxvit_small_tf_512.in1k | 86.1 | 69.1 | 67.3 | 383.8 | [link](https://huggingface.co/timm/maxvit_small_tf_512.in1k) | | maxvit_tiny_tf_512.in1k | 85.7 | 31 | 33.5 | 257.6 | [link](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) | | maxvit_small_tf_384.in1k | 85.5 | 69 | 35.9 | 183.6 | [link](https://huggingface.co/timm/maxvit_small_tf_384.in1k) | | maxvit_tiny_tf_384.in1k | 85.1 | 31 | 17.5 | 123.4 | [link](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) | | maxvit_large_tf_224.in1k | 84.9 | 211.8 | 43.7 | 127.4 | 
[link](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |
| maxvit_base_tf_224.in1k | 84.9 | 119.5 | 24 | 95 | [link](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |
| maxvit_small_tf_224.in1k | 84.4 | 68.9 | 11.7 | 53.2 | [link](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |
| maxvit_tiny_tf_224.in1k | 83.4 | 30.9 | 5.6 | 35.8 | [link](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |

### Oct 15, 2022
* Train and validation script enhancements
* Non-GPU (ie CPU) device support
* SLURM compatibility for train script
* HF datasets support (via ReaderHfds)
* TFDS/WDS dataloading improvements (sample padding/wrap for distributed use fixed wrt sample count estimate)
* in_chans !=3 support for scripts / loader
* Adan optimizer
* Can enable per-step LR scheduling via args
* Dataset 'parsers' renamed to 'readers', more descriptive of purpose
* AMP args changed, APEX via `--amp-impl apex`, bfloat16 supported via `--amp-dtype bfloat16`
* main branch switched to 0.7.x version, 0.6.x forked for stable release of weight only adds
* master -> main branch rename

### Oct 10, 2022
* More weights in `maxxvit` series, incl first ConvNeXt block based `coatnext` and `maxxvit` experiments:
* `coatnext_nano_rw_224` - 82.0 @ 224 (G) -- (uses ConvNeXt conv block, no BatchNorm)
* `maxxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.7 @ 320 (G) (uses ConvNeXt conv block, no BN)
* `maxvit_rmlp_small_rw_224` - 84.5 @ 224, 85.1 @ 320 (G)
* `maxxvit_rmlp_small_rw_256` - 84.6 @ 256, 84.9 @ 288 (G) -- could be trained better, hparams need tuning (uses ConvNeXt block, no BN)
* `coatnet_rmlp_2_rw_224` - 84.6 @ 224, 85 @ 320 (T)
* NOTE: official MaxVit weights (in1k) have been released at https://github.com/google-research/maxvit -- some extra work is needed to port and adapt since my impl was created independently of theirs and has a few small differences + the whole TF same padding fun.

### Sept 23, 2022
* LAION-2B CLIP image towers supported as pretrained backbones for fine-tune or features (no classifier)
* vit_base_patch32_224_clip_laion2b
* vit_large_patch14_224_clip_laion2b
* vit_huge_patch14_224_clip_laion2b
* vit_giant_patch14_224_clip_laion2b

### Sept 7, 2022
* Hugging Face [`timm` docs](https://huggingface.co/docs/hub/timm) home now exists, look for more here in the future
* Add BEiT-v2 weights for base and large 224x224 models from https://github.com/microsoft/unilm/tree/master/beit2
* Add more weights in `maxxvit` series incl a `pico` (7.5M params, 1.9 GMACs), two `tiny` variants:
* `maxvit_rmlp_pico_rw_256` - 80.5 @ 256, 81.3 @ 320 (T)
* `maxvit_tiny_rw_224` - 83.5 @ 224 (G)
* `maxvit_rmlp_tiny_rw_256` - 84.2 @ 256, 84.8 @ 320 (T)

### Aug 29, 2022
* MaxVit window size scales with img_size by default.
Add new RelPosMlp MaxViT weight that leverages this: * `maxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.6 @ 320 (T) ### Aug 26, 2022 * CoAtNet (https://arxiv.org/abs/2106.04803) and MaxVit (https://arxiv.org/abs/2204.01697) `timm` original models * both found in [`maxxvit.py`](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/maxxvit.py) model def, contains numerous experiments outside scope of original papers * an unfinished Tensorflow version from MaxVit authors can be found https://github.com/google-research/maxvit * Initial CoAtNet and MaxVit timm pretrained weights (working on more): * `coatnet_nano_rw_224` - 81.7 @ 224 (T) * `coatnet_rmlp_nano_rw_224` - 82.0 @ 224, 82.8 @ 320 (T) * `coatnet_0_rw_224` - 82.4 (T) -- NOTE timm '0' coatnets have 2 more 3rd stage blocks * `coatnet_bn_0_rw_224` - 82.4 (T) * `maxvit_nano_rw_256` - 82.9 @ 256 (T) * `coatnet_rmlp_1_rw_224` - 83.4 @ 224, 84 @ 320 (T) * `coatnet_1_rw_224` - 83.6 @ 224 (G) * (T) = TPU trained with `bits_and_tpu` branch training code, (G) = GPU trained * GCVit (weights adapted from https://github.com/NVlabs/GCVit, code 100% `timm` re-write for license purposes) * MViT-V2 (multi-scale vit, adapted from https://github.com/facebookresearch/mvit) * EfficientFormer (adapted from https://github.com/snap-research/EfficientFormer) * PyramidVisionTransformer-V2 (adapted from https://github.com/whai362/PVT) * 'Fast Norm' support for LayerNorm and GroupNorm that avoids float32 upcast w/ AMP (uses APEX LN if available for further boost) ### Aug 15, 2022 * ConvNeXt atto weights added * `convnext_atto` - 75.7 @ 224, 77.0 @ 288 * `convnext_atto_ols` - 75.9 @ 224, 77.2 @ 288 ### Aug 5, 2022 * More custom ConvNeXt smaller model defs with weights * `convnext_femto` - 77.5 @ 224, 78.7 @ 288 * `convnext_femto_ols` - 77.9 @ 224, 78.9 @ 288 * `convnext_pico` - 79.5 @ 224, 80.4 @ 288 * `convnext_pico_ols` - 79.5 @ 224, 80.5 @ 288 * `convnext_nano_ols` - 80.9 @ 224, 81.6 @ 288 * Updated EdgeNeXt to improve ONNX export, add new base variant and weights from original (https://github.com/mmaaz60/EdgeNeXt) ### July 28, 2022 * Add freshly minted DeiT-III Medium (width=512, depth=12, num_heads=8) model weights. Thanks [Hugo Touvron](https://github.com/TouvronHugo)! ### July 27, 2022 * All runtime benchmark and validation result csv files are finally up-to-date! * A few more weights & model defs added: * `darknetaa53` - 79.8 @ 256, 80.5 @ 288 * `convnext_nano` - 80.8 @ 224, 81.5 @ 288 * `cs3sedarknet_l` - 81.2 @ 256, 81.8 @ 288 * `cs3darknet_x` - 81.8 @ 256, 82.2 @ 288 * `cs3sedarknet_x` - 82.2 @ 256, 82.7 @ 288 * `cs3edgenet_x` - 82.2 @ 256, 82.7 @ 288 * `cs3se_edgenet_x` - 82.8 @ 256, 83.5 @ 320 * `cs3*` weights above all trained on TPU w/ `bits_and_tpu` branch. Thanks to TRC program! * Add output_stride=8 and 16 support to ConvNeXt (dilation) * deit3 models not being able to resize pos_emb fixed * Version 0.6.7 PyPi release (/w above bug fixes and new weighs since 0.6.5) ### July 8, 2022 More models, more fixes * Official research models (w/ weights) added: * EdgeNeXt from (https://github.com/mmaaz60/EdgeNeXt) * MobileViT-V2 from (https://github.com/apple/ml-cvnets) * DeiT III (Revenge of the ViT) from (https://github.com/facebookresearch/deit) * My own models: * Small `ResNet` defs added by request with 1 block repeats for both basic and bottleneck (resnet10 and resnet14) * `CspNet` refactored with dataclass config, simplified CrossStage3 (`cs3`) option. These are closer to YOLO-v5+ backbone defs. * More relative position vit fiddling. 
Two `srelpos` (shared relative position) models trained, and a medium w/ class token. * Add an alternate downsample mode to EdgeNeXt and train a `small` model. Better than original small, but not their new USI trained weights. * My own model weight results (all ImageNet-1k training) * `resnet10t` - 66.5 @ 176, 68.3 @ 224 * `resnet14t` - 71.3 @ 176, 72.3 @ 224 * `resnetaa50` - 80.6 @ 224 , 81.6 @ 288 * `darknet53` - 80.0 @ 256, 80.5 @ 288 * `cs3darknet_m` - 77.0 @ 256, 77.6 @ 288 * `cs3darknet_focus_m` - 76.7 @ 256, 77.3 @ 288 * `cs3darknet_l` - 80.4 @ 256, 80.9 @ 288 * `cs3darknet_focus_l` - 80.3 @ 256, 80.9 @ 288 * `vit_srelpos_small_patch16_224` - 81.1 @ 224, 82.1 @ 320 * `vit_srelpos_medium_patch16_224` - 82.3 @ 224, 83.1 @ 320 * `vit_relpos_small_patch16_cls_224` - 82.6 @ 224, 83.6 @ 320 * `edgnext_small_rw` - 79.6 @ 224, 80.4 @ 320 * `cs3`, `darknet`, and `vit_*relpos` weights above all trained on TPU thanks to TRC program! Rest trained on overheating GPUs. * Hugging Face Hub support fixes verified, demo notebook TBA * Pretrained weights / configs can be loaded externally (ie from local disk) w/ support for head adaptation. * Add support to change image extensions scanned by `timm` datasets/readers. See (https://github.com/rwightman/pytorch-image-models/pull/1274#issuecomment-1178303103) * Default ConvNeXt LayerNorm impl to use `F.layer_norm(x.permute(0, 2, 3, 1), ...).permute(0, 3, 1, 2)` via `LayerNorm2d` in all cases. * a bit slower than previous custom impl on some hardware (ie Ampere w/ CL), but overall fewer regressions across wider HW / PyTorch version ranges. * previous impl exists as `LayerNormExp2d` in `models/layers/norm.py` * Numerous bug fixes * Currently testing for imminent PyPi 0.6.x release * LeViT pretraining of larger models still a WIP, they don't train well / easily without distillation. Time to add distill support (finally)? * ImageNet-22k weight training + finetune ongoing, work on multi-weight support (slowly) chugging along (there are a LOT of weights, sigh) ... ### May 13, 2022 * Official Swin-V2 models and weights added from (https://github.com/microsoft/Swin-Transformer). Cleaned up to support torchscript. * Some refactoring for existing `timm` Swin-V2-CR impl, will likely do a bit more to bring parts closer to official and decide whether to merge some aspects. 
* More Vision Transformer relative position / residual post-norm experiments (all trained on TPU thanks to TRC program) * `vit_relpos_small_patch16_224` - 81.5 @ 224, 82.5 @ 320 -- rel pos, layer scale, no class token, avg pool * `vit_relpos_medium_patch16_rpn_224` - 82.3 @ 224, 83.1 @ 320 -- rel pos + res-post-norm, no class token, avg pool * `vit_relpos_medium_patch16_224` - 82.5 @ 224, 83.3 @ 320 -- rel pos, layer scale, no class token, avg pool * `vit_relpos_base_patch16_gapcls_224` - 82.8 @ 224, 83.9 @ 320 -- rel pos, layer scale, class token, avg pool (by mistake) * Bring 512 dim, 8-head 'medium' ViT model variant back to life (after using in a pre DeiT 'small' model for first ViT impl back in 2020) * Add ViT relative position support for switching btw existing impl and some additions in official Swin-V2 impl for future trials * Sequencer2D impl (https://arxiv.org/abs/2205.01972), added via PR from author (https://github.com/okojoalg) ### May 2, 2022 * Vision Transformer experiments adding Relative Position (Swin-V2 log-coord) (`vision_transformer_relpos.py`) and Residual Post-Norm branches (from Swin-V2) (`vision_transformer*.py`) * `vit_relpos_base_patch32_plus_rpn_256` - 79.5 @ 256, 80.6 @ 320 -- rel pos + extended width + res-post-norm, no class token, avg pool * `vit_relpos_base_patch16_224` - 82.5 @ 224, 83.6 @ 320 -- rel pos, layer scale, no class token, avg pool * `vit_base_patch16_rpn_224` - 82.3 @ 224 -- rel pos + res-post-norm, no class token, avg pool * Vision Transformer refactor to remove representation layer that was only used in initial vit and rarely used since with newer pretrain (ie `How to Train Your ViT`) * `vit_*` models support removal of class token, use of global average pool, use of fc_norm (ala beit, mae). ### April 22, 2022 * `timm` models are now officially supported in [fast.ai](https://www.fast.ai/)! Just in time for the new Practical Deep Learning course. `timmdocs` documentation link updated to [timm.fast.ai](http://timm.fast.ai/). * Two more model weights added in the TPU trained [series](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights). Some In22k pretrain still in progress. * `seresnext101d_32x8d` - 83.69 @ 224, 84.35 @ 288 * `seresnextaa101d_32x8d` (anti-aliased w/ AvgPool2d) - 83.85 @ 224, 84.57 @ 288 ### March 23, 2022 * Add `ParallelBlock` and `LayerScale` option to base vit models to support model configs in [Three things everyone should know about ViT](https://arxiv.org/abs/2203.09795) * `convnext_tiny_hnf` (head norm first) weights trained with (close to) A2 recipe, 82.2% top-1, could do better with more epochs. ### March 21, 2022 * Merge `norm_norm_norm`. **IMPORTANT** this update for a coming 0.6.x release will likely de-stabilize the master branch for a while. Branch [`0.5.x`](https://github.com/rwightman/pytorch-image-models/tree/0.5.x) or a previous 0.5.x release can be used if stability is required. 
* Significant weights update (all TPU trained) as described in this [release](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights) * `regnety_040` - 82.3 @ 224, 82.96 @ 288 * `regnety_064` - 83.0 @ 224, 83.65 @ 288 * `regnety_080` - 83.17 @ 224, 83.86 @ 288 * `regnetv_040` - 82.44 @ 224, 83.18 @ 288 (timm pre-act) * `regnetv_064` - 83.1 @ 224, 83.71 @ 288 (timm pre-act) * `regnetz_040` - 83.67 @ 256, 84.25 @ 320 * `regnetz_040h` - 83.77 @ 256, 84.5 @ 320 (w/ extra fc in head) * `resnetv2_50d_gn` - 80.8 @ 224, 81.96 @ 288 (pre-act GroupNorm) * `resnetv2_50d_evos` 80.77 @ 224, 82.04 @ 288 (pre-act EvoNormS) * `regnetz_c16_evos` - 81.9 @ 256, 82.64 @ 320 (EvoNormS) * `regnetz_d8_evos` - 83.42 @ 256, 84.04 @ 320 (EvoNormS) * `xception41p` - 82 @ 299 (timm pre-act) * `xception65` - 83.17 @ 299 * `xception65p` - 83.14 @ 299 (timm pre-act) * `resnext101_64x4d` - 82.46 @ 224, 83.16 @ 288 * `seresnext101_32x8d` - 83.57 @ 224, 84.270 @ 288 * `resnetrs200` - 83.85 @ 256, 84.44 @ 320 * HuggingFace hub support fixed w/ initial groundwork for allowing alternative 'config sources' for pretrained model definitions and weights (generic local file / remote url support soon) * SwinTransformer-V2 implementation added. Submitted by [Christoph Reich](https://github.com/ChristophReich1996). Training experiments and model changes by myself are ongoing so expect compat breaks. * Swin-S3 (AutoFormerV2) models / weights added from https://github.com/microsoft/Cream/tree/main/AutoFormerV2 * MobileViT models w/ weights adapted from https://github.com/apple/ml-cvnets * PoolFormer models w/ weights adapted from https://github.com/sail-sg/poolformer * VOLO models w/ weights adapted from https://github.com/sail-sg/volo * Significant work experimenting with non-BatchNorm norm layers such as EvoNorm, FilterResponseNorm, GroupNorm, etc * Enhance support for alternate norm + act ('NormAct') layers added to a number of models, esp EfficientNet/MobileNetV3, RegNet, and aligned Xception * Grouped conv support added to EfficientNet family * Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler * Gradient checkpointing support added to many models * `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head` * All vision transformer and vision MLP models update to return non-pooled / non-token selected features from `forward_features`, for consistency with CNN models, token selection or pooling now applied in `forward_head` ### Feb 2, 2022 * [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055) * I'm currently prepping to merge the `norm_norm_norm` branch back to master (ver 0.6.x) in next week or so. * The changes are more extensive than usual and may destabilize and break some model API use (aiming for full backwards compat). So, beware `pip install git+https://github.com/rwightman/pytorch-image-models` installs! * `0.5.x` releases and a `0.5.x` branch will remain stable with a cherry pick or two until dust clears. Recommend sticking to pypi install for a bit if you want stable. ### Jan 14, 2022 * Version 0.5.4 w/ release to be pushed to pypi. 
### Jan 5, 2023
* ConvNeXt-V2 models and weights added to existing `convnext.py`
  * Paper: [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808)
  * Reference impl: https://github.com/facebookresearch/ConvNeXt-V2 (NOTE: weights currently CC-BY-NC)

### Dec 23, 2022 🎄☃
* Add FlexiViT models and weights from https://github.com/google-research/big_vision (check out paper at https://arxiv.org/abs/2212.08013)
  * NOTE currently resizing is static on model creation, on-the-fly dynamic / train patch size sampling is a WIP
* Many more models updated to multi-weight and downloadable via HF hub now (convnext, efficientnet, mobilenet, vision_transformer*, beit)
* More model pretrained tag and adjustments, some model names changed (working on deprecation translations, consider main branch DEV branch right now, use 0.6.x for stable use)
* More ImageNet-12k (subset of 22k) pretrain models popping up:
  * `efficientnet_b5.in12k_ft_in1k` - 85.9 @ 448x448
  * `vit_medium_patch16_gap_384.in12k_ft_in1k` - 85.5 @ 384x384
  * `vit_medium_patch16_gap_256.in12k_ft_in1k` - 84.5 @ 256x256
  * `convnext_nano.in12k_ft_in1k` - 82.9 @ 288x288

### Dec 8, 2022
* Add 'EVA l' to `vision_transformer.py`, MAE style ViT-L/14 MIM pretrain w/ EVA-CLIP targets, FT on ImageNet-1k (w/ ImageNet-22k intermediate for some)
  * original source: https://github.com/baaivision/EVA

| model | top1 | param_count | gmac | macts | hub |
|:------------------------------------------|-----:|------------:|------:|------:|:----------------------------------------|
| eva_large_patch14_336.in22k_ft_in22k_in1k | 89.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_336.in22k_ft_in1k | 88.7 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in22k_in1k | 88.6 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in1k | 87.9 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |

### Dec 6, 2022
* Add 'EVA g', BEiT style ViT-g/14 model weights w/ both MIM pretrain and CLIP pretrain to `beit.py`.
* original source: https://github.com/baaivision/EVA * paper: https://arxiv.org/abs/2211.07636 | model | top1 | param_count | gmac | macts | hub | |:-----------------------------------------|-------:|--------------:|-------:|--------:|:----------------------------------------| | eva_giant_patch14_560.m30m_ft_in22k_in1k | 89.8 | 1014.4 | 1906.8 | 2577.2 | [link](https://huggingface.co/BAAI/EVA) | | eva_giant_patch14_336.m30m_ft_in22k_in1k | 89.6 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) | | eva_giant_patch14_336.clip_ft_in1k | 89.4 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) | | eva_giant_patch14_224.clip_ft_in1k | 89.1 | 1012.6 | 267.2 | 192.6 | [link](https://huggingface.co/BAAI/EVA) | ### Dec 5, 2022 * Pre-release (`0.8.0dev0`) of multi-weight support (`model_arch.pretrained_tag`). Install with `pip install --pre timm` * vision_transformer, maxvit, convnext are the first three model impl w/ support * model names are changing with this (previous _21k, etc. fn will merge), still sorting out deprecation handling * bugs are likely, but I need feedback so please try it out * if stability is needed, please use 0.6.x pypi releases or clone from [0.6.x branch](https://github.com/rwightman/pytorch-image-models/tree/0.6.x) * Support for PyTorch 2.0 compile is added in train/validate/inference/benchmark, use `--torchcompile` argument * Inference script allows more control over output, select k for top-class index + prob json, csv or parquet output * Add a full set of fine-tuned CLIP image tower weights from both LAION-2B and original OpenAI CLIP models | model | top1 | param_count | gmac | macts | hub | |:-------------------------------------------------|-------:|--------------:|-------:|--------:|:-------------------------------------------------------------------------------------| | vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k | 88.6 | 632.5 | 391 | 407.5 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_336.openai_ft_in12k_in1k | 88.3 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.openai_ft_in12k_in1k) | | vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k | 88.2 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_336.laion2b_ft_in12k_in1k | 88.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_224.openai_ft_in12k_in1k | 88.2 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k) | | vit_large_patch14_clip_224.laion2b_ft_in12k_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in12k_in1k) | | vit_large_patch14_clip_224.openai_ft_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in1k) | | vit_large_patch14_clip_336.laion2b_ft_in1k | 87.9 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in1k) | | vit_huge_patch14_clip_224.laion2b_ft_in1k | 87.6 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in1k) | | vit_large_patch14_clip_224.laion2b_ft_in1k | 87.3 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in1k) | | vit_base_patch16_clip_384.laion2b_ft_in12k_in1k | 87.2 | 86.9 | 
55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_384.openai_ft_in12k_in1k | 87 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in12k_in1k) | | vit_base_patch16_clip_384.laion2b_ft_in1k | 86.6 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in1k) | | vit_base_patch16_clip_384.openai_ft_in1k | 86.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in1k) | | vit_base_patch16_clip_224.laion2b_ft_in12k_in1k | 86.2 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_224.openai_ft_in12k_in1k | 85.9 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k) | | vit_base_patch32_clip_448.laion2b_ft_in12k_in1k | 85.8 | 88.3 | 17.9 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_224.laion2b_ft_in1k | 85.5 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in1k) | | vit_base_patch32_clip_384.laion2b_ft_in12k_in1k | 85.4 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k) | | vit_base_patch16_clip_224.openai_ft_in1k | 85.3 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in1k) | | vit_base_patch32_clip_384.openai_ft_in12k_in1k | 85.2 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.openai_ft_in12k_in1k) | | vit_base_patch32_clip_224.laion2b_ft_in12k_in1k | 83.3 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k) | | vit_base_patch32_clip_224.laion2b_ft_in1k | 82.6 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in1k) | | vit_base_patch32_clip_224.openai_ft_in1k | 81.9 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.openai_ft_in1k) | * Port of MaxViT Tensorflow Weights from official impl at https://github.com/google-research/maxvit * There was larger than expected drops for the upscaled 384/512 in21k fine-tune weights, possible detail missing, but the 21k FT did seem sensitive to small preprocessing | model | top1 | param_count | gmac | macts | hub | |:-----------------------------------|-------:|--------------:|-------:|--------:|:-----------------------------------------------------------------------| | maxvit_xlarge_tf_512.in21k_ft_in1k | 88.5 | 475.8 | 534.1 | 1413.2 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) | | maxvit_xlarge_tf_384.in21k_ft_in1k | 88.3 | 475.3 | 292.8 | 668.8 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) | | maxvit_base_tf_512.in21k_ft_in1k | 88.2 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) | | maxvit_large_tf_512.in21k_ft_in1k | 88 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) | | maxvit_large_tf_384.in21k_ft_in1k | 88 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) | | maxvit_base_tf_384.in21k_ft_in1k | 87.9 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) | | maxvit_base_tf_512.in1k | 86.6 | 119.9 | 138 | 704 | 
[link](https://huggingface.co/timm/maxvit_base_tf_512.in1k) | | maxvit_large_tf_512.in1k | 86.5 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in1k) | | maxvit_base_tf_384.in1k | 86.3 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in1k) | | maxvit_large_tf_384.in1k | 86.2 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in1k) | | maxvit_small_tf_512.in1k | 86.1 | 69.1 | 67.3 | 383.8 | [link](https://huggingface.co/timm/maxvit_small_tf_512.in1k) | | maxvit_tiny_tf_512.in1k | 85.7 | 31 | 33.5 | 257.6 | [link](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) | | maxvit_small_tf_384.in1k | 85.5 | 69 | 35.9 | 183.6 | [link](https://huggingface.co/timm/maxvit_small_tf_384.in1k) | | maxvit_tiny_tf_384.in1k | 85.1 | 31 | 17.5 | 123.4 | [link](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) | | maxvit_large_tf_224.in1k | 84.9 | 211.8 | 43.7 | 127.4 | [link](https://huggingface.co/timm/maxvit_large_tf_224.in1k) | | maxvit_base_tf_224.in1k | 84.9 | 119.5 | 24 | 95 | [link](https://huggingface.co/timm/maxvit_base_tf_224.in1k) | | maxvit_small_tf_224.in1k | 84.4 | 68.9 | 11.7 | 53.2 | [link](https://huggingface.co/timm/maxvit_small_tf_224.in1k) | | maxvit_tiny_tf_224.in1k | 83.4 | 30.9 | 5.6 | 35.8 | [link](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) | ### Oct 15, 2022 * Train and validation script enhancements * Non-GPU (ie CPU) device support * SLURM compatibility for train script * HF datasets support (via ReaderHfds) * TFDS/WDS dataloading improvements (sample padding/wrap for distributed use fixed wrt sample count estimate) * in_chans !=3 support for scripts / loader * Adan optimizer * Can enable per-step LR scheduling via args * Dataset 'parsers' renamed to 'readers', more descriptive of purpose * AMP args changed, APEX via `--amp-impl apex`, bfloat16 supportedf via `--amp-dtype bfloat16` * main branch switched to 0.7.x version, 0.6x forked for stable release of weight only adds * master -> main branch rename ### Oct 10, 2022 * More weights in `maxxvit` series, incl first ConvNeXt block based `coatnext` and `maxxvit` experiments: * `coatnext_nano_rw_224` - 82.0 @ 224 (G) -- (uses ConvNeXt conv block, no BatchNorm) * `maxxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.7 @ 320 (G) (uses ConvNeXt conv block, no BN) * `maxvit_rmlp_small_rw_224` - 84.5 @ 224, 85.1 @ 320 (G) * `maxxvit_rmlp_small_rw_256` - 84.6 @ 256, 84.9 @ 288 (G) -- could be trained better, hparams need tuning (uses ConvNeXt block, no BN) * `coatnet_rmlp_2_rw_224` - 84.6 @ 224, 85 @ 320 (T) * NOTE: official MaxVit weights (in1k) have been released at https://github.com/google-research/maxvit -- some extra work is needed to port and adapt since my impl was created independently of theirs and has a few small differences + the whole TF same padding fun. 
### Sept 23, 2022 * LAION-2B CLIP image towers supported as pretrained backbones for fine-tune or features (no classifier) * vit_base_patch32_224_clip_laion2b * vit_large_patch14_224_clip_laion2b * vit_huge_patch14_224_clip_laion2b * vit_giant_patch14_224_clip_laion2b ### Sept 7, 2022 * Hugging Face [`timm` docs](https://huggingface.co/docs/hub/timm) home now exists, look for more here in the future * Add BEiT-v2 weights for base and large 224x224 models from https://github.com/microsoft/unilm/tree/master/beit2 * Add more weights in `maxxvit` series incl a `pico` (7.5M params, 1.9 GMACs), two `tiny` variants: * `maxvit_rmlp_pico_rw_256` - 80.5 @ 256, 81.3 @ 320 (T) * `maxvit_tiny_rw_224` - 83.5 @ 224 (G) * `maxvit_rmlp_tiny_rw_256` - 84.2 @ 256, 84.8 @ 320 (T) ### Aug 29, 2022 * MaxVit window size scales with img_size by default. Add new RelPosMlp MaxViT weight that leverages this: * `maxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.6 @ 320 (T) ### Aug 26, 2022 * CoAtNet (https://arxiv.org/abs/2106.04803) and MaxVit (https://arxiv.org/abs/2204.01697) `timm` original models * both found in [`maxxvit.py`](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/maxxvit.py) model def, contains numerous experiments outside scope of original papers * an unfinished Tensorflow version from MaxVit authors can be found https://github.com/google-research/maxvit * Initial CoAtNet and MaxVit timm pretrained weights (working on more): * `coatnet_nano_rw_224` - 81.7 @ 224 (T) * `coatnet_rmlp_nano_rw_224` - 82.0 @ 224, 82.8 @ 320 (T) * `coatnet_0_rw_224` - 82.4 (T) -- NOTE timm '0' coatnets have 2 more 3rd stage blocks * `coatnet_bn_0_rw_224` - 82.4 (T) * `maxvit_nano_rw_256` - 82.9 @ 256 (T) * `coatnet_rmlp_1_rw_224` - 83.4 @ 224, 84 @ 320 (T) * `coatnet_1_rw_224` - 83.6 @ 224 (G) * (T) = TPU trained with `bits_and_tpu` branch training code, (G) = GPU trained * GCVit (weights adapted from https://github.com/NVlabs/GCVit, code 100% `timm` re-write for license purposes) * MViT-V2 (multi-scale vit, adapted from https://github.com/facebookresearch/mvit) * EfficientFormer (adapted from https://github.com/snap-research/EfficientFormer) * PyramidVisionTransformer-V2 (adapted from https://github.com/whai362/PVT) * 'Fast Norm' support for LayerNorm and GroupNorm that avoids float32 upcast w/ AMP (uses APEX LN if available for further boost) ### Aug 15, 2022 * ConvNeXt atto weights added * `convnext_atto` - 75.7 @ 224, 77.0 @ 288 * `convnext_atto_ols` - 75.9 @ 224, 77.2 @ 288 ### Aug 5, 2022 * More custom ConvNeXt smaller model defs with weights * `convnext_femto` - 77.5 @ 224, 78.7 @ 288 * `convnext_femto_ols` - 77.9 @ 224, 78.9 @ 288 * `convnext_pico` - 79.5 @ 224, 80.4 @ 288 * `convnext_pico_ols` - 79.5 @ 224, 80.5 @ 288 * `convnext_nano_ols` - 80.9 @ 224, 81.6 @ 288 * Updated EdgeNeXt to improve ONNX export, add new base variant and weights from original (https://github.com/mmaaz60/EdgeNeXt) ### July 28, 2022 * Add freshly minted DeiT-III Medium (width=512, depth=12, num_heads=8) model weights. Thanks [Hugo Touvron](https://github.com/TouvronHugo)! ### July 27, 2022 * All runtime benchmark and validation result csv files are up-to-date! 
* A few more weights & model defs added: * `darknetaa53` - 79.8 @ 256, 80.5 @ 288 * `convnext_nano` - 80.8 @ 224, 81.5 @ 288 * `cs3sedarknet_l` - 81.2 @ 256, 81.8 @ 288 * `cs3darknet_x` - 81.8 @ 256, 82.2 @ 288 * `cs3sedarknet_x` - 82.2 @ 256, 82.7 @ 288 * `cs3edgenet_x` - 82.2 @ 256, 82.7 @ 288 * `cs3se_edgenet_x` - 82.8 @ 256, 83.5 @ 320 * `cs3*` weights above all trained on TPU w/ `bits_and_tpu` branch. Thanks to TRC program! * Add output_stride=8 and 16 support to ConvNeXt (dilation) * deit3 models not being able to resize pos_emb fixed * Version 0.6.7 PyPi release (/w above bug fixes and new weighs since 0.6.5) ### July 8, 2022 More models, more fixes * Official research models (w/ weights) added: * EdgeNeXt from (https://github.com/mmaaz60/EdgeNeXt) * MobileViT-V2 from (https://github.com/apple/ml-cvnets) * DeiT III (Revenge of the ViT) from (https://github.com/facebookresearch/deit) * My own models: * Small `ResNet` defs added by request with 1 block repeats for both basic and bottleneck (resnet10 and resnet14) * `CspNet` refactored with dataclass config, simplified CrossStage3 (`cs3`) option. These are closer to YOLO-v5+ backbone defs. * More relative position vit fiddling. Two `srelpos` (shared relative position) models trained, and a medium w/ class token. * Add an alternate downsample mode to EdgeNeXt and train a `small` model. Better than original small, but not their new USI trained weights. * My own model weight results (all ImageNet-1k training) * `resnet10t` - 66.5 @ 176, 68.3 @ 224 * `resnet14t` - 71.3 @ 176, 72.3 @ 224 * `resnetaa50` - 80.6 @ 224 , 81.6 @ 288 * `darknet53` - 80.0 @ 256, 80.5 @ 288 * `cs3darknet_m` - 77.0 @ 256, 77.6 @ 288 * `cs3darknet_focus_m` - 76.7 @ 256, 77.3 @ 288 * `cs3darknet_l` - 80.4 @ 256, 80.9 @ 288 * `cs3darknet_focus_l` - 80.3 @ 256, 80.9 @ 288 * `vit_srelpos_small_patch16_224` - 81.1 @ 224, 82.1 @ 320 * `vit_srelpos_medium_patch16_224` - 82.3 @ 224, 83.1 @ 320 * `vit_relpos_small_patch16_cls_224` - 82.6 @ 224, 83.6 @ 320 * `edgnext_small_rw` - 79.6 @ 224, 80.4 @ 320 * `cs3`, `darknet`, and `vit_*relpos` weights above all trained on TPU thanks to TRC program! Rest trained on overheating GPUs. * Hugging Face Hub support fixes verified, demo notebook TBA * Pretrained weights / configs can be loaded externally (ie from local disk) w/ support for head adaptation. * Add support to change image extensions scanned by `timm` datasets/parsers. See (https://github.com/rwightman/pytorch-image-models/pull/1274#issuecomment-1178303103) * Default ConvNeXt LayerNorm impl to use `F.layer_norm(x.permute(0, 2, 3, 1), ...).permute(0, 3, 1, 2)` via `LayerNorm2d` in all cases. * a bit slower than previous custom impl on some hardware (ie Ampere w/ CL), but overall fewer regressions across wider HW / PyTorch version ranges. * previous impl exists as `LayerNormExp2d` in `models/layers/norm.py` * Numerous bug fixes * Currently testing for imminent PyPi 0.6.x release * LeViT pretraining of larger models still a WIP, they don't train well / easily without distillation. Time to add distill support (finally)? * ImageNet-22k weight training + finetune ongoing, work on multi-weight support (slowly) chugging along (there are a LOT of weights, sigh) ... ### May 13, 2022 * Official Swin-V2 models and weights added from (https://github.com/microsoft/Swin-Transformer). Cleaned up to support torchscript. * Some refactoring for existing `timm` Swin-V2-CR impl, will likely do a bit more to bring parts closer to official and decide whether to merge some aspects. 
* More Vision Transformer relative position / residual post-norm experiments (all trained on TPU thanks to TRC program) * `vit_relpos_small_patch16_224` - 81.5 @ 224, 82.5 @ 320 -- rel pos, layer scale, no class token, avg pool * `vit_relpos_medium_patch16_rpn_224` - 82.3 @ 224, 83.1 @ 320 -- rel pos + res-post-norm, no class token, avg pool * `vit_relpos_medium_patch16_224` - 82.5 @ 224, 83.3 @ 320 -- rel pos, layer scale, no class token, avg pool * `vit_relpos_base_patch16_gapcls_224` - 82.8 @ 224, 83.9 @ 320 -- rel pos, layer scale, class token, avg pool (by mistake) * Bring 512 dim, 8-head 'medium' ViT model variant back to life (after using in a pre DeiT 'small' model for first ViT impl back in 2020) * Add ViT relative position support for switching btw existing impl and some additions in official Swin-V2 impl for future trials * Sequencer2D impl (https://arxiv.org/abs/2205.01972), added via PR from author (https://github.com/okojoalg) ### May 2, 2022 * Vision Transformer experiments adding Relative Position (Swin-V2 log-coord) (`vision_transformer_relpos.py`) and Residual Post-Norm branches (from Swin-V2) (`vision_transformer*.py`) * `vit_relpos_base_patch32_plus_rpn_256` - 79.5 @ 256, 80.6 @ 320 -- rel pos + extended width + res-post-norm, no class token, avg pool * `vit_relpos_base_patch16_224` - 82.5 @ 224, 83.6 @ 320 -- rel pos, layer scale, no class token, avg pool * `vit_base_patch16_rpn_224` - 82.3 @ 224 -- rel pos + res-post-norm, no class token, avg pool * Vision Transformer refactor to remove representation layer that was only used in initial vit and rarely used since with newer pretrain (ie `How to Train Your ViT`) * `vit_*` models support removal of class token, use of global average pool, use of fc_norm (ala beit, mae). ### April 22, 2022 * `timm` models are now officially supported in [fast.ai](https://www.fast.ai/)! Just in time for the new Practical Deep Learning course. `timmdocs` documentation link updated to [timm.fast.ai](http://timm.fast.ai/). * Two more model weights added in the TPU trained [series](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights). Some In22k pretrain still in progress. * `seresnext101d_32x8d` - 83.69 @ 224, 84.35 @ 288 * `seresnextaa101d_32x8d` (anti-aliased w/ AvgPool2d) - 83.85 @ 224, 84.57 @ 288 ### March 23, 2022 * Add `ParallelBlock` and `LayerScale` option to base vit models to support model configs in [Three things everyone should know about ViT](https://arxiv.org/abs/2203.09795) * `convnext_tiny_hnf` (head norm first) weights trained with (close to) A2 recipe, 82.2% top-1, could do better with more epochs. ### March 21, 2022 * Merge `norm_norm_norm`. **IMPORTANT** this update for a coming 0.6.x release will likely de-stabilize the master branch for a while. Branch [`0.5.x`](https://github.com/rwightman/pytorch-image-models/tree/0.5.x) or a previous 0.5.x release can be used if stability is required. 
* Significant weights update (all TPU trained) as described in this [release](https://github.com/rwightman/pytorch-image-models/releases/tag/v0.1-tpu-weights) * `regnety_040` - 82.3 @ 224, 82.96 @ 288 * `regnety_064` - 83.0 @ 224, 83.65 @ 288 * `regnety_080` - 83.17 @ 224, 83.86 @ 288 * `regnetv_040` - 82.44 @ 224, 83.18 @ 288 (timm pre-act) * `regnetv_064` - 83.1 @ 224, 83.71 @ 288 (timm pre-act) * `regnetz_040` - 83.67 @ 256, 84.25 @ 320 * `regnetz_040h` - 83.77 @ 256, 84.5 @ 320 (w/ extra fc in head) * `resnetv2_50d_gn` - 80.8 @ 224, 81.96 @ 288 (pre-act GroupNorm) * `resnetv2_50d_evos` 80.77 @ 224, 82.04 @ 288 (pre-act EvoNormS) * `regnetz_c16_evos` - 81.9 @ 256, 82.64 @ 320 (EvoNormS) * `regnetz_d8_evos` - 83.42 @ 256, 84.04 @ 320 (EvoNormS) * `xception41p` - 82 @ 299 (timm pre-act) * `xception65` - 83.17 @ 299 * `xception65p` - 83.14 @ 299 (timm pre-act) * `resnext101_64x4d` - 82.46 @ 224, 83.16 @ 288 * `seresnext101_32x8d` - 83.57 @ 224, 84.270 @ 288 * `resnetrs200` - 83.85 @ 256, 84.44 @ 320 * HuggingFace hub support fixed w/ initial groundwork for allowing alternative 'config sources' for pretrained model definitions and weights (generic local file / remote url support soon) * SwinTransformer-V2 implementation added. Submitted by [Christoph Reich](https://github.com/ChristophReich1996). Training experiments and model changes by myself are ongoing so expect compat breaks. * Swin-S3 (AutoFormerV2) models / weights added from https://github.com/microsoft/Cream/tree/main/AutoFormerV2 * MobileViT models w/ weights adapted from https://github.com/apple/ml-cvnets * PoolFormer models w/ weights adapted from https://github.com/sail-sg/poolformer * VOLO models w/ weights adapted from https://github.com/sail-sg/volo * Significant work experimenting with non-BatchNorm norm layers such as EvoNorm, FilterResponseNorm, GroupNorm, etc * Enhance support for alternate norm + act ('NormAct') layers added to a number of models, esp EfficientNet/MobileNetV3, RegNet, and aligned Xception * Grouped conv support added to EfficientNet family * Add 'group matching' API to all models to allow grouping model parameters for application of 'layer-wise' LR decay, lr scale added to LR scheduler * Gradient checkpointing support added to many models * `forward_head(x, pre_logits=False)` fn added to all models to allow separate calls of `forward_features` + `forward_head` * All vision transformer and vision MLP models update to return non-pooled / non-token selected features from `forward_features`, for consistency with CNN models, token selection or pooling now applied in `forward_head` ### Feb 2, 2022 * [Chris Hughes](https://github.com/Chris-hughes10) posted an exhaustive run through of `timm` on his blog yesterday. Well worth a read. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055) * I'm currently prepping to merge the `norm_norm_norm` branch back to master (ver 0.6.x) in next week or so. * The changes are more extensive than usual and may destabilize and break some model API use (aiming for full backwards compat). So, beware `pip install git+https://github.com/rwightman/pytorch-image-models` installs! * `0.5.x` releases and a `0.5.x` branch will remain stable with a cherry pick or two until dust clears. Recommend sticking to pypi install for a bit if you want stable. ### Jan 14, 2022 * Version 0.5.4 w/ release to be pushed to pypi. 
It's been a while since last pypi update and riskier changes will be merged to main branch soon.... * Add ConvNeXT models /w weights from official impl (https://github.com/facebookresearch/ConvNeXt), a few perf tweaks, compatible with timm features * Tried training a few small (~1.8-3M param) / mobile optimized models, a few are good so far, more on the way... * `mnasnet_small` - 65.6 top-1 * `mobilenetv2_050` - 65.9 * `lcnet_100/075/050` - 72.1 / 68.8 / 63.1 * `semnasnet_075` - 73 * `fbnetv3_b/d/g` - 79.1 / 79.7 / 82.0 * TinyNet models added by [rsomani95](https://github.com/rsomani95) * LCNet added via MobileNetV3 architecture
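The March 21, 2022 entry above notes that every model now exposes `forward_head(x, pre_logits=False)` alongside `forward_features`. A minimal sketch of that split (not part of the original changelog), assuming a timm version from the 0.6.x line onward:

```py
>>> import timm
>>> import torch
>>> model = timm.create_model('resnet50', pretrained=False).eval()
>>> x = torch.randn(1, 3, 224, 224)
>>> with torch.inference_mode():
...     feats = model.forward_features(x)  # unpooled feature map, e.g. [1, 2048, 7, 7] for resnet50
...     logits = model.forward_head(feats)  # pooling + classifier applied here
...     pooled = model.forward_head(feats, pre_logits=True)  # pooled features, classifier skipped
```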
pytorch-image-models/hfdocs/source/changes.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/changes.mdx", "repo_id": "pytorch-image-models", "token_count": 50077 }
256
# EfficientNet (Knapsack Pruned) **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). This collection consists of pruned EfficientNet models. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `efficientnet_b1_pruned`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? 
You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` ``` @misc{aflalo2020knapsack, title={Knapsack Pruning with Inner Distillation}, author={Yonathan Aflalo and Asaf Noy and Ming Lin and Itamar Friedman and Lihi Zelnik}, year={2020}, eprint={2002.08258}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: EfficientNet Pruned Paper: Title: Knapsack Pruning with Inner Distillation URL: https://paperswithcode.com/paper/knapsack-pruning-with-inner-distillation Models: - Name: efficientnet_b1_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 489653114 Parameters: 6330000 File Size: 25595162 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b1_pruned Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1208 Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.25% Top 5 Accuracy: 93.84% - Name: efficientnet_b2_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 878133915 Parameters: 8310000 File Size: 33555005 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b2_pruned Crop Pct: '0.89' Image Size: '260' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1219 Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.91% Top 5 Accuracy: 94.86% - Name: efficientnet_b3_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 1239590641 Parameters: 9860000 File Size: 39770812 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b3_pruned Crop Pct: '0.904' Image Size: '300' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1230 Weights: 
https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.86% Top 5 Accuracy: 95.24% -->
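To complement the feature-extraction pointer above, here is a minimal sketch (not from the original docs) using `features_only=True`; it assumes the pruned EfficientNet variants expose multi-scale features like the rest of the EfficientNet family:

```py
>>> import timm
>>> import torch
>>> model = timm.create_model('efficientnet_b1_pruned', pretrained=True, features_only=True)
>>> model.eval()
>>> dummy = torch.randn(1, 3, 240, 240)  # this variant's native 240x240 input size
>>> with torch.inference_mode():
...     feature_maps = model(dummy)
>>> for fm in feature_maps:
...     print(fm.shape)  # one tensor per feature stage, e.g. torch.Size([1, C, H, W])
```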
pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/efficientnet-pruned.mdx", "repo_id": "pytorch-image-models", "token_count": 2778 }
257
# ResNet **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('resnet18', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `resnet18`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
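If you are unsure which ResNet variants ship with pretrained weights, you can query them programmatically. A small sketch (not part of the original docs):

```py
>>> import timm
>>> names = timm.list_models('resnet*', pretrained=True)
>>> print(len(names), names[:5])  # exact contents depend on your timm version
```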
## Citation ```BibTeX @article{DBLP:journals/corr/HeZRS15, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Deep Residual Learning for Image Recognition}, journal = {CoRR}, volume = {abs/1512.03385}, year = {2015}, url = {http://arxiv.org/abs/1512.03385}, archivePrefix = {arXiv}, eprint = {1512.03385}, timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: ResNet Paper: Title: Deep Residual Learning for Image Recognition URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition Models: - Name: resnet18 In Collection: ResNet Metadata: FLOPs: 2337073152 Parameters: 11690000 File Size: 46827520 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet18 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L641 Weights: https://download.pytorch.org/models/resnet18-5c106cde.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 69.74% Top 5 Accuracy: 89.09% - Name: resnet26 In Collection: ResNet Metadata: FLOPs: 3026804736 Parameters: 16000000 File Size: 64129972 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet26 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L675 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.29% Top 5 Accuracy: 92.57% - Name: resnet34 In Collection: ResNet Metadata: FLOPs: 4718469120 Parameters: 21800000 File Size: 87290831 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet34 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L658 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.11% Top 5 Accuracy: 92.28% - Name: resnet50 In Collection: ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102488165 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet50 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: 
https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L691 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.04% Top 5 Accuracy: 94.39% - Name: resnetblur50 In Collection: ResNet Metadata: FLOPs: 6621606912 Parameters: 25560000 File Size: 102488165 Architecture: - 1x1 Convolution - Batch Normalization - Blur Pooling - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnetblur50 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L1160 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.29% Top 5 Accuracy: 94.64% - Name: tv_resnet101 In Collection: ResNet Metadata: FLOPs: 10068547584 Parameters: 44550000 File Size: 178728960 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet101 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L761 Weights: https://download.pytorch.org/models/resnet101-5d3b4d8f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.37% Top 5 Accuracy: 93.56% - Name: tv_resnet152 In Collection: ResNet Metadata: FLOPs: 14857660416 Parameters: 60190000 File Size: 241530880 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet152 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L769 Weights: https://download.pytorch.org/models/resnet152-b121ed2d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.32% Top 5 Accuracy: 94.05% - Name: tv_resnet34 In Collection: ResNet Metadata: FLOPs: 4718469120 Parameters: 21800000 File Size: 87306240 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet34 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear 
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L745 Weights: https://download.pytorch.org/models/resnet34-333f7ec4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.3% Top 5 Accuracy: 91.42% - Name: tv_resnet50 In Collection: ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102502400 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet50 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L753 Weights: https://download.pytorch.org/models/resnet50-19c8e357.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.16% Top 5 Accuracy: 92.88% -->
pytorch-image-models/hfdocs/source/models/resnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/resnet.mdx", "repo_id": "pytorch-image-models", "token_count": 5077 }
258
# (Tensorflow) MixNet **MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tf_mixnet_l', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_mixnet_l`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('tf_mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{tan2019mixconv, title={MixConv: Mixed Depthwise Convolutional Kernels}, author={Mingxing Tan and Quoc V. 
Le}, year={2019}, eprint={1907.09595}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: TF MixNet Paper: Title: 'MixConv: Mixed Depthwise Convolutional Kernels' URL: https://paperswithcode.com/paper/mixnet-mixed-depthwise-convolutional-kernels Models: - Name: tf_mixnet_l In Collection: TF MixNet Metadata: FLOPs: 688674516 Parameters: 7330000 File Size: 29620756 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_l Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1720 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.78% Top 5 Accuracy: 94.0% - Name: tf_mixnet_m In Collection: TF MixNet Metadata: FLOPs: 416633502 Parameters: 5010000 File Size: 20310871 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_m Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1709 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.96% Top 5 Accuracy: 93.16% - Name: tf_mixnet_s In Collection: TF MixNet Metadata: FLOPs: 302587678 Parameters: 4130000 File Size: 16738218 Architecture: - Batch Normalization - Dense Connections - Dropout - Global Average Pooling - Grouped Convolution - MixConv - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - MNAS Training Data: - ImageNet ID: tf_mixnet_s Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1698 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.68% Top 5 Accuracy: 92.64% -->
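As a supplement to the usage section above, the preprocessing settings behind the results table (input size, crop percentage, interpolation) can be read off the model itself. A minimal sketch (not from the original docs); the attribute is `pretrained_cfg` on recent timm releases and `default_cfg` on older ones:

```py
>>> import timm
>>> model = timm.create_model('tf_mixnet_l', pretrained=True)
>>> cfg = getattr(model, 'pretrained_cfg', None) or model.default_cfg
>>> print(cfg)  # includes input_size, crop_pct, interpolation, mean/std used for the ImageNet numbers
```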
pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/tf-mixnet.mdx", "repo_id": "pytorch-image-models", "token_count": 2362 }
259
[build-system] requires = ["pdm-backend"] build-backend = "pdm.backend" [project] name = "timm" authors = [ {name = "Ross Wightman", email = "ross@huggingface.co"}, ] description = "PyTorch Image Models" readme = "README.md" requires-python = ">=3.8" keywords = ["pytorch", "image-classification"] license = {text = "Apache-2.0"} classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ] dependencies = [ 'torch', 'torchvision', 'pyyaml', 'huggingface_hub', 'safetensors', ] dynamic = ["version"] [project.urls] homepage = "https://github.com/huggingface/pytorch-image-models" documentation = "https://huggingface.co/docs/timm/en/index" repository = "https://github.com/huggingface/pytorch-image-models" [tool.pdm.dev-dependencies] test = [ 'pytest', 'pytest-timeout', 'pytest-xdist', 'pytest-forked', 'expecttest', ] [tool.pdm.version] source = "file" path = "timm/version.py" [tool.pytest.ini_options] testpaths = ['tests'] markers = [ "base: marker for model tests using the basic setup", "cfg: marker for model tests checking the config", "torchscript: marker for model tests using torchscript", "features: marker for model tests checking feature extraction", "fxforward: marker for model tests using torch fx (only forward)", "fxbackward: marker for model tests using torch fx (only backward)", ]
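# Editor's sketch (not part of the original pyproject.toml): the markers declared above let you
# run focused subsets of the test suite once the dev dependencies are installed, for example:
#   pytest -m base tests/
#   pytest -m "cfg or torchscript" tests/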
pytorch-image-models/pyproject.toml/0
{ "file_path": "pytorch-image-models/pyproject.toml", "repo_id": "pytorch-image-models", "token_count": 800 }
260
""" Optimzier Tests These tests were adapted from PyTorch' optimizer tests. """ import functools import importlib import os from copy import deepcopy import pytest import torch from torch.nn import Parameter from torch.testing._internal.common_utils import TestCase from timm.optim import create_optimizer_v2, list_optimizers, get_optimizer_class, get_optimizer_info, OptimInfo from timm.optim import param_groups_layer_decay, param_groups_weight_decay from timm.scheduler import PlateauLRScheduler torch_backend = os.environ.get('TORCH_BACKEND') if torch_backend is not None: importlib.import_module(torch_backend) torch_device = os.environ.get('TORCH_DEVICE', 'cuda') # HACK relying on internal PyTorch test functionality for comparisons that I don't want to write torch_tc = TestCase() def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors): weight = Parameter(weight) bias = Parameter(bias) input = Parameter(input) optimizer = constructor(weight, bias) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) # to check if the optimizer can be printed as a string optimizer.__repr__() def fn(): optimizer.zero_grad() y = weight.mv(input) if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device(): y = y.cuda(bias.get_device()) loss = (y + bias).pow(2).sum() loss.backward() return loss initial_value = fn().item() for _i in range(200): for scheduler in schedulers: if isinstance(scheduler, PlateauLRScheduler): val_loss = fn() scheduler.step(val_loss) else: scheduler.step() optimizer.step(fn) assert fn().item() < initial_value def _test_state_dict(weight, bias, input, constructor): weight = Parameter(weight) bias = Parameter(bias) input = Parameter(input) def fn_base(optimizer, weight, bias): optimizer.zero_grad() i = input_device if weight.device.type != 'cpu' else input loss = (weight.mv(i) + bias).pow(2).sum() loss.backward() return loss optimizer = constructor(weight, bias) fn = functools.partial(fn_base, optimizer, weight, bias) # Prime the optimizer for _i in range(20): optimizer.step(fn) # Clone the weights and construct new optimizer for them with torch.no_grad(): weight_c = Parameter(weight.clone().detach()) bias_c = Parameter(bias.clone().detach()) optimizer_c = constructor(weight_c, bias_c) fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c) # Load state dict state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_c.load_state_dict(state_dict_c) # Run both optimizations in parallel for _i in range(20): optimizer.step(fn) optimizer_c.step(fn_c) torch_tc.assertEqual(weight, weight_c) torch_tc.assertEqual(bias, bias_c) # Make sure state dict is deterministic with equal but not identical parameters torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict()) # Make sure repeated parameters have identical representation in state dict optimizer_c.param_groups.extend(optimizer_c.param_groups) torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1]) # Check that state dict can be loaded even when we cast parameters # to a different type and move to a different device. 
if torch_device == 'cpu': return elif torch_device == 'cuda' and not torch.cuda.is_available(): return with torch.no_grad(): input_device = Parameter(input.clone().detach().float().to(torch_device)) weight_device = Parameter(weight.clone().detach().to(torch_device)) bias_device = Parameter(bias.clone().detach().to(torch_device)) optimizer_device = constructor(weight_device, bias_device) fn_device = functools.partial(fn_base, optimizer_device, weight_device, bias_device) state_dict = deepcopy(optimizer.state_dict()) state_dict_c = deepcopy(optimizer.state_dict()) optimizer_device.load_state_dict(state_dict_c) # Make sure state dict wasn't modified torch_tc.assertEqual(state_dict, state_dict_c) for _i in range(20): optimizer.step(fn) optimizer_device.step(fn_device) torch_tc.assertEqual(weight, weight_device) torch_tc.assertEqual(bias, bias_device) # validate deepcopy() copies all public attributes def getPublicAttr(obj): return set(k for k in obj.__dict__ if not k.startswith('_')) assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer)) def _test_basic_cases(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] _test_state_dict( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor ) _test_basic_cases_template( torch.randn(10, 5), torch.randn(10), torch.randn(5), constructor, scheduler_constructors ) # non-contiguous parameters _test_basic_cases_template( torch.randn(10, 5, 2)[..., 0], torch.randn(10, 2)[..., 0], torch.randn(5), constructor, scheduler_constructors ) # CUDA if torch_device == 'cpu': return elif torch_device == 'cuda' and not torch.cuda.is_available(): return _test_basic_cases_template( torch.randn(10, 5).to(torch_device), torch.randn(10).to(torch_device), torch.randn(5).to(torch_device), constructor, scheduler_constructors ) def _test_model(optimizer, params, device=torch.device('cpu'), after_step=0): weight = torch.tensor( [[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]], device=device, requires_grad=True) bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True) weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True) bias2 = torch.tensor([-0.0711], device=device, requires_grad=True) input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2) model = torch.nn.Sequential(torch.nn.Linear(2, 3), torch.nn.Sigmoid(), torch.nn.Linear(3, 1), torch.nn.Sigmoid()) model.to(device) pretrained_dict = model.state_dict() pretrained_dict['0.weight'] = weight pretrained_dict['0.bias'] = bias pretrained_dict['2.weight'] = weight2 pretrained_dict['2.bias'] = bias2 model.load_state_dict(pretrained_dict) optimizer = create_optimizer_v2(model, opt=optimizer, **params) prev_loss = float('inf') for i in range(20): optimizer.zero_grad() output = model(input) loss = output.sum() loss.backward() loss = loss.item() if i > after_step: assert loss < prev_loss prev_loss = loss optimizer.step() def rosenbrock(tensor): x, y = tensor return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2 def drosenbrock(tensor): x, y = tensor return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2))) def _test_rosenbrock(constructor, scheduler_constructors=None): if scheduler_constructors is None: scheduler_constructors = [] params_t = torch.tensor([1.5, 1.5]) params = Parameter(params_t) optimizer = constructor([params]) schedulers = [] for scheduler_constructor in scheduler_constructors: schedulers.append(scheduler_constructor(optimizer)) 
solution = torch.tensor([1, 1]) initial_dist = params.clone().detach().dist(solution) def get_grad(_param, _sparse_grad, _w): grad = drosenbrock(params.clone().detach()) # Depending on w, provide only the x or y gradient if _sparse_grad: if _w: i = torch.tensor([[0, 0]], dtype=torch.int64) x = grad[0] v = torch.tensor([x / 4.0, x - x / 4.0]) else: i = torch.tensor([[1, 1]], dtype=torch.int64) y = grad[1] v = torch.tensor([y - y / 4.0, y / 4.0]) grad_out = torch.sparse_coo_tensor(i, v, (2,), dtype=v.dtype) else: if _w: grad_out = torch.tensor([grad[0], 0], dtype=_param.dtype) else: grad_out = torch.tensor([0, grad[1]], dtype=_param.dtype) return grad_out def eval(_param, _sparse_grad, _w): # Depending on w, provide only the x or y gradient optimizer.zero_grad() loss = rosenbrock(_param) loss.backward() grad_out = get_grad(_param, _sparse_grad, _w) with torch.no_grad(): _param.grad = grad_out.to_dense() return loss for i in range(2000): # Do cyclic coordinate descent w = i % 2 optimizer.step(functools.partial(eval, params, True, w)) for scheduler in schedulers: if isinstance(scheduler, PlateauLRScheduler): scheduler.step(rosenbrock(params)) else: scheduler.step() torch_tc.assertLessEqual(params.clone().detach().dist(solution), initial_dist) def _build_params_dict(weight, bias, **kwargs): return [{'params': [weight]}, dict(params=[bias], **kwargs)] def _build_params_dict_single(weight, bias, **kwargs): return [dict(params=bias, **kwargs)] @pytest.mark.parametrize('optimizer', list_optimizers(exclude_filters=('fused*', 'bnb*', 'kron*'))) def test_optim_factory(optimizer): assert issubclass(get_optimizer_class(optimizer, bind_defaults=False), torch.optim.Optimizer) opt_info = get_optimizer_info(optimizer) assert isinstance(opt_info, OptimInfo) lr = (1e-2,) * 4 if optimizer in ('mars', 'nadam', 'claprop', 'crmsproptf', 'cadafactorbv', 'csgdw', 'csgdc', 'clamb'): lr = (1e-3,) * 4 elif optimizer in ('cmars',): lr = (1e-4,) * 4 try: if not opt_info.second_order: # basic tests don't support second order right now # test basic cases that don't need specific tuning via factory test _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=lr[0]) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict(weight, bias, lr=lr[1]), optimizer, lr=lr[1] / 10) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=lr[2]), optimizer, lr=lr[2] / 10) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2( _build_params_dict_single(weight, bias, lr=lr[3]), optimizer) ) except TypeError as e: if 'radamw' in optimizer: pytest.skip("Expected for 'radamw' (decoupled decay) to fail in older PyTorch versions.") else: raise e #@pytest.mark.parametrize('optimizer', ['sgd', 'momentum']) # FIXME momentum variant frequently fails in GitHub runner, but never local after many attempts @pytest.mark.parametrize('optimizer', ['sgd']) def test_sgd(optimizer): # _test_basic_cases( # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10)] # ) # _test_basic_cases( # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")] # ) # _test_basic_cases( # lambda weight, 
bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), # lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), # lambda opt: ReduceLROnPlateau(opt)] # ) # _test_basic_cases( # lambda weight, bias: optimizer([weight, bias], lr=1e-3), # [lambda opt: StepLR(opt, gamma=0.99, step_size=10), # lambda opt: ExponentialLR(opt, gamma=0.99), # lambda opt: ReduceLROnPlateau(opt)] # ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1) ) _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax', 'nadamw', 'adamwlegacy', 'adamc']) def test_adam(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['kron']) def test_kron(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adopt', 'adoptw']) def test_adopt(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=3e-3) ) _test_model(optimizer, dict(lr=5e-2), after_step=1) # note no convergence in first step for ADOPT @pytest.mark.parametrize('optimizer', ['adan', 'adanw']) def test_adan(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=5e-2), after_step=1) # note no convergence in first step for ADOPT @pytest.mark.parametrize('optimizer', ['adabelief']) def test_adabelief(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['radam', 'radabelief']) def test_rectified(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad']) def test_adaother(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-1) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['adafactor', 'adafactorbv']) def test_adafactor(optimizer): _test_basic_cases( lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) ) _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['lamb', 'lambc']) def test_lamb(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['laprop']) def test_laprop(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['lars', 'larc', 
'nlars', 'nlarc']) def test_lars(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw']) def test_madgrad(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['mars']) def test_mars(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=5e-2), after_step=1) # note no convergence in first step for ADOPT @pytest.mark.parametrize('optimizer', ['novograd']) def test_novograd(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf']) def test_rmsprop(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) ) _test_model(optimizer, dict(lr=1e-2)) @pytest.mark.parametrize('optimizer', ['adamp']) def test_adamp(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) _test_model(optimizer, dict(lr=5e-2)) @pytest.mark.parametrize('optimizer', ['sgdp']) def test_sgdp(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) _test_model(optimizer, dict(lr=1e-3)) @pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum']) def test_lookahead_sgd(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) ) @pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam']) def test_lookahead_adam(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) ) @pytest.mark.parametrize('optimizer', ['lookahead_radam']) def test_lookahead_radam(optimizer): _test_rosenbrock( lambda params: create_optimizer_v2(params, optimizer, lr=1e-4) ) def test_param_groups_layer_decay_with_end_decay(): model = torch.nn.Sequential( torch.nn.Linear(10, 5), torch.nn.ReLU(), torch.nn.Linear(5, 2) ) param_groups = param_groups_layer_decay( model, weight_decay=0.05, layer_decay=0.75, end_layer_decay=0.5, verbose=True ) assert len(param_groups) > 0 # Verify layer scaling is applied with end decay for group in param_groups: assert 'lr_scale' in group assert group['lr_scale'] <= 1.0 assert group['lr_scale'] >= 0.5 def test_param_groups_layer_decay_with_matcher(): class ModelWithMatcher(torch.nn.Module): def __init__(self): super().__init__() self.layer1 = torch.nn.Linear(10, 5) self.layer2 = torch.nn.Linear(5, 2) def group_matcher(self, coarse=False): return lambda name: int(name.split('.')[0][-1]) model = ModelWithMatcher() param_groups = param_groups_layer_decay( model, weight_decay=0.05, layer_decay=0.75, verbose=True ) assert len(param_groups) > 0 # Verify layer scaling is applied for group in param_groups: assert 'lr_scale' in group assert 'weight_decay' in group assert len(group['params']) > 0 def test_param_groups_weight_decay(): model = torch.nn.Sequential( torch.nn.Linear(10, 5), torch.nn.ReLU(), torch.nn.Linear(5, 2) ) weight_decay = 0.01 no_weight_decay_list = ['1.weight'] param_groups = param_groups_weight_decay( model, weight_decay=weight_decay, no_weight_decay_list=no_weight_decay_list ) assert len(param_groups) == 2 assert param_groups[0]['weight_decay'] == 0.0 assert param_groups[1]['weight_decay'] == weight_decay # Verify parameters are correctly 
grouped no_decay_params = set(param_groups[0]['params']) decay_params = set(param_groups[1]['params']) for name, param in model.named_parameters(): if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list: assert param in no_decay_params else: assert param in decay_params
pytorch-image-models/tests/test_optim.py/0
{ "file_path": "pytorch-image-models/tests/test_optim.py", "repo_id": "pytorch-image-models", "token_count": 9463 }
261
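The optimizer tests above revolve around a small set of factory helpers from `timm.optim`. A usage sketch outside the test harness, with arbitrary learning-rate and weight-decay values:

```python
# Sketch of the factory APIs exercised by the tests; hyperparameters are placeholders.
import torch
from timm.optim import (
    create_optimizer_v2,
    get_optimizer_info,
    list_optimizers,
    param_groups_weight_decay,
)

model = torch.nn.Sequential(torch.nn.Linear(10, 5), torch.nn.ReLU(), torch.nn.Linear(5, 2))

print(list_optimizers()[:5])            # registered optimizer names
print(get_optimizer_info("adamw"))      # OptimInfo metadata for one of them

# create_optimizer_v2 accepts a module, a parameter iterable, or param-group dicts.
opt = create_optimizer_v2(model, opt="adamw", lr=1e-3, weight_decay=0.05)

# Helper checked by test_param_groups_weight_decay: 1-d params and biases are
# routed to the zero-weight-decay group.
groups = param_groups_weight_decay(model, weight_decay=0.05)
print([(len(g["params"]), g["weight_decay"]) for g in groups])
```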
import csv import os import pkgutil import re from typing import Dict, List, Optional, Union from .dataset_info import DatasetInfo # NOTE no ambiguity wrt to mapping from # classes to ImageNet subset so far, but likely to change _NUM_CLASSES_TO_SUBSET = { 1000: 'imagenet-1k', 11221: 'imagenet-21k-miil', # miil subset of fall11 11821: 'imagenet-12k', # timm specific 12k subset of fall11 21841: 'imagenet-22k', # as in fall11.tar 21842: 'imagenet-22k-ms', # a Microsoft (for FocalNet) remapping of 22k w/ moves ImageNet-1k classes to first 1000 21843: 'imagenet-21k-goog', # Google's ImageNet full has two classes not in fall11 } _SUBSETS = { 'imagenet1k': 'imagenet_synsets.txt', 'imagenet12k': 'imagenet12k_synsets.txt', 'imagenet22k': 'imagenet22k_synsets.txt', 'imagenet21k': 'imagenet21k_goog_synsets.txt', 'imagenet21kgoog': 'imagenet21k_goog_synsets.txt', 'imagenet21kmiil': 'imagenet21k_miil_synsets.txt', 'imagenet22kms': 'imagenet22k_ms_synsets.txt', } _LEMMA_FILE = 'imagenet_synset_to_lemma.txt' _DEFINITION_FILE = 'imagenet_synset_to_definition.txt' def infer_imagenet_subset(model_or_cfg) -> Optional[str]: if isinstance(model_or_cfg, dict): num_classes = model_or_cfg.get('num_classes', None) else: num_classes = getattr(model_or_cfg, 'num_classes', None) if not num_classes: pretrained_cfg = getattr(model_or_cfg, 'pretrained_cfg', {}) # FIXME at some point pretrained_cfg should include dataset-tag, # which will be more robust than a guess based on num_classes num_classes = pretrained_cfg.get('num_classes', None) if not num_classes or num_classes not in _NUM_CLASSES_TO_SUBSET: return None return _NUM_CLASSES_TO_SUBSET[num_classes] class ImageNetInfo(DatasetInfo): def __init__(self, subset: str = 'imagenet-1k'): super().__init__() subset = re.sub(r'[-_\s]', '', subset.lower()) assert subset in _SUBSETS, f'Unknown imagenet subset {subset}.' # WordNet synsets (part-of-speech + offset) are the unique class label names for ImageNet classifiers synset_file = _SUBSETS[subset] synset_data = pkgutil.get_data(__name__, os.path.join('_info', synset_file)) self._synsets = synset_data.decode('utf-8').splitlines() # WordNet lemmas (canonical dictionary form of word) and definitions are used to build # the class descriptions. If detailed=True both are used, otherwise just the lemmas. lemma_data = pkgutil.get_data(__name__, os.path.join('_info', _LEMMA_FILE)) reader = csv.reader(lemma_data.decode('utf-8').splitlines(), delimiter='\t') self._lemmas = dict(reader) definition_data = pkgutil.get_data(__name__, os.path.join('_info', _DEFINITION_FILE)) reader = csv.reader(definition_data.decode('utf-8').splitlines(), delimiter='\t') self._definitions = dict(reader) def num_classes(self): return len(self._synsets) def label_names(self): return self._synsets def label_descriptions(self, detailed: bool = False, as_dict: bool = False) -> Union[List[str], Dict[str, str]]: if as_dict: return {label: self.label_name_to_description(label, detailed=detailed) for label in self._synsets} else: return [self.label_name_to_description(label, detailed=detailed) for label in self._synsets] def index_to_label_name(self, index) -> str: assert 0 <= index < len(self._synsets), \ f'Index ({index}) out of range for dataset with {len(self._synsets)} classes.' 
return self._synsets[index] def index_to_description(self, index: int, detailed: bool = False) -> str: label = self.index_to_label_name(index) return self.label_name_to_description(label, detailed=detailed) def label_name_to_description(self, label: str, detailed: bool = False) -> str: if detailed: description = f'{self._lemmas[label]}: {self._definitions[label]}' else: description = f'{self._lemmas[label]}' return description
pytorch-image-models/timm/data/imagenet_info.py/0
{ "file_path": "pytorch-image-models/timm/data/imagenet_info.py", "repo_id": "pytorch-image-models", "token_count": 1732 }
262
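A usage sketch for the ImageNet metadata helpers in the record above. It assumes `ImageNetInfo` and `infer_imagenet_subset` are re-exported from `timm.data` (otherwise they can be imported from `timm.data.imagenet_info`); the model name is an arbitrary example:

```python
# Map class indices of an ImageNet classifier to human-readable descriptions.
import timm
from timm.data import ImageNetInfo, infer_imagenet_subset

model = timm.create_model("resnet50", pretrained=False)   # 1000-class head
subset = infer_imagenet_subset(model)                      # -> 'imagenet-1k'
info = ImageNetInfo(subset or "imagenet-1k")

print(info.num_classes())                   # 1000
print(info.index_to_label_name(0))          # WordNet synset id, e.g. 'n01440764'
print(info.index_to_description(0, detailed=True))   # "lemma: definition" form
```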
""" A dataset reader that extracts images from folders Folders are scanned recursively to find image files. Labels are based on the folder hierarchy, just leaf folders by default. Hacked together by / Copyright 2020 Ross Wightman """ import os from typing import Dict, List, Optional, Set, Tuple, Union from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader def find_images_and_targets( folder: str, types: Optional[Union[List, Tuple, Set]] = None, class_to_idx: Optional[Dict] = None, leaf_name_only: bool = True, sort: bool = True ): """ Walk folder recursively to discover images and map them to classes by folder names. Args: folder: root of folder to recursively search types: types (file extensions) to search for in path class_to_idx: specify mapping for class (folder name) to class index if set leaf_name_only: use only leaf-name of folder walk for class names sort: re-sort found images by name (for consistent ordering) Returns: A list of image and target tuples, class_to_idx mapping """ types = get_img_extensions(as_set=True) if not types else set(types) labels = [] filenames = [] for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True): rel_path = os.path.relpath(root, folder) if (root != folder) else '' label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') for f in files: base, ext = os.path.splitext(f) if ext.lower() in types: filenames.append(os.path.join(root, f)) labels.append(label) if class_to_idx is None: # building class index unique_labels = set(labels) sorted_labels = list(sorted(unique_labels, key=natural_key)) class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx] if sort: images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) return images_and_targets, class_to_idx class ReaderImageFolder(Reader): def __init__( self, root, class_map='', input_key=None, ): super().__init__() self.root = root class_to_idx = None if class_map: class_to_idx = load_class_map(class_map, root) find_types = None if input_key: find_types = input_key.split(';') self.samples, self.class_to_idx = find_images_and_targets( root, class_to_idx=class_to_idx, types=find_types, ) if len(self.samples) == 0: raise RuntimeError( f'Found 0 images in subfolders of {root}. ' f'Supported image extensions are {", ".join(get_img_extensions())}') def __getitem__(self, index): path, target = self.samples[index] return open(path, 'rb'), target def __len__(self): return len(self.samples) def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0] if basename: filename = os.path.basename(filename) elif not absolute: filename = os.path.relpath(filename, self.root) return filename
pytorch-image-models/timm/data/readers/reader_image_folder.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_image_folder.py", "repo_id": "pytorch-image-models", "token_count": 1510 }
263
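The folder reader above can also be used on its own. A sketch assuming a hypothetical directory layout with one sub-folder per class:

```python
# Hypothetical layout (not part of the repo):
#   /data/train/cat/001.jpg
#   /data/train/dog/002.jpg
from timm.data.readers.reader_image_folder import (
    ReaderImageFolder,
    find_images_and_targets,
)

# Low-level helper: returns (filename, class_index) tuples plus the label mapping.
samples, class_to_idx = find_images_and_targets("/data/train")
print(class_to_idx)        # e.g. {'cat': 0, 'dog': 1}, folder names sorted naturally

# Reader used by timm datasets: indexing yields an open binary file and the target.
reader = ReaderImageFolder("/data/train")
print(len(reader))
file_obj, target = reader[0]
```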
from typing import List, Optional, Type, Union import torch from torch import nn as nn from torch.nn import functional as F from .config import use_fused_attn from .create_conv2d import create_conv2d from .helpers import to_2tuple from .pool2d_same import create_pool2d class MultiQueryAttentionV2(nn.Module): """Multi Query Attention. Fast Transformer Decoding: One Write-Head is All You Need https://arxiv.org/pdf/1911.02150.pdf This is an acceletor optimized version - removing multiple unnecessary tensor transpose by re-arranging indices according to the following rules: 1) contracted indices are at the end, 2) other indices have the same order in the input and output tensores. Compared to V1, this gives 3x speed up. """ def __init__( self, dim: int, dim_out: Optional[int] = None, num_heads: int = 8, key_dim: int = 64, value_dim: int = 64, attn_drop: float = 0., proj_drop: float = 0., ): """Initializer.""" super().__init__() dim_out = dim_out or dim self.num_heads = num_heads self.key_dim = key_dim self.value_dim = value_dim self.scale = key_dim ** -0.5 self.query_proj = nn.Parameter(torch.randn([self.num_heads, self.key_dim, dim])) self.key_proj = nn.Parameter(torch.randn([dim, self.key_dim])) self.value_proj = nn.Parameter(torch.randn([dim, self.value_dim])) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Parameter(torch.randn([dim_out, self.num_heads, self.value_dim])) self.proj_drop = nn.Dropout(proj_drop) def _reshape_input(self, t): """Reshapes a tensor to three dimensions, keeping the first and last.""" s = t.shape # Propagate the shape statically where possible. #num = t.shape[1:-1].numel() #return t.reshape(s[0], num, s[-1]) return t.reshape(s[0], s[1], -1).transpose(1, 2) def forward(self, x, m: Optional[torch.Tensor] = None): """Run layer computation.""" b, _, h, w = x.shape m = m if m is not None else x reshaped_x = self._reshape_input(x) reshaped_m = self._reshape_input(m) q = torch.einsum('bnd,hkd->bnhk', reshaped_x, self.query_proj) k = torch.einsum('bmd,dk->bmk', reshaped_m, self.key_proj) attn = torch.einsum('bnhk,bmk->bnhm', q, k) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) v = torch.einsum('bmd,dv->bmv', reshaped_m, self.value_proj) o = torch.einsum('bnhm,bmv->bnhv', attn, v) result = torch.einsum('bnhv,dhv->bdn', o, self.out_proj) result = self.proj_drop(result) return result.reshape(b, -1, h, w) class MultiQueryAttention2d(nn.Module): """Multi Query Attention with spatial downsampling. 3 parameters are introduced for the spatial downsampling: 1. kv_stride: downsampling factor on Key and Values only. 2. query_strides: horizontal & vertical strides on Query only. This is an optimized version. 1. Projections in Attention is explicit written out as 1x1 Conv2D. 2. Additional reshapes are introduced to bring a up to 3x speed up. """ fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, dim_out: Optional[int] = None, num_heads: int = 8, key_dim: Optional[int] = None, value_dim: Optional[int] = None, query_strides: int = 1, kv_stride: int = 1, dw_kernel_size: int = 3, dilation: int = 1, padding: Union[str, int, List[int]] = '', attn_drop: float = 0., proj_drop: float = 0., norm_layer: Type[nn.Module] = nn.BatchNorm2d, use_bias: bool = False, ): """Initializer. Args: num_heads: Number of attention heads. key_dim: Size of the attention key dimension. value_dim: Size of the attention value dimension. query_strides: Vertical stride size for query only. kv_stride: Key and value stride size. 
dw_kernel_size: Spatial dimension of the depthwise kernel. """ super().__init__() dim_out = dim_out or dim self.num_heads = num_heads self.key_dim = key_dim or dim // num_heads self.value_dim = value_dim or dim // num_heads self.query_strides = to_2tuple(query_strides) self.kv_stride = kv_stride self.has_query_strides = any([s > 1 for s in self.query_strides]) self.scale = self.key_dim ** -0.5 self.fused_attn = use_fused_attn() self.drop = attn_drop self.query = nn.Sequential() if self.has_query_strides: # FIXME dilation if padding == 'same': self.query.add_module('down_pool', create_pool2d( 'avg', kernel_size=self.query_strides, padding='same', )) else: # no pad if not 'same' as kern=stride=even self.query.add_module('down_pool', nn.AvgPool2d(kernel_size=query_strides)) self.query.add_module('norm', norm_layer(dim)) self.query.add_module('proj', create_conv2d( dim, self.num_heads * self.key_dim, kernel_size=1, bias=use_bias, )) self.key = nn.Sequential() if kv_stride > 1: self.key.add_module('down_conv', create_conv2d( dim, dim, kernel_size=dw_kernel_size, stride=kv_stride, dilation=dilation, padding=padding, depthwise=True, )) self.key.add_module('norm', norm_layer(dim)) self.key.add_module('proj', create_conv2d( dim, self.key_dim, kernel_size=1, padding=padding, bias=use_bias, )) self.value = nn.Sequential() if kv_stride > 1: self.value.add_module('down_conv', create_conv2d( dim, dim, kernel_size=dw_kernel_size, stride=kv_stride, dilation=dilation, padding=padding, depthwise=True, )) self.value.add_module('norm', norm_layer(dim)) self.value.add_module('proj', create_conv2d( dim, self.value_dim, kernel_size=1, bias=use_bias, )) self.attn_drop = nn.Dropout(attn_drop) self.output = nn.Sequential() if self.has_query_strides: self.output.add_module('upsample', nn.Upsample(scale_factor=self.query_strides, mode='bilinear', align_corners=False)) self.output.add_module('proj', create_conv2d( self.value_dim * self.num_heads, dim_out, kernel_size=1, bias=use_bias, )) self.output.add_module('drop', nn.Dropout(proj_drop)) self.einsum = False def init_weights(self): # using xavier appeared to improve stability for mobilenetv4 hybrid w/ this layer nn.init.xavier_uniform_(self.query.proj.weight) nn.init.xavier_uniform_(self.key.proj.weight) nn.init.xavier_uniform_(self.value.proj.weight) if self.kv_stride > 1: nn.init.xavier_uniform_(self.key.down_conv.weight) nn.init.xavier_uniform_(self.value.down_conv.weight) nn.init.xavier_uniform_(self.output.proj.weight) def _reshape_input(self, t: torch.Tensor): """Reshapes a tensor to three dimensions, keeping the batch and channels.""" s = t.shape t = t.reshape(s[0], s[1], -1).transpose(1, 2) if self.einsum: return t else: return t.unsqueeze(1).contiguous() def _reshape_projected_query(self, t: torch.Tensor, num_heads: int, key_dim: int): """Reshapes projected query: [b, n, n, h x k] -> [b, n x n, h, k].""" s = t.shape t = t.reshape(s[0], num_heads, key_dim, -1) if self.einsum: return t.permute(0, 3, 1, 2).contiguous() else: return t.transpose(-1, -2).contiguous() def _reshape_output(self, t: torch.Tensor, num_heads: int, h_px: int, w_px: int): """Reshape output:[b, n x n x h, k] -> [b, n, n, hk].""" s = t.shape feat_dim = s[-1] * num_heads if not self.einsum: t = t.transpose(1, 2) return t.reshape(s[0], h_px, w_px, feat_dim).permute(0, 3, 1, 2).contiguous() def forward(self, x, attn_mask: Optional[torch.Tensor] = None): """Run layer computation.""" B, C, H, W = s = x.shape q = self.query(x) # desired q shape: [b, h, k, n x n] - [b, l, h, k] q = 
self._reshape_projected_query(q, self.num_heads, self.key_dim) k = self.key(x) # output shape of k: [b, k, p], p = m x m k = self._reshape_input(k) v = self.value(x) # output shape of v: [ b, p, k], p = m x m v = self._reshape_input(v) # desired q shape: [b, n x n, h, k] # desired k shape: [b, m x m, k] # desired logits shape: [b, n x n, h, m x m] if self.einsum: attn = torch.einsum('blhk,bpk->blhp', q, k) * self.scale if attn_mask is not None: # NOTE: assumes mask is float and in correct shape attn = attn + attn_mask attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) o = torch.einsum('blhp,bpk->blhk', attn, v) else: if self.fused_attn: o = F.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0. ) else: q = q * self.scale attn = q @ k.transpose(-1, -2) if attn_mask is not None: # NOTE: assumes mask is float and in correct shape attn = attn + attn_mask attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) o = attn @ v # reshape o into [b, hk, n, n,] o = self._reshape_output(o, self.num_heads, H // self.query_strides[0], W // self.query_strides[1]) x = self.output(o) return x class Attention2d(nn.Module): fused_attn: torch.jit.Final[bool] """ multi-head attention for 2D NCHW tensors""" def __init__( self, dim: int, dim_out: Optional[int] = None, num_heads: int = 32, bias: bool = True, expand_first: bool = False, head_first: bool = False, attn_drop: float = 0., proj_drop: float = 0. ): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first else dim self.num_heads = num_heads self.dim_head = dim_attn // num_heads self.head_first = head_first self.fused_attn = use_fused_attn() self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, attn_mask: Optional[torch.Tensor] = None): B, C, H, W = x.shape if self.head_first: q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) else: q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q.transpose(-1, -2).contiguous(), k.transpose(-1, -2).contiguous(), v.transpose(-1, -2).contiguous(), attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0., ).transpose(-1, -2).reshape(B, -1, H, W) else: q = q.transpose(-1, -2) v = v.transpose(-1, -2) attn = q @ k * q.size(-1) ** -0.5 if attn_mask is not None: # NOTE: assumes mask is float and in correct shape attn = attn + attn_mask attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(-1, -2).reshape(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x
pytorch-image-models/timm/layers/attention2d.py/0
{ "file_path": "pytorch-image-models/timm/layers/attention2d.py", "repo_id": "pytorch-image-models", "token_count": 6678 }
264
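A quick shape-check sketch for the NCHW attention modules defined in the record above; the feature-map size, head count, and strides are arbitrary:

```python
import torch
from timm.layers.attention2d import Attention2d, MultiQueryAttention2d

x = torch.randn(2, 64, 16, 16)   # NCHW feature map

# Plain multi-head attention over flattened spatial positions.
attn = Attention2d(dim=64, dim_out=64, num_heads=8)
print(attn(x).shape)             # torch.Size([2, 64, 16, 16])

# Multi-query variant: 2x strided queries and 2x downsampled keys/values,
# with the output upsampled back to the input resolution.
mqa = MultiQueryAttention2d(
    dim=64,
    dim_out=64,
    num_heads=8,
    query_strides=2,
    kv_stride=2,
    padding='same',
)
mqa.init_weights()
print(mqa(x).shape)              # torch.Size([2, 64, 16, 16])
```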