text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
import inspect
import warnings
from typing import Callable, List, Optional, Union

import numpy as np
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

from ...configuration_utils import FrozenDict
from ...image_processor import PipelineImageInput
from ...loaders import IPAdapterMixin
from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import deprecate, is_torch_xla_available, logging
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin
from . import StableDiffusionSafePipelineOutput
from .safety_checker import SafeStableDiffusionSafetyChecker


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class StableDiffusionPipelineSafe(DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin, IPAdapterMixin):
    _last_supported_version = "0.33.1"
    r"""
    Pipeline based on the [`StableDiffusionPipeline`] for text-to-image generation using Safe Latent Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for
            more details about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"
    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: SafeStableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        # Default textual description of unsafe content; used as the SLD guidance prompt.
        safety_concept: Optional[str] = (
            "an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity,"
            " bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child"
            " abuse, brutality, cruelty"
        )

        # Backwards-compat: patch outdated scheduler configs in place via FrozenDict swap.
        if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                # Fixed: the f-prefix was missing, so `{self.__class__}` was emitted literally.
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        # Backwards-compat: old (< 0.9.0) checkpoints stored sample_size=32; bump to 64.
        is_unet_version_less_0_9_0 = (
            unet is not None
            and hasattr(unet.config, "_diffusers_version")
            and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
        )
        is_unet_sample_size_less_64 = (
            unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        )
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
                " \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self._safety_text_concept = safety_concept
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    @property
    def safety_concept(self):
        r"""
        Getter method for the safety concept used with SLD

        Returns:
            `str`: The text describing the safety concept
        """
        return self._safety_text_concept

    @safety_concept.setter
    def safety_concept(self, concept):
        r"""
        Setter method for the safety concept used with SLD

        Args:
            concept (`str`):
                The text of the new safety concept
        """
        self._safety_text_concept = concept

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt,
        enable_safety_guidance,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            enable_safety_guidance (`bool`):
                whether to additionally encode the safety concept so SLD can run a third forward pass
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids

        # Warn the user about any prompt text that fell past the CLIP context window.
        if not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        prompt_embeds = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        prompt_embeds = prompt_embeds[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # Encode the safety concept text
            if enable_safety_guidance:
                safety_concept_input = self.tokenizer(
                    [self._safety_text_concept],
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0]

                # duplicate safety embeddings for each generation per prompt, using mps friendly method
                seq_len = safety_embeddings.shape[1]
                safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1)
                safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

                # For classifier free guidance + sld, we need to do three forward passes.
                # Here we concatenate the unconditional and text embeddings into a single batch
                # to avoid doing three forward passes
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, safety_embeddings])
            else:
                # For classifier free guidance, we need to do two forward passes.
                # Here we concatenate the unconditional and text embeddings into a single batch
                # to avoid doing two forward passes
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    def run_safety_checker(self, image, device, dtype, enable_safety_guidance):
        """Run the NSFW classifier on `image`; flagged images are blacked out and the
        originals preserved in `flagged_images` (or `None` when no checker is loaded)."""
        if self.safety_checker is not None:
            images = image.copy()
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
            # Fixed: allocate one slot per image in the batch (previously hard-coded to 2,
            # which raised IndexError for batches larger than 2).
            flagged_images = np.zeros(image.shape)
            if any(has_nsfw_concept):
                logger.warning(
                    "Potential NSFW content was detected in one or more images. A black image will be returned"
                    " instead."
                    f"{'You may look at this images in the `unsafe_images` variable of the output at your own discretion.' if enable_safety_guidance else 'Try again with a different prompt and/or seed.'}"
                )
                # Fixed: use a distinct loop variable so `has_nsfw_concept` remains the
                # per-image list instead of being clobbered by its last element.
                for idx, nsfw_detected in enumerate(has_nsfw_concept):
                    if nsfw_detected:
                        flagged_images[idx] = images[idx]
                        image[idx] = np.zeros(image[idx].shape)  # black image
        else:
            has_nsfw_concept = None
            flagged_images = None
        return image, has_nsfw_concept, flagged_images

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def perform_safety_guidance(
        self,
        enable_safety_guidance,
        safety_momentum,
        noise_guidance,
        noise_pred_out,
        i,
        sld_guidance_scale,
        sld_warmup_steps,
        sld_threshold,
        sld_momentum_scale,
        sld_mom_beta,
    ):
        """Apply one Safe Latent Diffusion guidance step (equations from the SLD paper),
        returning the adjusted `noise_guidance` and the updated `safety_momentum`."""
        # Perform SLD guidance
        if enable_safety_guidance:
            if safety_momentum is None:
                safety_momentum = torch.zeros_like(noise_guidance)
            noise_pred_text, noise_pred_uncond = noise_pred_out[0], noise_pred_out[1]
            noise_pred_safety_concept = noise_pred_out[2]

            # Equation 6
            scale = torch.clamp(torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0)

            # Equation 6
            safety_concept_scale = torch.where(
                (noise_pred_text - noise_pred_safety_concept) >= sld_threshold, torch.zeros_like(scale), scale
            )

            # Equation 4
            noise_guidance_safety = torch.mul((noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)

            # Equation 7
            noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum

            # Equation 8
            safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety

            if i >= sld_warmup_steps:  # Warmup
                # Equation 3
                noise_guidance = noise_guidance - noise_guidance_safety
        return noise_guidance, safety_momentum

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        sld_guidance_scale: Optional[float] = 1000,
        sld_warmup_steps: Optional[int] = 10,
        sld_threshold: Optional[float] = 0.01,
        sld_momentum_scale: Optional[float] = 0.3,
        sld_mom_beta: Optional[float] = 0.4,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only
                applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that calls every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            sld_guidance_scale (`float`, *optional*, defaults to 1000):
                If `sld_guidance_scale < 1`, safety guidance is disabled.
            sld_warmup_steps (`int`, *optional*, defaults to 10):
                Number of warmup steps for safety guidance. SLD is only be applied for diffusion steps greater than
                `sld_warmup_steps`.
            sld_threshold (`float`, *optional*, defaults to 0.01):
                Threshold that separates the hyperplane between appropriate and inappropriate images.
            sld_momentum_scale (`float`, *optional*, defaults to 0.3):
                Scale of the SLD momentum to be added to the safety guidance at each diffusion step. If set to 0.0,
                momentum is disabled. Momentum is built up during warmup for diffusion steps smaller than
                `sld_warmup_steps`.
            sld_mom_beta (`float`, *optional*, defaults to 0.4):
                Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous
                momentum is kept. Momentum is built up during warmup for diffusion steps smaller than
                `sld_warmup_steps`.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.

        Examples:

        ```py
        import torch
        from diffusers import StableDiffusionPipelineSafe
        from diffusers.pipelines.stable_diffusion_safe import SafetyConfig

        pipeline = StableDiffusionPipelineSafe.from_pretrained(
            "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16
        ).to("cuda")
        prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker"
        image = pipeline(prompt=prompt, **SafetyConfig.MEDIUM).images[0]
        ```
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        enable_safety_guidance = sld_guidance_scale > 1.0 and do_classifier_free_guidance
        if not enable_safety_guidance:
            warnings.warn("Safety checker disabled!")

        if ip_adapter_image is not None:
            output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
            image_embeds, negative_image_embeds = self.encode_image(
                ip_adapter_image, device, num_images_per_prompt, output_hidden_state
            )
            if do_classifier_free_guidance:
                # With SLD enabled the unet runs three passes, so the image embeds are tripled.
                if enable_safety_guidance:
                    image_embeds = torch.cat([negative_image_embeds, image_embeds, image_embeds])
                else:
                    image_embeds = torch.cat([negative_image_embeds, image_embeds])

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6.1 Add image embeds for IP-Adapter
        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None

        safety_momentum = None

        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = (
                    torch.cat([latents] * (3 if enable_safety_guidance else 2))
                    if do_classifier_free_guidance
                    else latents
                )
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2))
                    noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]

                    # default classifier free guidance
                    noise_guidance = noise_pred_text - noise_pred_uncond

                    # Perform SLD guidance
                    if enable_safety_guidance:
                        if safety_momentum is None:
                            safety_momentum = torch.zeros_like(noise_guidance)
                        noise_pred_safety_concept = noise_pred_out[2]

                        # Equation 6
                        scale = torch.clamp(
                            torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0
                        )

                        # Equation 6
                        safety_concept_scale = torch.where(
                            (noise_pred_text - noise_pred_safety_concept) >= sld_threshold,
                            torch.zeros_like(scale),
                            scale,
                        )

                        # Equation 4
                        noise_guidance_safety = torch.mul(
                            (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale
                        )

                        # Equation 7
                        noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum

                        # Equation 8
                        safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety

                        if i >= sld_warmup_steps:  # Warmup
                            # Equation 3
                            noise_guidance = noise_guidance - noise_guidance_safety

                    noise_pred = noise_pred_uncond + guidance_scale * noise_guidance

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

                if XLA_AVAILABLE:
                    xm.mark_step()

        # 8. Post-processing
        image = self.decode_latents(latents)

        # 9. Run safety checker
        image, has_nsfw_concept, flagged_images = self.run_safety_checker(
            image, device, prompt_embeds.dtype, enable_safety_guidance
        )

        # 10. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)
            if flagged_images is not None:
                flagged_images = self.numpy_to_pil(flagged_images)

        if not return_dict:
            return (
                image,
                has_nsfw_concept,
                self._safety_text_concept if enable_safety_guidance else None,
                flagged_images,
            )

        return StableDiffusionSafePipelineOutput(
            images=image,
            nsfw_content_detected=has_nsfw_concept,
            applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None,
            unsafe_images=flagged_images,
        )
diffusers/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py", "repo_id": "diffusers", "token_count": 17680 }
173
import torch.nn as nn

from ...utils import is_accelerate_available, logging


logger = logging.get_logger(__name__)


if is_accelerate_available():
    from accelerate import init_empty_weights


def _replace_with_quanto_layers(model, quantization_config, modules_to_not_convert: list, pre_quantized=False):
    """
    Replace every convertible `nn.Linear` in `model` with an optimum-quanto `QLinear` of the
    requested weight dtype, then return the (mutated) model.

    Args:
        model: The `nn.Module` whose linear layers should be quantized in place.
        quantization_config: Config object providing `weights_dtype` (one of "float8", "int8",
            "int4", "int2").
        modules_to_not_convert (`list`): Child-module names to leave untouched.
        pre_quantized (`bool`): When `True`, freeze the model so its state dict matches an
            already-quantized checkpoint.
    """
    # Quanto imports diffusers internally. These are placed here to avoid circular imports
    from optimum.quanto import QLinear, freeze, qfloat8, qint2, qint4, qint8

    # Mapping from the config's string dtype to the quanto weight type.
    _weight_types = {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2}

    def _get_weight_type(dtype: str):
        return _weight_types[dtype]

    def _convert(root, config, skip_names):
        # Leaf modules have nothing to recurse into or replace.
        if not list(root.children()):
            return root
        for child_name, child in root.named_children():
            # Recurse first, then decide whether this direct child itself is converted.
            _convert(child, config, skip_names)
            if child_name in skip_names:
                continue
            if not isinstance(child, nn.Linear):
                continue
            # Materialize the replacement on the meta device; real weights are loaded later.
            with init_empty_weights():
                quantized_linear = QLinear(
                    in_features=child.in_features,
                    out_features=child.out_features,
                    bias=child.bias is not None,
                    dtype=child.weight.dtype,
                    weights=_get_weight_type(config.weights_dtype),
                )
            quantized_linear.source_cls = type(child)
            quantized_linear.requires_grad_(False)
            root._modules[child_name] = quantized_linear
        return root

    model = _convert(model, quantization_config, modules_to_not_convert)

    # Warn when nothing was converted — quantization would silently be a no-op otherwise.
    replaced = any(isinstance(candidate, QLinear) for _, candidate in model.named_modules())
    if not replaced:
        logger.warning(
            f"{model.__class__.__name__} does not appear to have any `nn.Linear` modules. Quantization will not be applied."
            " Please check your model architecture, or submit an issue on Github if you think this is a bug."
            " https://github.com/huggingface/diffusers/issues/new"
        )

    # We need to freeze the pre_quantized model in order for the loaded state_dict and model state dict
    # to match when trying to load weights with load_model_dict_into_meta
    if pre_quantized:
        freeze(model)

    return model
diffusers/src/diffusers/quantizers/quanto/utils.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/quanto/utils.py", "repo_id": "diffusers", "token_count": 1048 }
174
# Copyright 2025 Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    broadcast_to_shape_from_left,
)


@flax.struct.dataclass
class EulerDiscreteSchedulerState:
    # Immutable scheduler state; Flax schedulers are stateless objects, so all
    # mutable quantities live in this struct and are threaded through calls.

    # Shared quantities (betas, alphas, alphas_cumprod, ...) common to all Flax schedulers.
    common: CommonSchedulerState

    # setable values
    # Scale of the initial noise (used to scale the very first latents).
    init_noise_sigma: jnp.ndarray
    # Discrete timesteps for the current schedule, in decreasing order.
    timesteps: jnp.ndarray
    # Noise levels matching `timesteps`, with a trailing 0.0 appended.
    sigmas: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(
        cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray
    ):
        """Convenience constructor that leaves `num_inference_steps` unset."""
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas)


@dataclass
class FlaxEulerDiscreteSchedulerOutput(FlaxSchedulerOutput):
    # Output of `step`, bundling the previous sample with the (unchanged) state.
    state: EulerDiscreteSchedulerState


class FlaxEulerDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Euler scheduler (Algorithm 2) from Karras et al. (2022) https://huggingface.co/papers/2206.00364.

    Based on the original k-diffusion implementation by Katherine Crowson:
    https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L51

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
        beta_start (`float`): the starting `beta` value of inference.
        beta_end (`float`): the final `beta` value.
        beta_schedule (`str`):
            the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear` or `scaled_linear`.
        trained_betas (`jnp.ndarray`, optional):
            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
        prediction_type (`str`, default `epsilon`, optional):
            prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
            process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4
            https://imagen.research.google/video/paper.pdf)
        timestep_spacing (`str`, default `linspace`, optional):
            how timesteps are spaced over the training range; `set_timesteps` supports `linspace` and `leading`.
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
            the `dtype` used for params and computation.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        # Signals to FlaxSchedulerMixin that this scheduler uses an external state struct.
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        dtype: jnp.dtype = jnp.float32,
    ):
        # All config args are captured by @register_to_config; only dtype is kept locally.
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> EulerDiscreteSchedulerState:
        """Build the initial scheduler state covering the full training schedule."""
        if common is None:
            common = CommonSchedulerState.create(self)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        # sigma_t = sqrt((1 - alpha_bar_t) / alpha_bar_t), interpolated onto the timesteps.
        sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5
        sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas)
        # Trailing 0.0 so `step` can always read sigmas[step_index + 1].
        sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)])

        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            init_noise_sigma = sigmas.max()
        else:
            init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5

        return EulerDiscreteSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
            sigmas=sigmas,
        )

    def scale_model_input(self, state: EulerDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray:
        """
        Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm.

        Args:
            state (`EulerDiscreteSchedulerState`):
                the `FlaxEulerDiscreteScheduler` state data class instance.
            sample (`jnp.ndarray`): current instance of sample being created by diffusion process.
            timestep (`int`): current discrete timestep in the diffusion chain.

        Returns:
            `jnp.ndarray`: scaled input sample
        """
        # Locate the schedule position of `timestep` (size=1 keeps this jittable).
        (step_index,) = jnp.where(state.timesteps == timestep, size=1)
        step_index = step_index[0]

        sigma = state.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self, state: EulerDiscreteSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> EulerDiscreteSchedulerState:
        """
        Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            state (`EulerDiscreteSchedulerState`):
                the `FlaxEulerDiscreteScheduler` state data class instance.
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.
            shape (`Tuple`, *optional*): unused; kept for API compatibility with other Flax schedulers.

        Returns:
            `EulerDiscreteSchedulerState`: updated state with the new timesteps and sigmas.
        """
        if self.config.timestep_spacing == "linspace":
            timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype)
        elif self.config.timestep_spacing == "leading":
            step_ratio = self.config.num_train_timesteps // num_inference_steps
            timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += 1
        else:
            raise ValueError(
                f"timestep_spacing must be one of ['linspace', 'leading'], got {self.config.timestep_spacing}"
            )

        sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5
        sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas)
        sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)])

        # standard deviation of the initial noise distribution
        # NOTE(review): "trailing" is accepted here but rejected by the branch above — confirm intended.
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            init_noise_sigma = sigmas.max()
        else:
            init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5

        return state.replace(
            timesteps=timesteps,
            sigmas=sigmas,
            num_inference_steps=num_inference_steps,
            init_noise_sigma=init_noise_sigma,
        )

    def step(
        self,
        state: EulerDiscreteSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxEulerDiscreteSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            state (`EulerDiscreteSchedulerState`):
                the `FlaxEulerDiscreteScheduler` state data class instance.
            model_output (`jnp.ndarray`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`jnp.ndarray`):
                current instance of sample being created by diffusion process.
            return_dict (`bool`): option for returning tuple rather than FlaxEulerDiscreteScheduler class

        Returns:
            [`FlaxEulerDiscreteSchedulerOutput`] or `tuple`: [`FlaxEulerDiscreteSchedulerOutput`] if `return_dict` is
            True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.

        Raises:
            ValueError: if `set_timesteps` was not called first, or `prediction_type` is unsupported.
        """
        if state.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        (step_index,) = jnp.where(state.timesteps == timestep, size=1)
        step_index = step_index[0]

        sigma = state.sigmas[step_index]

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            pred_original_sample = sample - sigma * model_output
        elif self.config.prediction_type == "v_prediction":
            # * c_out + input * c_skip
            pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        # 2. Convert to an ODE derivative
        derivative = (sample - pred_original_sample) / sigma

        # dt = sigma_down - sigma
        dt = state.sigmas[step_index + 1] - sigma

        # Single Euler step along the probability-flow ODE.
        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample, state)

        return FlaxEulerDiscreteSchedulerOutput(prev_sample=prev_sample, state=state)

    def add_noise(
        self,
        state: EulerDiscreteSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Add `noise` to `original_samples` at the noise level of each given timestep index."""
        # `timesteps` here indexes into `state.sigmas` directly.
        sigma = state.sigmas[timesteps].flatten()
        sigma = broadcast_to_shape_from_left(sigma, noise.shape)

        noisy_samples = original_samples + noise * sigma

        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_euler_discrete_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_euler_discrete_flax.py", "repo_id": "diffusers", "token_count": 4574 }
175
# Copyright 2025 Sana-Sprint Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..schedulers.scheduling_utils import SchedulerMixin
from ..utils import BaseOutput, logging
from ..utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->SCM
class SCMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.Tensor
    pred_original_sample: Optional[torch.Tensor] = None


class SCMScheduler(SchedulerMixin, ConfigMixin):
    """
    `SCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
    non-Markovian guidance.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the
    generic methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        prediction_type (`str`, defaults to `trigflow`):
            Prediction type of the scheduler function. Currently only supports "trigflow".
        sigma_data (`float`, defaults to 0.5):
            The standard deviation of the noise added during multi-step inference.
    """

    # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        prediction_type: str = "trigflow",
        sigma_data: float = 0.5,
    ):
        """
        Initialize the SCM scheduler.

        Args:
            num_train_timesteps (`int`, defaults to 1000):
                The number of diffusion steps to train the model.
            prediction_type (`str`, defaults to `trigflow`):
                Prediction type of the scheduler function. Currently only supports "trigflow".
            sigma_data (`float`, defaults to 0.5):
                The standard deviation of the noise added during multi-step inference.
        """
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        # Default (training) schedule: integer timesteps in decreasing order.
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))

        # Position trackers for the denoising loop; populated lazily by `step`.
        self._step_index = None
        self._begin_index = None

    @property
    def step_index(self):
        # Index of the current timestep within `self.timesteps`; `None` until the first `step`.
        return self._step_index

    @property
    def begin_index(self):
        # Optional externally-set starting index (e.g. for image-to-image); `None` by default.
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def set_timesteps(
        self,
        num_inference_steps: int,
        timesteps: torch.Tensor = None,
        device: Union[str, torch.device] = None,
        max_timesteps: float = 1.57080,
        intermediate_timesteps: float = 1.3,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            timesteps (`torch.Tensor`, *optional*):
                Custom timesteps to use for the denoising process. Must have length
                `num_inference_steps + 1`; when supplied, `max_timesteps` must be `None`.
            device (`str` or `torch.device`, *optional*):
                The device to move the timesteps to.
            max_timesteps (`float`, defaults to 1.57080):
                The maximum timestep value used in the SCM scheduler.
            intermediate_timesteps (`float`, *optional*, defaults to 1.3):
                The intermediate timestep value used in SCM scheduler (only used when num_inference_steps=2).

        Raises:
            ValueError: on inconsistent combinations of the arguments above.
        """
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        if timesteps is not None and len(timesteps) != num_inference_steps + 1:
            raise ValueError("If providing custom timesteps, `timesteps` must be of length `num_inference_steps + 1`.")

        # NOTE(review): `max_timesteps` has a non-None default, so callers passing custom
        # `timesteps` must also pass `max_timesteps=None` explicitly — confirm intended.
        if timesteps is not None and max_timesteps is not None:
            raise ValueError("If providing custom timesteps, `max_timesteps` should not be provided.")

        if timesteps is None and max_timesteps is None:
            raise ValueError("Should provide either `timesteps` or `max_timesteps`.")

        if intermediate_timesteps is not None and num_inference_steps != 2:
            raise ValueError("Intermediate timesteps for SCM is not supported when num_inference_steps != 2.")

        self.num_inference_steps = num_inference_steps

        if timesteps is not None:
            if isinstance(timesteps, list):
                self.timesteps = torch.tensor(timesteps, device=device).float()
            elif isinstance(timesteps, torch.Tensor):
                self.timesteps = timesteps.to(device).float()
            else:
                raise ValueError(f"Unsupported timesteps type: {type(timesteps)}")
        elif intermediate_timesteps is not None:
            # Two-step schedule with an explicit midpoint.
            self.timesteps = torch.tensor([max_timesteps, intermediate_timesteps, 0], device=device).float()
        else:
            # max_timesteps=arctan(80/0.5)=1.56454 is the default from sCM paper, we choose a different value here
            self.timesteps = torch.linspace(max_timesteps, 0, num_inference_steps + 1, device=device).float()

        # Reset loop position trackers for the new schedule.
        self._step_index = None
        self._begin_index = None

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
    def _init_step_index(self, timestep):
        # Resolve the starting index either from `begin_index` (if set) or by locating `timestep`.
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: float,
        sample: torch.FloatTensor,
        generator: torch.Generator = None,
        return_dict: bool = True,
    ) -> Union[SCMSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            generator (`torch.Generator`, *optional*):
                Random number generator for the multi-step noise injection.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.scheduling_scm.SCMSchedulerOutput`] or `tuple`.

        Returns:
            [`~schedulers.scheduling_scm.SCMSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_scm.SCMSchedulerOutput`] is returned, otherwise a
                tuple is returned where the first element is the sample tensor.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        # 2. compute alphas, betas
        # s is the current (larger) timestep, t is the next one in the decreasing schedule.
        t = self.timesteps[self.step_index + 1]
        s = self.timesteps[self.step_index]

        # 4. Different Parameterization:
        parameterization = self.config.prediction_type

        if parameterization == "trigflow":
            # TrigFlow parameterization: x0 = cos(s) * x_s - sin(s) * model_output.
            pred_x0 = torch.cos(s) * sample - torch.sin(s) * model_output
        else:
            raise ValueError(f"Unsupported parameterization: {parameterization}")

        # 5. Sample z ~ N(0, I), For MultiStep Inference
        # Noise is not used for one-step sampling.
        if len(self.timesteps) > 1:
            noise = (
                randn_tensor(model_output.shape, device=model_output.device, generator=generator)
                * self.config.sigma_data
            )
            prev_sample = torch.cos(t) * pred_x0 + torch.sin(t) * noise
        else:
            prev_sample = pred_x0

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (prev_sample, pred_x0)

        return SCMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_x0)

    def __len__(self):
        return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_scm.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_scm.py", "repo_id": "diffusers", "token_count": 4597 }
176
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends

# Each class below is a placeholder exposed when the "flax" and "transformers"
# backends are not installed; constructing or loading one calls
# `requires_backends`, which reports the missing backends instead of failing
# with an opaque ImportError at import time.


class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionXLPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py", "repo_id": "diffusers", "token_count": 957 }
177
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class SpectrogramDiffusionPipeline(metaclass=DummyObject): _backends = ["transformers", "torch", "note_seq"] def __init__(self, *args, **kwargs): requires_backends(self, ["transformers", "torch", "note_seq"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["transformers", "torch", "note_seq"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["transformers", "torch", "note_seq"])
diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py", "repo_id": "diffusers", "token_count": 236 }
178
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Typing utilities: Utilities related to type checking and validation """ from typing import Any, Dict, List, Set, Tuple, Type, Union, get_args, get_origin def _is_valid_type(obj: Any, class_or_tuple: Union[Type, Tuple[Type, ...]]) -> bool: """ Checks if an object is an instance of any of the provided types. For collections, it checks if every element is of the correct type as well. """ if not isinstance(class_or_tuple, tuple): class_or_tuple = (class_or_tuple,) # Unpack unions unpacked_class_or_tuple = [] for t in class_or_tuple: if get_origin(t) is Union: unpacked_class_or_tuple.extend(get_args(t)) else: unpacked_class_or_tuple.append(t) class_or_tuple = tuple(unpacked_class_or_tuple) if Any in class_or_tuple: return True obj_type = type(obj) # Classes with obj's type class_or_tuple = {t for t in class_or_tuple if isinstance(obj, get_origin(t) or t)} # Singular types (e.g. int, ControlNet, ...) # Untyped collections (e.g. List, but not List[int]) elem_class_or_tuple = {get_args(t) for t in class_or_tuple} if () in elem_class_or_tuple: return True # Typed lists or sets elif obj_type in (list, set): return any(all(_is_valid_type(x, t) for x in obj) for t in elem_class_or_tuple) # Typed tuples elif obj_type is tuple: return any( # Tuples with any length and single type (e.g. 
Tuple[int, ...]) (len(t) == 2 and t[-1] is Ellipsis and all(_is_valid_type(x, t[0]) for x in obj)) or # Tuples with fixed length and any types (e.g. Tuple[int, str]) (len(obj) == len(t) and all(_is_valid_type(x, tt) for x, tt in zip(obj, t))) for t in elem_class_or_tuple ) # Typed dicts elif obj_type is dict: return any( all(_is_valid_type(k, kt) and _is_valid_type(v, vt) for k, v in obj.items()) for kt, vt in elem_class_or_tuple ) else: return False def _get_detailed_type(obj: Any) -> Type: """ Gets a detailed type for an object, including nested types for collections. """ obj_type = type(obj) if obj_type in (list, set): obj_origin_type = List if obj_type is list else Set elems_type = Union[tuple({_get_detailed_type(x) for x in obj})] return obj_origin_type[elems_type] elif obj_type is tuple: return Tuple[tuple(_get_detailed_type(x) for x in obj)] elif obj_type is dict: keys_type = Union[tuple({_get_detailed_type(k) for k in obj.keys()})] values_type = Union[tuple({_get_detailed_type(k) for k in obj.values()})] return Dict[keys_type, values_type] else: return obj_type
diffusers/src/diffusers/utils/typing_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/typing_utils.py", "repo_id": "diffusers", "token_count": 1404 }
179
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import numpy as np
import pytest
import torch
from transformers import AutoTokenizer, GemmaForCausalLM

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    Lumina2Pipeline,
    Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device


sys.path.append(".")

from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


@require_peft_backend
class Lumina2LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    """LoRA loading/fusing tests for the Lumina2 pipeline, built on the shared PEFT-LoRA mixin."""

    pipeline_class = Lumina2Pipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

    # Tiny transformer config to keep the test fast.
    transformer_kwargs = {
        "sample_size": 4,
        "patch_size": 2,
        "in_channels": 4,
        "hidden_size": 8,
        "num_layers": 2,
        "num_attention_heads": 1,
        "num_kv_heads": 1,
        "multiple_of": 16,
        "ffn_dim_multiplier": None,
        "norm_eps": 1e-5,
        "scaling_factor": 1.0,
        "axes_dim_rope": [4, 2, 2],
        "cap_feat_dim": 8,
    }
    transformer_cls = Lumina2Transformer2DModel
    # Tiny VAE config matching the transformer's 4 latent channels.
    vae_kwargs = {
        "sample_size": 32,
        "in_channels": 3,
        "out_channels": 3,
        "block_out_channels": (4,),
        "layers_per_block": 1,
        "latent_channels": 4,
        "norm_num_groups": 1,
        "use_quant_conv": False,
        "use_post_quant_conv": False,
        "shift_factor": 0.0609,
        "scaling_factor": 1.5035,
    }
    vae_cls = AutoencoderKL
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/dummy-gemma"
    text_encoder_cls, text_encoder_id = GemmaForCausalLM, "hf-internal-testing/dummy-gemma-diffusers"

    @property
    def output_shape(self):
        # Expected (batch, height, width, channels) of the pipeline's numpy output.
        return (1, 4, 4, 3)

    def get_dummy_inputs(self, with_generator=True):
        """Return (noise, input_ids, pipeline kwargs) for a minimal deterministic run."""
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "height": 32,
            "width": 32,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    @unittest.skip("Not supported in Lumina2.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in Lumina2.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in Lumina2.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass

    @skip_mps
    @pytest.mark.xfail(
        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
        strict=False,
    )
    def test_lora_fuse_nan(self):
        """Fusing a LoRA whose weights contain `inf` must raise with safe_fusing and produce NaNs without it."""
        for scheduler_cls in self.scheduler_classes:
            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            if "text_encoder" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"
                )

            denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
            denoiser.add_adapter(denoiser_lora_config, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

            # corrupt one LoRA weight with `inf` values
            with torch.no_grad():
                pipe.transformer.layers[0].attn.to_q.lora_A["adapter-1"].weight += float("inf")

            # with `safe_fusing=True` we should see an Error
            with self.assertRaises(ValueError):
                pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)

            # without we should not see an error, but every image will be black
            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
            out = pipe(**inputs)[0]

            self.assertTrue(np.isnan(out).all())
diffusers/tests/lora/test_lora_layers_lumina2.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_lumina2.py", "repo_id": "diffusers", "token_count": 2733 }
180
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_hf_numpy, require_torch_accelerator, require_torch_accelerator_with_fp16, require_torch_gpu, skip_mps, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderKL main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [2, 4] norm_num_groups = norm_num_groups or 2 init_dict = { "block_out_channels": block_out_channels, "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), "latent_channels": 4, "norm_num_groups": norm_num_groups, } return init_dict @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def 
output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_kl_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_enable_disable_tiling(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_tiling() output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), 0.5, "VAE tiling should not affect the inference results", ) torch.manual_seed(0) model.disable_tiling() output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_tiling.detach().cpu().numpy().all(), output_without_tiling_2.detach().cpu().numpy().all(), "Without tiling outputs should match with the outputs when tiling is manually disabled.", ) def test_enable_disable_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_slicing() output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), 0.5, "VAE slicing should not affect the inference results", ) torch.manual_seed(0) model.disable_slicing() output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_slicing.detach().cpu().numpy().all(), 
output_without_slicing_2.detach().cpu().numpy().all(), "Without slicing outputs should match with the outputs when slicing is manually disabled.", ) def test_gradient_checkpointing_is_applied(self): expected_set = {"Decoder", "Encoder", "UNetMidBlock2D"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) def test_from_pretrained_hub(self): model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") model = model.to(torch_device) model.eval() # Keep generator on CPU for non-CUDA devices to compare outputs with CPU result tensors generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device if torch_device != "mps": generator = torch.Generator(device=generator_device).manual_seed(0) else: generator = torch.manual_seed(0) image = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), ) image = image.to(torch_device) with torch.no_grad(): output = model(image, sample_posterior=True, generator=generator).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": expected_output_slice = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif generator_device == "cpu": expected_output_slice = torch.tensor( [ -0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026, ] ) else: expected_output_slice = torch.tensor( [ -0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485, ] ) self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) @slow class AutoencoderKLIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False): revision = "fp16" if fp16 else None torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderKL.from_pretrained( model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision, ) model.to(torch_device) return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [ 33, [-0.1556, 0.9848, -0.0410, -0.0642, -0.2685, 0.8381, -0.2004, -0.0700], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], ], [ 47, [-0.2376, 0.1200, 0.1337, -0.4830, -0.2504, -0.0759, -0.0486, -0.4077], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], ], # fmt: on ] 
) def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stable_diffusion_fp16(self, seed, expected_slice): model = self.get_sd_vae_model(fp16=True) image = self.get_sd_image(seed, fp16=True) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-2) @parameterized.expand( [ # fmt: off [ 33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], ], [ 47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], ], # fmt: on ] ) def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if 
torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_stable_diffusion_decode(self, seed, expected_slice): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stable_diffusion_decode_fp16(self, seed, expected_slice): model = self.get_sd_vae_model(fp16=True) encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand([(13,), (16,), (27,)]) @require_torch_gpu @unittest.skipIf( not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed): model = self.get_sd_vae_model(fp16=True) encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): 
sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=1e-1) @parameterized.expand([(13,), (16,), (37,)]) @require_torch_gpu @unittest.skipIf( not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=1e-2) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def test_stable_diffusion_encode_sample(self, seed, expected_slice): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): dist = model.encode(image).latent_dist sample = dist.sample(generator=generator) assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] output_slice = sample[0, -1, -3:, -3:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) tolerance = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
diffusers/tests/models/autoencoders/test_models_autoencoder_kl.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_autoencoder_kl.py", "repo_id": "diffusers", "token_count": 8228 }
181
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import glob import inspect import json import os import re import tempfile import traceback import unittest import unittest.mock as mock import uuid import warnings from collections import defaultdict from typing import Dict, List, Optional, Tuple, Union import numpy as np import pytest import requests_mock import safetensors.torch import torch import torch.nn as nn from accelerate.utils.modeling import _get_proper_dtype, compute_module_sizes, dtype_byte_size from huggingface_hub import ModelCard, delete_repo, snapshot_download, try_to_load_from_cache from huggingface_hub.utils import is_jinja_available from parameterized import parameterized from requests.exceptions import HTTPError from diffusers.models import FluxTransformer2DModel, SD3Transformer2DModel, UNet2DConditionModel from diffusers.models.attention_processor import ( AttnProcessor, AttnProcessor2_0, AttnProcessorNPU, XFormersAttnProcessor, ) from diffusers.models.auto_model import AutoModel from diffusers.training_utils import EMAModel from diffusers.utils import ( SAFE_WEIGHTS_INDEX_NAME, WEIGHTS_INDEX_NAME, is_peft_available, is_torch_npu_available, is_xformers_available, logging, ) from diffusers.utils.hub_utils import _add_variant from diffusers.utils.testing_utils import ( CaptureLogger, _check_safetensors_serialization, backend_empty_cache, backend_max_memory_allocated, 
backend_reset_peak_memory_stats, backend_synchronize, check_if_dicts_are_equal, get_python_version, is_torch_compile, numpy_cosine_similarity_distance, require_peft_backend, require_peft_version_greater, require_torch_2, require_torch_accelerator, require_torch_accelerator_with_training, require_torch_multi_accelerator, require_torch_version_greater, run_test_in_subprocess, slow, torch_all_close, torch_device, ) from diffusers.utils.torch_utils import get_torch_cuda_device_capability from ..others.test_utils import TOKEN, USER, is_staging_test if is_peft_available(): from peft.tuners.tuners_utils import BaseTunerLayer def caculate_expected_num_shards(index_map_path): with open(index_map_path) as f: weight_map_dict = json.load(f)["weight_map"] first_key = list(weight_map_dict.keys())[0] weight_loc = weight_map_dict[first_key] # e.g., diffusion_pytorch_model-00001-of-00002.safetensors expected_num_shards = int(weight_loc.split("-")[-1].split(".")[0]) return expected_num_shards def check_if_lora_correctly_set(model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False # Will be run via run_test_in_subprocess def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): error = None try: init_dict, model_class = in_queue.get(timeout=timeout) model = model_class(**init_dict) model.to(torch_device) model = torch.compile(model) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = model_class.from_pretrained(tmpdirname) new_model.to(torch_device) assert new_model.__class__ == model_class except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def named_persistent_module_tensors( module: nn.Module, recurse: bool = False, ): """ A helper function that gathers all the tensors (parameters + persistent buffers) 
of a given module. Args: module (`torch.nn.Module`): The module we want the tensors on. recurse (`bool`, *optional`, defaults to `False`): Whether or not to go look in every submodule or just return the direct parameters and buffers. """ yield from module.named_parameters(recurse=recurse) for named_buffer in module.named_buffers(recurse=recurse): name, _ = named_buffer # Get parent by splitting on dots and traversing the model parent = module if "." in name: parent_name = name.rsplit(".", 1)[0] for part in parent_name.split("."): parent = getattr(parent, part) name = name.split(".")[-1] if name not in parent._non_persistent_buffers_set: yield named_buffer def compute_module_persistent_sizes( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, ): """ Compute the size of each submodule of a given model (parameters + persistent buffers). """ if dtype is not None: dtype = _get_proper_dtype(dtype) dtype_size = dtype_byte_size(dtype) if special_dtypes is not None: special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} module_sizes = defaultdict(int) module_list = [] module_list = named_persistent_module_tensors(model, recurse=True) for name, tensor in module_list: if special_dtypes is not None and name in special_dtypes: size = tensor.numel() * special_dtypes_size[name] elif dtype is None: size = tensor.numel() * dtype_byte_size(tensor.dtype) elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): # According to the code in set_module_tensor_to_device, these types won't be converted # so use their original size here size = tensor.numel() * dtype_byte_size(tensor.dtype) else: size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) name_parts = name.split(".") for idx in range(len(name_parts) + 1): 
module_sizes[".".join(name_parts[:idx])] += size return module_sizes def cast_maybe_tensor_dtype(maybe_tensor, current_dtype, target_dtype): if torch.is_tensor(maybe_tensor): return maybe_tensor.to(target_dtype) if maybe_tensor.dtype == current_dtype else maybe_tensor if isinstance(maybe_tensor, dict): return {k: cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for k, v in maybe_tensor.items()} if isinstance(maybe_tensor, list): return [cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for v in maybe_tensor] return maybe_tensor class ModelUtilsTest(unittest.TestCase): def tearDown(self): super().tearDown() def test_missing_key_loading_warning_message(self): with self.assertLogs("diffusers.models.modeling_utils", level="WARNING") as logs: UNet2DConditionModel.from_pretrained("hf-internal-testing/stable-diffusion-broken", subfolder="unet") # make sure that error message states what keys are missing assert "conv_out.bias" in " ".join(logs.output) @parameterized.expand( [ ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", False), ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", True), ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, False), ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, True), ] ) def test_variant_sharded_ckpt_legacy_format_raises_warning(self, repo_id, subfolder, use_local): def load_model(path): kwargs = {"variant": "fp16"} if subfolder: kwargs["subfolder"] = subfolder return UNet2DConditionModel.from_pretrained(path, **kwargs) with self.assertWarns(FutureWarning) as warning: if use_local: with tempfile.TemporaryDirectory() as tmpdirname: tmpdirname = snapshot_download(repo_id=repo_id) _ = load_model(tmpdirname) else: _ = load_model(repo_id) warning_message = str(warning.warnings[0].message) self.assertIn("This serialization format is now deprecated to standardize the serialization", warning_message) # Local tests are already covered down below. 
    @parameterized.expand(
        [
            # (repo_id, subfolder, variant) — covers the four layouts of a sharded
            # checkpoint on the Hub: with/without an fp16 variant, with/without a
            # "unet" subfolder.
            ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", None, "fp16"),
            ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "unet", "fp16"),
            ("hf-internal-testing/tiny-sd-unet-sharded-no-variants", None, None),
            ("hf-internal-testing/tiny-sd-unet-sharded-no-variants-subfolder", "unet", None),
        ]
    )
    def test_variant_sharded_ckpt_loads_from_hub(self, repo_id, subfolder, variant=None):
        """Smoke-test that a sharded checkpoint in the current (non-legacy)
        serialization format loads from the Hub for every combination of
        optional ``variant`` and ``subfolder``.

        The assertion only checks that ``from_pretrained`` returns a truthy
        model object; weight values are not compared here.
        """

        def load_model():
            # Only forward the kwargs that are actually set for this case, so
            # the None cases exercise the true defaults of `from_pretrained`.
            kwargs = {}
            if variant:
                kwargs["variant"] = variant
            if subfolder:
                kwargs["subfolder"] = subfolder
            return UNet2DConditionModel.from_pretrained(repo_id, **kwargs)

        assert load_model()

    def test_cached_files_are_used_when_no_internet(self):
        """Verify that a previously downloaded model is served from the local
        cache when the Hub is unreachable (simulated server outage) and
        ``local_files_only=True`` is passed, and that the cached weights are
        identical to the originally downloaded ones.
        """
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        orig_model = UNet2DConditionModel.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet"
        )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.request", return_value=response_mock):
            # Load the same model from the local cache; with the network down,
            # `local_files_only=True` must succeed without any request.
            model = UNet2DConditionModel.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", local_files_only=True
            )

        # The cached copy must match the originally downloaded weights
        # element-for-element; any differing element fails the test.
        for p1, p2 in zip(orig_model.parameters(), model.parameters()):
            if p1.data.ne(p2.data).sum() > 0:
                assert False, "Parameters not the same!"
def test_local_files_only_with_sharded_checkpoint(self): repo_id = "hf-internal-testing/tiny-flux-sharded" error_response = mock.Mock( status_code=500, headers={}, raise_for_status=mock.Mock(side_effect=HTTPError), json=mock.Mock(return_value={}), ) with tempfile.TemporaryDirectory() as tmpdir: model = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", cache_dir=tmpdir) with mock.patch("requests.Session.get", return_value=error_response): # Should fail with local_files_only=False (network required) # We would make a network call with model_info with self.assertRaises(OSError): FluxTransformer2DModel.from_pretrained( repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=False ) # Should succeed with local_files_only=True (uses cache) # model_info call skipped local_model = FluxTransformer2DModel.from_pretrained( repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=True ) assert all(torch.equal(p1, p2) for p1, p2 in zip(model.parameters(), local_model.parameters())), ( "Model parameters don't match!" ) # Remove a shard file cached_shard_file = try_to_load_from_cache( repo_id, filename="transformer/diffusion_pytorch_model-00001-of-00002.safetensors", cache_dir=tmpdir ) os.remove(cached_shard_file) # Attempting to load from cache should raise an error with self.assertRaises(OSError) as context: FluxTransformer2DModel.from_pretrained( repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=True ) # Verify error mentions the missing shard error_msg = str(context.exception) assert cached_shard_file in error_msg or "required according to the checkpoint index" in error_msg, ( f"Expected error about missing shard, got: {error_msg}" ) @unittest.skip("Flaky behaviour on CI. 
Re-enable after migrating to new runners") @unittest.skipIf(torch_device == "mps", reason="Test not supported for MPS.") def test_one_request_upon_cached(self): use_safetensors = False with tempfile.TemporaryDirectory() as tmpdirname: with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) download_requests = [r.method for r in m.request_history] assert download_requests.count("HEAD") == 3, ( "3 HEAD requests one for config, one for model, and one for shard index file." ) assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model" with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) cache_requests = [r.method for r in m.request_history] assert "HEAD" == cache_requests[0] and len(cache_requests) == 2, ( "We should call only `model_info` to check for commit hash and knowing if shard index is present." 
) def test_weight_overwrite(self): with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, ) # make sure that error message states what keys are missing assert "Cannot load" in str(error_context.exception) with tempfile.TemporaryDirectory() as tmpdirname: model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True, ) assert model.config.in_channels == 9 @require_torch_accelerator def test_keep_modules_in_fp32(self): r""" A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32 when we load the model in fp16/bf16 Also ensures if inference works. """ fp32_modules = SD3Transformer2DModel._keep_in_fp32_modules for torch_dtype in [torch.bfloat16, torch.float16]: SD3Transformer2DModel._keep_in_fp32_modules = ["proj_out"] model = SD3Transformer2DModel.from_pretrained( "hf-internal-testing/tiny-sd3-pipe", subfolder="transformer", torch_dtype=torch_dtype ).to(torch_device) for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if name in model._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.float32) else: self.assertTrue(module.weight.dtype == torch_dtype) def get_dummy_inputs(): batch_size = 2 num_channels = 4 height = width = embedding_dim = 32 pooled_embedding_dim = embedding_dim * 2 sequence_length = 154 hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": 
hidden_states, "encoder_hidden_states": encoder_hidden_states, "pooled_projections": pooled_prompt_embeds, "timestep": timestep, } # test if inference works. with torch.no_grad() and torch.amp.autocast(torch_device, dtype=torch_dtype): input_dict_for_transformer = get_dummy_inputs() model_inputs = { k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) } model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) _ = model(**model_inputs) SD3Transformer2DModel._keep_in_fp32_modules = fp32_modules class UNetTesterMixin: def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") class ModelTesterMixin: main_input_name = None # overwrite in model specific tester class base_precision = 1e-3 forward_requires_fresh_args = False model_split_percents = [0.5, 0.7, 0.9] uses_custom_attn_processor = False def check_device_map_is_respected(self, model, device_map): for param_name, param in model.named_parameters(): # Find device in device_map while len(param_name) > 0 and param_name not in device_map: param_name = ".".join(param_name.split(".")[:-1]) if param_name not in device_map: raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") param_device = device_map[param_name] if param_device in ["cpu", "disk"]: self.assertEqual(param.device, torch.device("meta")) else: self.assertEqual(param.device, torch.device(param_device)) def test_from_save_pretrained(self, 
expected_max_diff=5e-5):
        # NOTE(review): formatting in this chunk was reconstructed; tokens are unchanged.
        # Tail of `test_from_save_pretrained`: a model saved with `save_pretrained` and
        # reloaded with `from_pretrained` must reproduce the forward pass within
        # `expected_max_diff`.
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        model.to(torch_device)
        model.eval()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
            new_model = self.model_class.from_pretrained(tmpdirname)
            if hasattr(new_model, "set_default_attn_processor"):
                new_model.set_default_attn_processor()
            new_model.to(torch_device)

        with torch.no_grad():
            if self.forward_requires_fresh_args:
                image = model(**self.inputs_dict(0))
            else:
                image = model(**inputs_dict)
            if isinstance(image, dict):
                image = image.to_tuple()[0]

            if self.forward_requires_fresh_args:
                new_image = new_model(**self.inputs_dict(0))
            else:
                new_image = new_model(**inputs_dict)
            if isinstance(new_image, dict):
                new_image = new_image.to_tuple()[0]

        max_diff = (image - new_image).abs().max().item()
        self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")

    def test_getattr_is_correct(self):
        """Attribute access must be transparent: real attributes and methods resolve with
        no log output, deprecated config attributes raise a ``FutureWarning``, and unknown
        names raise ``AttributeError`` with the standard message."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        # save some things to test
        model.dummy_attribute = 5
        model.register_to_config(test_attribute=5)

        logger = logging.get_logger("diffusers.models.modeling_utils")
        # 30 for warning
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            assert hasattr(model, "dummy_attribute")
            assert getattr(model, "dummy_attribute") == 5
            assert model.dummy_attribute == 5

        # no warning should be thrown
        assert cap_logger.out == ""

        logger = logging.get_logger("diffusers.models.modeling_utils")
        # 30 for warning
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            assert hasattr(model, "save_pretrained")
            fn = model.save_pretrained
            fn_1 = getattr(model, "save_pretrained")
            assert fn == fn_1

        # no warning should be thrown
        assert cap_logger.out == ""

        # warning should be thrown for the deprecated config attribute
        with self.assertWarns(FutureWarning):
            assert model.test_attribute == 5

        with self.assertWarns(FutureWarning):
            assert getattr(model, "test_attribute") == 5

        with self.assertRaises(AttributeError) as error:
            model.does_not_exist

        assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'"

    @unittest.skipIf(
        torch_device != "npu" or not is_torch_npu_available(),
        reason="torch npu flash attention is only available with NPU and `torch_npu` installed",
    )
    def test_set_torch_npu_flash_attn_processor_determinism(self):
        """On NPU, the default, `enable_npu_flash_attention`, and explicitly-set
        `AttnProcessorNPU` paths must all produce the same output (atol `self.base_precision`)."""
        torch.use_deterministic_algorithms(False)
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        model.to(torch_device)

        if not hasattr(model, "set_attn_processor"):
            # If not has `set_attn_processor`, skip test
            return

        model.set_default_attn_processor()
        # NOTE(review): asserting AttnProcessorNPU right after set_default implies the
        # default on NPU is already the NPU processor — confirm against `set_default_attn_processor`.
        assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output = model(**self.inputs_dict(0))[0]
            else:
                output = model(**inputs_dict)[0]

        model.enable_npu_flash_attention()
        assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_2 = model(**self.inputs_dict(0))[0]
            else:
                output_2 = model(**inputs_dict)[0]

        model.set_attn_processor(AttnProcessorNPU())
        assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_3 = model(**self.inputs_dict(0))[0]
            else:
                output_3 = model(**inputs_dict)[0]

        torch.use_deterministic_algorithms(True)

        assert torch.allclose(output, output_2, atol=self.base_precision)
        assert torch.allclose(output, output_3, atol=self.base_precision)
        assert torch.allclose(output_2, output_3, atol=self.base_precision)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_set_xformers_attn_processor_for_determinism(self):
        """Default, `enable_xformers_memory_efficient_attention`, and explicitly-set
        `XFormersAttnProcessor` paths must produce the same output."""
        torch.use_deterministic_algorithms(False)
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        model.to(torch_device)

        if not hasattr(model, "set_attn_processor"):
            # If not has `set_attn_processor`, skip test
            return

        if not hasattr(model, "set_default_attn_processor"):
            # If not has `set_default_attn_processor`, skip test
            return

        model.set_default_attn_processor()
        assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output = model(**self.inputs_dict(0))[0]
            else:
                output = model(**inputs_dict)[0]

        model.enable_xformers_memory_efficient_attention()
        assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_2 = model(**self.inputs_dict(0))[0]
            else:
                output_2 = model(**inputs_dict)[0]

        model.set_attn_processor(XFormersAttnProcessor())
        assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_3 = model(**self.inputs_dict(0))[0]
            else:
                output_3 = model(**inputs_dict)[0]

        torch.use_deterministic_algorithms(True)

        assert torch.allclose(output, output_2, atol=self.base_precision)
        assert torch.allclose(output, output_3, atol=self.base_precision)
        assert torch.allclose(output_2, output_3, atol=self.base_precision)

    @require_torch_accelerator
    def test_set_attn_processor_for_determinism(self):
        """Swapping between `AttnProcessor2_0` and `AttnProcessor` must not change outputs."""
        if self.uses_custom_attn_processor:
            return

        torch.use_deterministic_algorithms(False)
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        model.to(torch_device)

        if not hasattr(model, "set_attn_processor"):
            # If not has `set_attn_processor`, skip test
            return

        assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_1 = model(**self.inputs_dict(0))[0]
            else:
                output_1 = model(**inputs_dict)[0]

        model.set_default_attn_processor()
        assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_2 = model(**self.inputs_dict(0))[0]
            else:
                output_2 = model(**inputs_dict)[0]

        model.set_attn_processor(AttnProcessor2_0())
        assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_4 = model(**self.inputs_dict(0))[0]
            else:
                output_4 = model(**inputs_dict)[0]

        model.set_attn_processor(AttnProcessor())
        assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_5 = model(**self.inputs_dict(0))[0]
            else:
                output_5 = model(**inputs_dict)[0]

        torch.use_deterministic_algorithms(True)

        # make sure that outputs match
        assert torch.allclose(output_2, output_1, atol=self.base_precision)
        assert torch.allclose(output_2, output_4, atol=self.base_precision)
        assert torch.allclose(output_2, output_5, atol=self.base_precision)

    def test_from_save_pretrained_variant(self, expected_max_diff=5e-5):
        # Same round-trip check as `test_from_save_pretrained`, but through the
        # `variant="fp16"` save/load path (method continues past this chunk boundary).
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        model.to(torch_device)
        model.eval()

        with tempfile.TemporaryDirectory()
as tmpdirname: model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") if hasattr(new_model, "set_default_attn_processor"): new_model.set_default_attn_processor() # non-variant cannot be loaded with self.assertRaises(OSError) as error_context: self.model_class.from_pretrained(tmpdirname) # make sure that error message states what keys are missing assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) new_model.to(torch_device) with torch.no_grad(): if self.forward_requires_fresh_args: image = model(**self.inputs_dict(0)) else: image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] if self.forward_requires_fresh_args: new_image = new_model(**self.inputs_dict(0)) else: new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") @is_torch_compile @require_torch_2 @unittest.skipIf( get_python_version == (3, 12), reason="Torch Dynamo isn't yet supported for Python 3.12.", ) def test_from_save_pretrained_dynamo(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() inputs = [init_dict, self.model_class] run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=inputs) def test_from_save_pretrained_dtype(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() for dtype in [torch.float32, torch.float16, torch.bfloat16]: if torch_device == "mps" and dtype == torch.bfloat16: continue with tempfile.TemporaryDirectory() as tmpdirname: model.to(dtype) model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, 
low_cpu_mem_usage=True, torch_dtype=dtype)
                assert new_model.dtype == dtype

                # Also exercise the slow (non-low_cpu_mem_usage) path unless the model
                # pins some modules to fp32 via `_keep_in_fp32_modules`.
                if (
                    hasattr(self.model_class, "_keep_in_fp32_modules")
                    and self.model_class._keep_in_fp32_modules is None
                ):
                    new_model = self.model_class.from_pretrained(
                        tmpdirname, low_cpu_mem_usage=False, torch_dtype=dtype
                    )
                    assert new_model.dtype == dtype

    def test_determinism(self, expected_max_diff=1e-5):
        """Two forward passes with identical inputs must agree within `expected_max_diff`
        (NaN entries are excluded before comparison)."""
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            if self.forward_requires_fresh_args:
                first = model(**self.inputs_dict(0))
            else:
                first = model(**inputs_dict)
            if isinstance(first, dict):
                first = first.to_tuple()[0]

            if self.forward_requires_fresh_args:
                second = model(**self.inputs_dict(0))
            else:
                second = model(**inputs_dict)
            if isinstance(second, dict):
                second = second.to_tuple()[0]

        out_1 = first.cpu().numpy()
        out_2 = second.cpu().numpy()
        # drop NaNs so a deterministic NaN pattern doesn't poison the max-diff
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, expected_max_diff)

    def test_output(self, expected_output_shape=None):
        """The output shape must equal the main input's shape, or `expected_output_shape`
        when the caller provides one."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.to_tuple()[0]

        self.assertIsNotNone(output)

        # input & output have to have the same shape
        input_tensor = inputs_dict[self.main_input_name]

        if expected_output_shape is None:
            expected_shape = input_tensor.shape
            self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
        else:
            self.assertEqual(output.shape, expected_output_shape, "Input and output shapes do not match")

    def test_model_from_pretrained(self):
        """Save/reload round-trip must preserve every parameter's shape and the output shape."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        # test if the model can be loaded from the config
        # and has all the expected shape
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
            new_model = self.model_class.from_pretrained(tmpdirname)
            new_model.to(torch_device)
            new_model.eval()

        # check if all parameters shape are the same
        for param_name in model.state_dict().keys():
            param_1 = model.state_dict()[param_name]
            param_2 = new_model.state_dict()[param_name]
            self.assertEqual(param_1.shape, param_2.shape)

        with torch.no_grad():
            output_1 = model(**inputs_dict)

            if isinstance(output_1, dict):
                output_1 = output_1.to_tuple()[0]

            output_2 = new_model(**inputs_dict)

            if isinstance(output_2, dict):
                output_2 = output_2.to_tuple()[0]

        self.assertEqual(output_1.shape, output_2.shape)

    @require_torch_accelerator_with_training
    def test_training(self):
        """A forward pass plus MSE loss against random noise must backprop without error."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, dict):
            output = output.to_tuple()[0]

        input_tensor = inputs_dict[self.main_input_name]
        noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()

    @require_torch_accelerator_with_training
    def test_ema_training(self):
        """Same as `test_training`, additionally stepping an `EMAModel` shadow copy."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.train()
        ema_model = EMAModel(model.parameters())

        output = model(**inputs_dict)

        if isinstance(output, dict):
            output = output.to_tuple()[0]

        input_tensor = inputs_dict[self.main_input_name]
        noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
        ema_model.step(model.parameters())

    def test_outputs_equivalence(self):
        # Head of `test_outputs_equivalence`: the tuple return (`return_dict=False`) must
        # match the dict/dataclass return, element by element (continues past this chunk).
        def set_nan_tensor_to_zero(t):
            # Temporary fallback until `aten::_index_put_impl_` is implemented in mps
            # Track progress in https://github.com/pytorch/pytorch/issues/77764
            device = t.device
            if device.type == "mps":
                t = t.to("cpu")
            t[t != t] = 0
            return t.to(device)

        def recursive_check(tuple_object, dict_object):
            if isinstance(tuple_object, (List, Tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, Dict):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                    ),
                    msg=(
                        "Tuple and dict output are not equal. Difference:"
                        f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                        f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                        f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
), ) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): if self.forward_requires_fresh_args: outputs_dict = model(**self.inputs_dict(0)) outputs_tuple = model(**self.inputs_dict(0), return_dict=False) else: outputs_dict = model(**inputs_dict) outputs_tuple = model(**inputs_dict, return_dict=False) recursive_check(outputs_tuple, outputs_dict) @require_torch_accelerator_with_training def test_enable_disable_gradient_checkpointing(self): # Skip test if model does not support gradient checkpointing if not self.model_class._supports_gradient_checkpointing: pytest.skip("Gradient checkpointing is not supported.") init_dict, _ = self.prepare_init_args_and_inputs_for_common() # at init model should have gradient checkpointing disabled model = self.model_class(**init_dict) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.enable_gradient_checkpointing() self.assertTrue(model.is_gradient_checkpointing) # check disable works model.disable_gradient_checkpointing() self.assertFalse(model.is_gradient_checkpointing) @require_torch_accelerator_with_training def test_effective_gradient_checkpointing(self, loss_tolerance=1e-5, param_grad_tol=5e-5, skip: set[str] = {}): # Skip test if model does not support gradient checkpointing if not self.model_class._supports_gradient_checkpointing: pytest.skip("Gradient checkpointing is not supported.") # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() inputs_dict_copy = copy.deepcopy(inputs_dict) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. 
        # For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        # (tail of `test_effective_gradient_checkpointing`; formatting reconstructed,
        # tokens unchanged)
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        torch.manual_seed(0)
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict_copy).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < loss_tolerance)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            if "post_quant_conv" in name:
                continue
            if name in skip:
                continue
            # TODO(aryan): remove the below lines after looking into easyanimate transformer a little more
            # It currently errors out the gradient checkpointing test because the gradients for attn2.to_out is None
            if param.grad is None:
                continue
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=param_grad_tol))

    @unittest.skipIf(torch_device == "mps", "This test is not supported for MPS devices.")
    def test_gradient_checkpointing_is_applied(
        self, expected_set=None, attention_head_dim=None, num_attention_heads=None, block_out_channels=None
    ):
        """After `enable_gradient_checkpointing`, the set of submodule class names with
        `gradient_checkpointing == True` must equal `expected_set`."""
        # Skip test if model does not support gradient checkpointing
        if not self.model_class._supports_gradient_checkpointing:
            pytest.skip("Gradient checkpointing is not supported.")

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        # optional overrides so subclasses can shrink the model for this check
        if attention_head_dim is not None:
            init_dict["attention_head_dim"] = attention_head_dim
        if num_attention_heads is not None:
            init_dict["num_attention_heads"] = num_attention_heads
        if block_out_channels is not None:
            init_dict["block_out_channels"] = block_out_channels

        model_class_copy = copy.copy(self.model_class)
        model = model_class_copy(**init_dict)
        model.enable_gradient_checkpointing()

        modules_with_gc_enabled = {}
        for submodule in model.modules():
            if hasattr(submodule, "gradient_checkpointing"):
                self.assertTrue(submodule.gradient_checkpointing)
                modules_with_gc_enabled[submodule.__class__.__name__] = True

        assert set(modules_with_gc_enabled.keys()) == expected_set
        assert all(modules_with_gc_enabled.values()), "All modules should be enabled"

    def test_deprecated_kwargs(self):
        """`**kwargs` in `__init__` and the `_deprecated_kwargs` class attribute must be
        declared together — one without the other is a configuration error."""
        has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters
        has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0

        if has_kwarg_in_model_class and not has_deprecated_kwarg:
            raise ValueError(
                f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs"
                " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are"
                " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
                " [<deprecated_argument>]`"
            )

        if not has_kwarg_in_model_class and has_deprecated_kwarg:
            raise ValueError(
                f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs"
                " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to"
                f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument"
                " from `_deprecated_kwargs = [<deprecated_argument>]`"
            )

    @parameterized.expand([(4, 4, True), (4, 8, False), (8, 4, False)])
    @torch.no_grad()
    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_save_load_lora_adapter(self, rank, lora_alpha, use_dora=False):
        """A saved LoRA adapter must reload with identical weights and identical effect
        on the model output."""
        from peft import LoraConfig
        from peft.utils import get_peft_model_state_dict

        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        torch.manual_seed(0)
        output_no_lora = model(**inputs_dict, return_dict=False)[0]

        denoiser_lora_config = LoraConfig(
            r=rank,
            lora_alpha=lora_alpha,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=use_dora,
        )
        model.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        torch.manual_seed(0)
        outputs_with_lora = model(**inputs_dict, return_dict=False)[0]

        # with randomly-initialized (non-identity) LoRA weights the output must change
        self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora, atol=1e-4, rtol=1e-4))

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_lora_adapter(tmpdir)
            self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))

            state_dict_loaded = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))

            model.unload_lora()
            self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

            model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True)
            state_dict_retrieved = get_peft_model_state_dict(model, adapter_name="default_0")

            # every tensor must round-trip exactly
            for k in state_dict_loaded:
                loaded_v = state_dict_loaded[k]
                retrieved_v = state_dict_retrieved[k].to(loaded_v.device)
                self.assertTrue(torch.allclose(loaded_v, retrieved_v))

            self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        torch.manual_seed(0)
        outputs_with_lora_2 = model(**inputs_dict, return_dict=False)[0]

        self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4))
        self.assertTrue(torch.allclose(outputs_with_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4))

    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_lora_wrong_adapter_name_raises_error(self):
        """Saving a LoRA adapter under a name that was never added must raise ValueError."""
        from peft import LoraConfig

        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        denoiser_lora_config = LoraConfig(
            r=4,
            lora_alpha=4,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=False,
        )
        model.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        with tempfile.TemporaryDirectory() as tmpdir:
            wrong_name = "foo"
            with self.assertRaises(ValueError) as err_context:
                model.save_lora_adapter(tmpdir, adapter_name=wrong_name)

            self.assertTrue(f"Adapter name {wrong_name} not found in the model." in str(err_context.exception))

    @parameterized.expand([(4, 4, True), (4, 8, False), (8, 4, False)])
    @torch.no_grad()
    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_lora_adapter_metadata_is_loaded_correctly(self, rank, lora_alpha, use_dora):
        """LoRA adapter metadata (the `LoraConfig` dict) must survive a save/load round-trip."""
        from peft import LoraConfig

        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        denoiser_lora_config = LoraConfig(
            r=rank,
            lora_alpha=lora_alpha,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=use_dora,
        )
        model.add_adapter(denoiser_lora_config)
        metadata = model.peft_config["default"].to_dict()
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_lora_adapter(tmpdir)
            model_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors")
            self.assertTrue(os.path.isfile(model_file))

            model.unload_lora()
            self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

            model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True)
            parsed_metadata = model.peft_config["default_0"].to_dict()
            check_if_dicts_are_equal(metadata, parsed_metadata)

    @torch.no_grad()
    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_lora_adapter_wrong_metadata_raises_error(self):
        # Corrupted adapter metadata in the safetensors header must surface as a
        # TypeError on load (continues past this chunk boundary).
        from peft import LoraConfig

        from diffusers.loaders.lora_base import LORA_ADAPTER_METADATA_KEY
        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        denoiser_lora_config = LoraConfig(
            r=4,
            lora_alpha=4,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=False,
        )
        # Tail of `test_lora_adapter_wrong_metadata_raises_error` (formatting
        # reconstructed, tokens unchanged).
        model.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_lora_adapter(tmpdir)
            model_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors")
            self.assertTrue(os.path.isfile(model_file))

            # Perturb the metadata in the state dict.
            loaded_state_dict = safetensors.torch.load_file(model_file)
            metadata = {"format": "pt"}
            lora_adapter_metadata = denoiser_lora_config.to_dict()
            lora_adapter_metadata.update({"foo": 1, "bar": 2})
            for key, value in lora_adapter_metadata.items():
                if isinstance(value, set):
                    # sets are not JSON-serializable
                    lora_adapter_metadata[key] = list(value)
            metadata[LORA_ADAPTER_METADATA_KEY] = json.dumps(lora_adapter_metadata, indent=2, sort_keys=True)
            safetensors.torch.save_file(loaded_state_dict, model_file, metadata=metadata)

            model.unload_lora()
            self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

            with self.assertRaises(TypeError) as err_context:
                model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True)
            self.assertTrue("`LoraConfig` class could not be instantiated" in str(err_context.exception))

    @require_torch_accelerator
    def test_cpu_offload(self):
        """With `device_map="auto"` and capped GPU memory, part of the model must land on
        CPU and the output must still match the fully-on-device baseline."""
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)

        model_size = compute_module_sizes(model)[""]
        # We test several splits of sizes to make sure it works.
        max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]]
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir)

            for max_size in max_gpu_sizes:
                max_memory = {0: max_size, "cpu": model_size * 2}
                new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)
                # Making sure part of the model will actually end up offloaded
                self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"})

                self.check_device_map_is_respected(new_model, new_model.hf_device_map)

                torch.manual_seed(0)
                new_output = new_model(**inputs_dict)

                self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_torch_accelerator
    def test_disk_offload_without_safetensors(self):
        """Disk offload of .bin weights requires an `offload_folder`; with one, outputs
        must match the baseline."""
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)

        model_size = compute_module_sizes(model)[""]
        max_size = int(self.model_split_percents[0] * model_size)
        # Force disk offload by setting very small CPU memory
        max_memory = {0: max_size, "cpu": int(0.1 * max_size)}

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, safe_serialization=False)

            with self.assertRaises(ValueError):
                # This errors out because it's missing an offload folder
                new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)

            new_model = self.model_class.from_pretrained(
                tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir
            )

            self.check_device_map_is_respected(new_model, new_model.hf_device_map)

            torch.manual_seed(0)
            new_output = new_model(**inputs_dict)

            self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_torch_accelerator
    def test_disk_offload_with_safetensors(self):
        """Safetensors disk offload (memory-mappable, no offload-folder error path) must
        match the baseline output."""
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)

        model_size = compute_module_sizes(model)[""]
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir)

            max_size = int(self.model_split_percents[0] * model_size)
            max_memory = {0: max_size, "cpu": max_size}
            new_model = self.model_class.from_pretrained(
                tmp_dir, device_map="auto", offload_folder=tmp_dir, max_memory=max_memory
            )

            self.check_device_map_is_respected(new_model, new_model.hf_device_map)

            torch.manual_seed(0)
            new_output = new_model(**inputs_dict)

            self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_torch_multi_accelerator
    def test_model_parallelism(self):
        """With two accelerators and capped per-device memory, the model must split
        across devices 0 and 1 and still match the baseline output."""
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)

        model_size = compute_module_sizes(model)[""]
        # We test several splits of sizes to make sure it works.
        max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]]
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir)

            for max_size in max_gpu_sizes:
                max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2}
                new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)
                # Making sure part of the model will actually end up offloaded
                self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1})

                self.check_device_map_is_respected(new_model, new_model.hf_device_map)

                torch.manual_seed(0)
                new_output = new_model(**inputs_dict)

                self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_torch_accelerator
    def test_sharded_checkpoints(self):
        """Sharded save (max_shard_size below model size) must produce the expected shard
        count and reload to the same output."""
        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        base_output = model(**inputs_dict)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB")
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            # (helper name `caculate_expected_num_shards` is a pre-existing typo defined
            # elsewhere in this file — not renamable from this block)
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            new_model = self.model_class.from_pretrained(tmp_dir).eval()
            new_model = new_model.to(torch_device)

            torch.manual_seed(0)
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)

            self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_torch_accelerator
    def test_sharded_checkpoints_with_variant(self):
        # Same sharded round-trip as above, but with `variant="fp16"` in the filenames
        # (continues past this chunk boundary).
        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        base_output = model(**inputs_dict)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        variant = "fp16"
        with tempfile.TemporaryDirectory() as tmp_dir:
            # It doesn't matter if the actual model is in fp16 or not. Just adding the variant and
            # testing if loading works with the variant when the checkpoint is sharded should be
            # enough.
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB", variant=variant)
            index_filename = _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_filename)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_filename))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            # Tail of `test_sharded_checkpoints_with_variant`: reload via the variant and
            # compare against the baseline output.
            new_model = self.model_class.from_pretrained(tmp_dir, variant=variant).eval()
            new_model = new_model.to(torch_device)

            torch.manual_seed(0)
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)

            self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_torch_accelerator
    def test_sharded_checkpoints_with_parallel_loading(self):
        """Sharded round-trip with `HF_ENABLE_PARALLEL_LOADING=yes` must match the
        baseline output."""
        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        base_output = model(**inputs_dict)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB")
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            # Load with parallel loading
            os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"
            new_model = self.model_class.from_pretrained(tmp_dir).eval()
            new_model = new_model.to(torch_device)

            torch.manual_seed(0)
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)
            self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
            # set to no.
            os.environ["HF_ENABLE_PARALLEL_LOADING"] = "no"

    @require_torch_accelerator
    def test_sharded_checkpoints_device_map(self):
        """Sharded round-trip loaded with `device_map="auto"` must match the baseline output."""
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB")
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto")

            torch.manual_seed(0)
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)
            self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    # This test is okay without a GPU because we're not running any execution. We're just serializing
    # and check if the resultant files are following an expected format.
    def test_variant_sharded_ckpt_right_format(self):
        """Sharded checkpoints saved with a variant must carry the variant in every shard
        filename and the index, for both safetensors and .bin serialization."""
        for use_safe in [True, False]:
            extension = ".safetensors" if use_safe else ".bin"
            config, _ = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**config).eval()

            model_size = compute_module_persistent_sizes(model)[""]
            max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
            variant = "fp16"
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.cpu().save_pretrained(
                    tmp_dir, variant=variant, max_shard_size=f"{max_shard_size}KB", safe_serialization=use_safe
                )
                index_variant = _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safe else WEIGHTS_INDEX_NAME, variant)
                self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_variant)))

                # Now check if the right number of shards exists. First, let's get the number of shards.
                # Since this number can be dependent on the model being tested, it's important that we calculate it
                # instead of hardcoding it.
                expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_variant))
                actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(extension)])
                self.assertTrue(actual_num_shards == expected_num_shards)

                # Check if the variant is present as a substring in the checkpoints.
                shard_files = [
                    file
                    for file in os.listdir(tmp_dir)
                    if file.endswith(extension) or ("index" in file and "json" in file)
                ]
                assert all(variant in f for f in shard_files)

                # Check if the sharded checkpoints were serialized in the right format.
                shard_files = [file for file in os.listdir(tmp_dir) if file.endswith(extension)]
                # Example: diffusion_pytorch_model.fp16-00001-of-00002.safetensors
                assert all(f.split(".")[1].split("-")[0] == variant for f in shard_files)

    def test_layerwise_casting_training(self):
        """Training must work with layerwise storage casting enabled, across several
        storage/compute dtype combinations."""

        def test_fn(storage_dtype, compute_dtype):
            if torch.device(torch_device).type == "cpu" and compute_dtype == torch.bfloat16:
                pytest.skip("Skipping test because CPU doesn't go well with bfloat16.")
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

            model = self.model_class(**init_dict)
            model = model.to(torch_device, dtype=compute_dtype)
            model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype)
            model.train()

            inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype)
            # NOTE(review): original indentation was lost in this chunk; the autocast
            # scope is assumed to cover the forward pass and loss computation, with the
            # backward pass outside — confirm against the upstream file.
            with torch.amp.autocast(device_type=torch.device(torch_device).type):
                output = model(**inputs_dict)

                if isinstance(output, dict):
                    output = output.to_tuple()[0]

                input_tensor = inputs_dict[self.main_input_name]
                noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
                noise = cast_maybe_tensor_dtype(noise, torch.float32, compute_dtype)
                loss = torch.nn.functional.mse_loss(output, noise)

            loss.backward()

        test_fn(torch.float16, torch.float32)
        test_fn(torch.float8_e4m3fn, torch.float32)
        test_fn(torch.float8_e5m2, torch.float32)
        test_fn(torch.float8_e4m3fn, torch.bfloat16)

    @torch.no_grad()
    def test_layerwise_casting_inference(self):
        # Head of `test_layerwise_casting_inference` (method continues past this chunk).
        from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS
        from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN

        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config)
        model.eval()
model.to(torch_device) base_slice = model(**inputs_dict)[0].detach().flatten().cpu().numpy() def check_linear_dtype(module, storage_dtype, compute_dtype): patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN if getattr(module, "_skip_layerwise_casting_patterns", None) is not None: patterns_to_check += tuple(module._skip_layerwise_casting_patterns) for name, submodule in module.named_modules(): if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): continue dtype_to_check = storage_dtype if any(re.search(pattern, name) for pattern in patterns_to_check): dtype_to_check = compute_dtype if getattr(submodule, "weight", None) is not None: self.assertEqual(submodule.weight.dtype, dtype_to_check) if getattr(submodule, "bias", None) is not None: self.assertEqual(submodule.bias.dtype, dtype_to_check) def test_layerwise_casting(storage_dtype, compute_dtype): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) model = self.model_class(**config).eval() model = model.to(torch_device, dtype=compute_dtype) model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) check_linear_dtype(model, storage_dtype, compute_dtype) output = model(**inputs_dict)[0].float().flatten().detach().cpu().numpy() # The precision test is not very important for fast tests. In most cases, the outputs will not be the same. # We just want to make sure that the layerwise casting is working as expected. 
self.assertTrue(numpy_cosine_similarity_distance(base_slice, output) < 1.0) test_layerwise_casting(torch.float16, torch.float32) test_layerwise_casting(torch.float8_e4m3fn, torch.float32) test_layerwise_casting(torch.float8_e5m2, torch.float32) test_layerwise_casting(torch.float8_e4m3fn, torch.bfloat16) @require_torch_accelerator @torch.no_grad() def test_layerwise_casting_memory(self): MB_TOLERANCE = 0.2 LEAST_COMPUTE_CAPABILITY = 8.0 def reset_memory_stats(): gc.collect() backend_synchronize(torch_device) backend_empty_cache(torch_device) backend_reset_peak_memory_stats(torch_device) def get_memory_usage(storage_dtype, compute_dtype): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) model = self.model_class(**config).eval() model = model.to(torch_device, dtype=compute_dtype) model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) reset_memory_stats() model(**inputs_dict) model_memory_footprint = model.get_memory_footprint() peak_inference_memory_allocated_mb = backend_max_memory_allocated(torch_device) / 1024**2 return model_memory_footprint, peak_inference_memory_allocated_mb fp32_memory_footprint, fp32_max_memory = get_memory_usage(torch.float32, torch.float32) fp8_e4m3_fp32_memory_footprint, fp8_e4m3_fp32_max_memory = get_memory_usage(torch.float8_e4m3fn, torch.float32) fp8_e4m3_bf16_memory_footprint, fp8_e4m3_bf16_max_memory = get_memory_usage( torch.float8_e4m3fn, torch.bfloat16 ) compute_capability = get_torch_cuda_device_capability() if torch_device == "cuda" else None self.assertTrue(fp8_e4m3_bf16_memory_footprint < fp8_e4m3_fp32_memory_footprint < fp32_memory_footprint) # NOTE: the following assertion would fail on our CI (running Tesla T4) due to bf16 using more memory than fp32. # On other devices, such as DGX (Ampere) and Audace (Ada), the test passes. So, we conditionally check it. 
if compute_capability and compute_capability >= LEAST_COMPUTE_CAPABILITY: self.assertTrue(fp8_e4m3_bf16_max_memory < fp8_e4m3_fp32_max_memory) # On this dummy test case with a small model, sometimes fp8_e4m3_fp32 max memory usage is higher than fp32 by a few # bytes. This only happens for some models, so we allow a small tolerance. # For any real model being tested, the order would be fp8_e4m3_bf16 < fp8_e4m3_fp32 < fp32. self.assertTrue( fp8_e4m3_fp32_max_memory < fp32_max_memory or abs(fp8_e4m3_fp32_max_memory - fp32_max_memory) < MB_TOLERANCE ) @parameterized.expand([False, True]) @require_torch_accelerator def test_group_offloading(self, record_stream): if not self.model_class._supports_group_offloading: pytest.skip("Model does not support group offloading.") init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) @torch.no_grad() def run_forward(model): self.assertTrue( all( module._diffusers_hook.get_hook("group_offloading") is not None for module in model.modules() if hasattr(module, "_diffusers_hook") ) ) model.eval() return model(**inputs_dict)[0] model = self.model_class(**init_dict) model.to(torch_device) output_without_group_offloading = run_forward(model) torch.manual_seed(0) model = self.model_class(**init_dict) model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1) output_with_group_offloading1 = run_forward(model) torch.manual_seed(0) model = self.model_class(**init_dict) model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, non_blocking=True) output_with_group_offloading2 = run_forward(model) torch.manual_seed(0) model = self.model_class(**init_dict) model.enable_group_offload(torch_device, offload_type="leaf_level") output_with_group_offloading3 = run_forward(model) torch.manual_seed(0) model = self.model_class(**init_dict) model.enable_group_offload( torch_device, offload_type="leaf_level", use_stream=True, record_stream=record_stream 
) output_with_group_offloading4 = run_forward(model) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-5)) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-5)) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading3, atol=1e-5)) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading4, atol=1e-5)) @parameterized.expand([(False, "block_level"), (True, "leaf_level")]) @require_torch_accelerator @torch.no_grad() def test_group_offloading_with_layerwise_casting(self, record_stream, offload_type): if not self.model_class._supports_group_offloading: pytest.skip("Model does not support group offloading.") torch.manual_seed(0) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() _ = model(**inputs_dict)[0] torch.manual_seed(0) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() storage_dtype, compute_dtype = torch.float16, torch.float32 inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) model = self.model_class(**init_dict) model.eval() additional_kwargs = {} if offload_type == "leaf_level" else {"num_blocks_per_group": 1} model.enable_group_offload( torch_device, offload_type=offload_type, use_stream=True, record_stream=record_stream, **additional_kwargs ) model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) _ = model(**inputs_dict)[0] @parameterized.expand([("block_level", False), ("leaf_level", True)]) @require_torch_accelerator @torch.no_grad() @torch.inference_mode() def test_group_offloading_with_disk(self, offload_type, record_stream, atol=1e-5): if not self.model_class._supports_group_offloading: pytest.skip("Model does not support group offloading.") if self.model_class.__name__ == 
"QwenImageTransformer2DModel": pytest.skip( "QwenImageTransformer2DModel doesn't support group offloading with disk. Needs to be investigated." ) def _has_generator_arg(model): sig = inspect.signature(model.forward) params = sig.parameters return "generator" in params def _run_forward(model, inputs_dict): accepts_generator = _has_generator_arg(model) if accepts_generator: inputs_dict["generator"] = torch.manual_seed(0) torch.manual_seed(0) return model(**inputs_dict)[0] if self.__class__.__name__ == "AutoencoderKLCosmosTests" and offload_type == "leaf_level": pytest.skip("With `leaf_type` as the offloading type, it fails. Needs investigation.") init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict) model.eval() model.to(torch_device) output_without_group_offloading = _run_forward(model, inputs_dict) torch.manual_seed(0) model = self.model_class(**init_dict) model.eval() num_blocks_per_group = None if offload_type == "leaf_level" else 1 additional_kwargs = {} if offload_type == "leaf_level" else {"num_blocks_per_group": num_blocks_per_group} with tempfile.TemporaryDirectory() as tmpdir: model.enable_group_offload( torch_device, offload_type=offload_type, offload_to_disk_path=tmpdir, use_stream=True, record_stream=record_stream, **additional_kwargs, ) has_safetensors = glob.glob(f"{tmpdir}/*.safetensors") self.assertTrue(has_safetensors, "No safetensors found in the directory.") # For "leaf-level", there is a prefetching hook which makes this check a bit non-deterministic # in nature. So, skip it. 
if offload_type != "leaf_level": is_correct, extra_files, missing_files = _check_safetensors_serialization( module=model, offload_to_disk_path=tmpdir, offload_type=offload_type, num_blocks_per_group=num_blocks_per_group, ) if not is_correct: if extra_files: raise ValueError(f"Found extra files: {', '.join(extra_files)}") elif missing_files: raise ValueError(f"Following files are missing: {', '.join(missing_files)}") output_with_group_offloading = _run_forward(model, inputs_dict) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading, atol=atol)) def test_auto_model(self, expected_max_diff=5e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model = model.eval() model = model.to(torch_device) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) auto_model = AutoModel.from_pretrained(tmpdirname) if hasattr(auto_model, "set_default_attn_processor"): auto_model.set_default_attn_processor() auto_model = auto_model.eval() auto_model = auto_model.to(torch_device) with torch.no_grad(): if self.forward_requires_fresh_args: output_original = model(**self.inputs_dict(0)) output_auto = auto_model(**self.inputs_dict(0)) else: output_original = model(**inputs_dict) output_auto = auto_model(**inputs_dict) if isinstance(output_original, dict): output_original = output_original.to_tuple()[0] if isinstance(output_auto, dict): output_auto = output_auto.to_tuple()[0] max_diff = (output_original - output_auto).abs().max().item() self.assertLessEqual( max_diff, expected_max_diff, f"AutoModel forward pass diff: {max_diff} exceeds threshold {expected_max_diff}", ) @parameterized.expand( [ (-1, "You can't pass device_map as a negative int"), 
("foo", "When passing device_map as a string, the value needs to be a device name"), ] ) def test_wrong_device_map_raises_error(self, device_map, msg_substring): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) with tempfile.TemporaryDirectory() as tmpdir: model.save_pretrained(tmpdir) with self.assertRaises(ValueError) as err_ctx: _ = self.model_class.from_pretrained(tmpdir, device_map=device_map) assert msg_substring in str(err_ctx.exception) @parameterized.expand([0, torch_device, torch.device(torch_device)]) @require_torch_accelerator def test_passing_non_dict_device_map_works(self, device_map): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).eval() with tempfile.TemporaryDirectory() as tmpdir: model.save_pretrained(tmpdir) loaded_model = self.model_class.from_pretrained(tmpdir, device_map=device_map) _ = loaded_model(**inputs_dict) @parameterized.expand([("", torch_device), ("", torch.device(torch_device))]) @require_torch_accelerator def test_passing_dict_device_map_works(self, name, device): # There are other valid dict-based `device_map` values too. It's best to refer to # the docs for those: https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap. 
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).eval() device_map = {name: device} with tempfile.TemporaryDirectory() as tmpdir: model.save_pretrained(tmpdir) loaded_model = self.model_class.from_pretrained(tmpdir, device_map=device_map) _ = loaded_model(**inputs_dict) @is_staging_test class ModelPushToHubTester(unittest.TestCase): identifier = uuid.uuid4() repo_id = f"test-model-{identifier}" org_repo_id = f"valid_org/{repo_id}-org" def test_push_to_hub(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.repo_id, token=TOKEN) def test_push_to_hub_in_organization(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.org_repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo 
delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.org_repo_id, token=TOKEN) @unittest.skipIf( not is_jinja_available(), reason="Model card tests cannot be performed without Jinja installed.", ) def test_push_to_hub_library_name(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.repo_id, token=TOKEN) model_card = ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data assert model_card.library_name == "diffusers" # Reset repo delete_repo(self.repo_id, token=TOKEN) @require_torch_accelerator @require_torch_2 @is_torch_compile @slow @require_torch_version_greater("2.7.1") class TorchCompileTesterMixin: different_shapes_for_compilation = None def setUp(self): # clean up the VRAM before each test super().setUp() torch.compiler.reset() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test in case of CUDA runtime errors super().tearDown() torch.compiler.reset() gc.collect() backend_empty_cache(torch_device) def test_torch_compile_recompilation_and_graph_break(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) model = torch.compile(model, fullgraph=True) with ( torch._inductor.utils.fresh_inductor_cache(), torch._dynamo.config.patch(error_on_recompile=True), torch.no_grad(), ): _ = model(**inputs_dict) _ = model(**inputs_dict) def 
test_torch_compile_repeated_blocks(self): if self.model_class._repeated_blocks is None: pytest.skip("Skipping test as the model class doesn't have `_repeated_blocks` set.") init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) model.compile_repeated_blocks(fullgraph=True) recompile_limit = 1 if self.model_class.__name__ == "UNet2DConditionModel": recompile_limit = 2 with ( torch._inductor.utils.fresh_inductor_cache(), torch._dynamo.config.patch(recompile_limit=recompile_limit), torch.no_grad(), ): _ = model(**inputs_dict) _ = model(**inputs_dict) def test_compile_with_group_offloading(self): if not self.model_class._supports_group_offloading: pytest.skip("Model does not support group offloading.") torch._dynamo.config.cache_size_limit = 10000 init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.eval() # TODO: Can test for other group offloading kwargs later if needed. 
group_offload_kwargs = { "onload_device": torch_device, "offload_device": "cpu", "offload_type": "block_level", "num_blocks_per_group": 1, "use_stream": True, "non_blocking": True, } model.enable_group_offload(**group_offload_kwargs) model.compile() with torch.no_grad(): _ = model(**inputs_dict) _ = model(**inputs_dict) @require_torch_version_greater("2.7.1") def test_compile_on_different_shapes(self): if self.different_shapes_for_compilation is None: pytest.skip(f"Skipping as `different_shapes_for_compilation` is not set for {self.__class__.__name__}.") torch.fx.experimental._config.use_duck_shape = False init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) model = torch.compile(model, fullgraph=True, dynamic=True) for height, width in self.different_shapes_for_compilation: with torch._dynamo.config.patch(error_on_recompile=True), torch.no_grad(): inputs_dict = self.prepare_dummy_input(height=height, width=width) _ = model(**inputs_dict) @slow @require_torch_2 @require_torch_accelerator @require_peft_backend @require_peft_version_greater("0.14.0") @require_torch_version_greater("2.7.1") @is_torch_compile class LoraHotSwappingForModelTesterMixin: """Test that hotswapping does not result in recompilation on the model directly. We're not extensively testing the hotswapping functionality since it is implemented in PEFT and is extensively tested there. The goal of this test is specifically to ensure that hotswapping with diffusers does not require recompilation. See https://github.com/huggingface/peft/blob/eaab05e18d51fb4cce20a73c9acd82a00c013b83/tests/test_gpu_examples.py#L4252 for the analogous PEFT test. """ different_shapes_for_compilation = None def tearDown(self): # It is critical that the dynamo cache is reset for each test. Otherwise, if the test re-uses the same model, # there will be recompilation errors, as torch caches the model when run in the same process. 
super().tearDown() torch.compiler.reset() gc.collect() backend_empty_cache(torch_device) def get_lora_config(self, lora_rank, lora_alpha, target_modules): # from diffusers test_models_unet_2d_condition.py from peft import LoraConfig lora_config = LoraConfig( r=lora_rank, lora_alpha=lora_alpha, target_modules=target_modules, init_lora_weights=False, use_dora=False, ) return lora_config def get_linear_module_name_other_than_attn(self, model): linear_names = [ name for name, module in model.named_modules() if isinstance(module, nn.Linear) and "to_" not in name ] return linear_names[0] def check_model_hotswap(self, do_compile, rank0, rank1, target_modules0, target_modules1=None): """ Check that hotswapping works on a small unet. Steps: - create 2 LoRA adapters and save them - load the first adapter - hotswap the second adapter - check that the outputs are correct - optionally compile the model - optionally check if recompilations happen on different shapes Note: We set rank == alpha here because save_lora_adapter does not save the alpha scalings, thus the test would fail if the values are different. Since rank != alpha does not matter for the purpose of this test, this is fine. 
""" different_shapes = self.different_shapes_for_compilation # create 2 adapters with different ranks and alphas torch.manual_seed(0) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) alpha0, alpha1 = rank0, rank1 max_rank = max([rank0, rank1]) if target_modules1 is None: target_modules1 = target_modules0[:] lora_config0 = self.get_lora_config(rank0, alpha0, target_modules0) lora_config1 = self.get_lora_config(rank1, alpha1, target_modules1) model.add_adapter(lora_config0, adapter_name="adapter0") with torch.inference_mode(): torch.manual_seed(0) output0_before = model(**inputs_dict)["sample"] model.add_adapter(lora_config1, adapter_name="adapter1") model.set_adapter("adapter1") with torch.inference_mode(): torch.manual_seed(0) output1_before = model(**inputs_dict)["sample"] # sanity checks: tol = 5e-3 assert not torch.allclose(output0_before, output1_before, atol=tol, rtol=tol) assert not (output0_before == 0).all() assert not (output1_before == 0).all() with tempfile.TemporaryDirectory() as tmp_dirname: # save the adapter checkpoints model.save_lora_adapter(os.path.join(tmp_dirname, "0"), safe_serialization=True, adapter_name="adapter0") model.save_lora_adapter(os.path.join(tmp_dirname, "1"), safe_serialization=True, adapter_name="adapter1") del model # load the first adapter torch.manual_seed(0) init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) if do_compile or (rank0 != rank1): # no need to prepare if the model is not compiled or if the ranks are identical model.enable_lora_hotswap(target_rank=max_rank) file_name0 = os.path.join(os.path.join(tmp_dirname, "0"), "pytorch_lora_weights.safetensors") file_name1 = os.path.join(os.path.join(tmp_dirname, "1"), "pytorch_lora_weights.safetensors") model.load_lora_adapter(file_name0, safe_serialization=True, adapter_name="adapter0", prefix=None) if do_compile: model = 
torch.compile(model, mode="reduce-overhead", dynamic=different_shapes is not None) with torch.inference_mode(): # additionally check if dynamic compilation works. if different_shapes is not None: for height, width in different_shapes: new_inputs_dict = self.prepare_dummy_input(height=height, width=width) _ = model(**new_inputs_dict) else: output0_after = model(**inputs_dict)["sample"] assert torch.allclose(output0_before, output0_after, atol=tol, rtol=tol) # hotswap the 2nd adapter model.load_lora_adapter(file_name1, adapter_name="adapter0", hotswap=True, prefix=None) # we need to call forward to potentially trigger recompilation with torch.inference_mode(): if different_shapes is not None: for height, width in different_shapes: new_inputs_dict = self.prepare_dummy_input(height=height, width=width) _ = model(**new_inputs_dict) else: output1_after = model(**inputs_dict)["sample"] assert torch.allclose(output1_before, output1_after, atol=tol, rtol=tol) # check error when not passing valid adapter name name = "does-not-exist" msg = f"Trying to hotswap LoRA adapter '{name}' but there is no existing adapter by that name" with self.assertRaisesRegex(ValueError, msg): model.load_lora_adapter(file_name1, adapter_name=name, hotswap=True, prefix=None) @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa def test_hotswapping_model(self, rank0, rank1): self.check_model_hotswap( do_compile=False, rank0=rank0, rank1=rank1, target_modules0=["to_q", "to_k", "to_v", "to_out.0"] ) @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa def test_hotswapping_compiled_model_linear(self, rank0, rank1): # It's important to add this context to raise an error on recompilation target_modules = ["to_q", "to_k", "to_v", "to_out.0"] with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): self.check_model_hotswap(do_compile=True, rank0=rank0, 
rank1=rank1, target_modules0=target_modules) @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa def test_hotswapping_compiled_model_conv2d(self, rank0, rank1): if "unet" not in self.model_class.__name__.lower(): pytest.skip("Test only applies to UNet.") # It's important to add this context to raise an error on recompilation target_modules = ["conv", "conv1", "conv2"] with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa def test_hotswapping_compiled_model_both_linear_and_conv2d(self, rank0, rank1): if "unet" not in self.model_class.__name__.lower(): pytest.skip("Test only applies to UNet.") # It's important to add this context to raise an error on recompilation target_modules = ["to_q", "conv"] with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa def test_hotswapping_compiled_model_both_linear_and_other(self, rank0, rank1): # In `test_hotswapping_compiled_model_both_linear_and_conv2d()`, we check if we can do hotswapping # with `torch.compile()` for models that have both linear and conv layers. In this test, we check # if we can target a linear layer from the transformer blocks and another linear layer from non-attention # block. 
target_modules = ["to_q"] init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) target_modules.append(self.get_linear_module_name_other_than_attn(model)) del model # It's important to add this context to raise an error on recompilation with torch._dynamo.config.patch(error_on_recompile=True): self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) def test_enable_lora_hotswap_called_after_adapter_added_raises(self): # ensure that enable_lora_hotswap is called before loading the first adapter lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) model.add_adapter(lora_config) msg = re.escape("Call `enable_lora_hotswap` before loading the first adapter.") with self.assertRaisesRegex(RuntimeError, msg): model.enable_lora_hotswap(target_rank=32) def test_enable_lora_hotswap_called_after_adapter_added_warning(self): # ensure that enable_lora_hotswap is called before loading the first adapter from diffusers.loaders.peft import logger lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) model.add_adapter(lora_config) msg = ( "It is recommended to call `enable_lora_hotswap` before loading the first adapter to avoid recompilation." 
) with self.assertLogs(logger=logger, level="WARNING") as cm: model.enable_lora_hotswap(target_rank=32, check_compiled="warn") assert any(msg in log for log in cm.output) def test_enable_lora_hotswap_called_after_adapter_added_ignore(self): # check possibility to ignore the error/warning lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) model.add_adapter(lora_config) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") # Capture all warnings model.enable_lora_hotswap(target_rank=32, check_compiled="warn") self.assertEqual(len(w), 0, f"Expected no warnings, but got: {[str(warn.message) for warn in w]}") def test_enable_lora_hotswap_wrong_check_compiled_argument_raises(self): # check that wrong argument value raises an error lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict).to(torch_device) model.add_adapter(lora_config) msg = re.escape("check_compiles should be one of 'error', 'warn', or 'ignore', got 'wrong-argument' instead.") with self.assertRaisesRegex(ValueError, msg): model.enable_lora_hotswap(target_rank=32, check_compiled="wrong-argument") def test_hotswap_second_adapter_targets_more_layers_raises(self): # check the error and log from diffusers.loaders.peft import logger # at the moment, PEFT requires the 2nd adapter to target the same or a subset of layers target_modules0 = ["to_q"] target_modules1 = ["to_q", "to_k"] with self.assertRaises(RuntimeError): # peft raises RuntimeError with self.assertLogs(logger=logger, level="ERROR") as cm: self.check_model_hotswap( do_compile=True, rank0=8, rank1=8, target_modules0=target_modules0, target_modules1=target_modules1 ) assert any("Hotswapping adapter0 was unsuccessful" in log for log in cm.output) 
@parameterized.expand([(11, 11), (7, 13), (13, 7)]) @require_torch_version_greater("2.7.1") def test_hotswapping_compile_on_different_shapes(self, rank0, rank1): different_shapes_for_compilation = self.different_shapes_for_compilation if different_shapes_for_compilation is None: pytest.skip(f"Skipping as `different_shapes_for_compilation` is not set for {self.__class__.__name__}.") # Specifying `use_duck_shape=False` instructs the compiler if it should use the same symbolic # variable to represent input sizes that are the same. For more details, # check out this [comment](https://github.com/huggingface/diffusers/pull/11327#discussion_r2047659790). torch.fx.experimental._config.use_duck_shape = False target_modules = ["to_q", "to_k", "to_v", "to_out.0"] with torch._dynamo.config.patch(error_on_recompile=True): self.check_model_hotswap( do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules, )
diffusers/tests/models/test_modeling_common.py/0
{ "file_path": "diffusers/tests/models/test_modeling_common.py", "repo_id": "diffusers", "token_count": 48754 }
182
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import EasyAnimateTransformer3DModel from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class EasyAnimateTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = EasyAnimateTransformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 2 num_channels = 4 num_frames = 2 height = 16 width = 16 embedding_dim = 16 sequence_length = 16 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "timestep": timestep, "timestep_cond": None, "encoder_hidden_states": encoder_hidden_states, "encoder_hidden_states_t5": None, "inpaint_latents": None, "control_latents": None, } @property def input_shape(self): return (4, 2, 16, 16) @property def output_shape(self): return (4, 2, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "attention_head_dim": 16, "num_attention_heads": 2, "in_channels": 4, "mmdit_layers": 2, "num_layers": 2, "out_channels": 4, "patch_size": 2, "sample_height": 60, 
"sample_width": 90, "text_embed_dim": 16, "time_embed_dim": 8, "time_position_encoding_type": "3d_rope", "timestep_activation_fn": "silu", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"EasyAnimateTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
diffusers/tests/models/transformers/test_models_transformer_easyanimate.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_easyanimate.py", "repo_id": "diffusers", "token_count": 1196 }
183
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers.models.transformers import TransformerTemporalModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class TemporalTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = TransformerTemporalModel main_input_name = "hidden_states" @property def dummy_input(self): batch_size = 2 num_channels = 4 height = width = 32 hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "timestep": timestep, } @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "num_attention_heads": 8, "attention_head_dim": 4, "in_channels": 4, "num_layers": 1, "norm_num_groups": 1, } inputs_dict = self.dummy_input return init_dict, inputs_dict
diffusers/tests/models/transformers/test_models_transformer_temporal.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_temporal.py", "repo_id": "diffusers", "token_count": 734 }
184
import gc import tempfile import unittest from typing import Callable, Union import numpy as np import torch import diffusers from diffusers import ComponentsManager, ModularPipeline, ModularPipelineBlocks from diffusers.utils import logging from diffusers.utils.testing_utils import ( backend_empty_cache, numpy_cosine_similarity_distance, require_accelerator, require_torch, torch_device, ) def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor @require_torch class ModularPipelineTesterMixin: """ This mixin is designed to be used with unittest.TestCase classes. It provides a set of common tests for each modular pipeline, including: - test_pipeline_call_signature: check if the pipeline's __call__ method has all required parameters - test_inference_batch_consistent: check if the pipeline's __call__ method can handle batch inputs - test_inference_batch_single_identical: check if the pipeline's __call__ method can handle single input - test_float16_inference: check if the pipeline's __call__ method can handle float16 inputs - test_to_device: check if the pipeline's __call__ method can handle different devices """ # Canonical parameters that are passed to `__call__` regardless # of the type of pipeline. They are always optional and have common # sense default values. optional_params = frozenset( [ "num_inference_steps", "num_images_per_prompt", "latents", "output_type", ] ) # this is modular specific: generator needs to be a intermediate input because it's mutable intermediate_params = frozenset( [ "generator", ] ) def get_generator(self, seed): device = torch_device if torch_device != "mps" else "cpu" generator = torch.Generator(device).manual_seed(seed) return generator @property def pipeline_class(self) -> Union[Callable, ModularPipeline]: raise NotImplementedError( "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " "See existing pipeline tests for reference." 
) @property def repo(self) -> str: raise NotImplementedError( "You need to set the attribute `repo` in the child test class. See existing pipeline tests for reference." ) @property def pipeline_blocks_class(self) -> Union[Callable, ModularPipelineBlocks]: raise NotImplementedError( "You need to set the attribute `pipeline_blocks_class = ClassNameOfPipelineBlocks` in the child test class. " "See existing pipeline tests for reference." ) def get_pipeline(self): raise NotImplementedError( "You need to implement `get_pipeline(self)` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_inputs(self, device, seed=0): raise NotImplementedError( "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " "See existing pipeline tests for reference." ) @property def params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `params` in the child test class. " "`params` are checked for if all values are present in `__call__`'s signature." " You can set `params` using one of the common set of parameters defined in `pipeline_params.py`" " e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to " "image pipelines, including prompts and prompt embedding overrides." "If your pipeline's set of arguments has minor changes from one of the common sets of arguments, " "do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline " "with non-configurable height and width arguments should set the attribute as " "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. " "See existing pipeline tests for reference." ) @property def batch_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `batch_params` in the child test class. " "`batch_params` are the parameters required to be batched when passed to the pipeline's " "`__call__` method. 
`pipeline_params.py` provides some common sets of parameters such as " "`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's " "set of batch arguments has minor changes from one of the common sets of batch arguments, " "do not make modifications to the existing common sets of batch arguments. I.e. a text to " "image pipeline `negative_prompt` is not batched should set the attribute as " "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. " "See existing pipeline tests for reference." ) def setUp(self): # clean up the VRAM before each test super().setUp() torch.compiler.reset() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test in case of CUDA runtime errors super().tearDown() torch.compiler.reset() gc.collect() backend_empty_cache(torch_device) def test_pipeline_call_signature(self): pipe = self.get_pipeline() input_parameters = pipe.blocks.input_names optional_parameters = pipe.default_call_parameters def _check_for_parameters(parameters, expected_parameters, param_type): remaining_parameters = {param for param in parameters if param not in expected_parameters} assert len(remaining_parameters) == 0, ( f"Required {param_type} parameters not present: {remaining_parameters}" ) _check_for_parameters(self.params, input_parameters, "input") _check_for_parameters(self.optional_params, optional_parameters, "optional") def test_inference_batch_consistent(self, batch_sizes=[2], batch_generator=True): pipe = self.get_pipeline() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # prepare batched inputs batched_inputs = [] for batch_size in batch_sizes: batched_input = {} batched_input.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] 
batched_input[name] = batch_size * [value] if batch_generator and "generator" in inputs: batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_input["batch_size"] = batch_size batched_inputs.append(batched_input) logger.setLevel(level=diffusers.logging.WARNING) for batch_size, batched_input in zip(batch_sizes, batched_inputs): output = pipe(**batched_input, output="images") assert len(output) == batch_size, "Output is different from expected batch size" def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, ): pipe = self.get_pipeline() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size output = pipe(**inputs, output="images") output_batch = pipe(**batched_inputs, output="images") assert output_batch.shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0]) - to_np(output[0])).max() assert max_diff < expected_max_diff, "Batch inference results different from single inference results" @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_float16_inference(self, expected_max_diff=5e-2): pipe = self.get_pipeline() pipe.to(torch_device, torch.float32) pipe.set_progress_bar_config(disable=None) pipe_fp16 = self.get_pipeline() pipe_fp16.to(torch_device, torch.float16) 
pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in inputs: inputs["generator"] = self.get_generator(0) output = pipe(**inputs, output="images") fp16_inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in fp16_inputs: fp16_inputs["generator"] = self.get_generator(0) output_fp16 = pipe_fp16(**fp16_inputs, output="images") if isinstance(output, torch.Tensor): output = output.cpu() output_fp16 = output_fp16.cpu() max_diff = numpy_cosine_similarity_distance(output.flatten(), output_fp16.flatten()) assert max_diff < expected_max_diff, "FP16 inference is different from FP32 inference" @require_accelerator def test_to_device(self): pipe = self.get_pipeline() pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] assert all(device == "cpu" for device in model_devices), "All pipeline components are not on CPU" pipe.to(torch_device) model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] assert all(device == torch_device for device in model_devices), ( "All pipeline components are not on accelerator device" ) def test_inference_is_not_nan_cpu(self): pipe = self.get_pipeline() pipe.set_progress_bar_config(disable=None) pipe.to("cpu") output = pipe(**self.get_dummy_inputs("cpu"), output="images") assert np.isnan(to_np(output)).sum() == 0, "CPU Inference returns NaN" @require_accelerator def test_inference_is_not_nan(self): pipe = self.get_pipeline() pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) output = pipe(**self.get_dummy_inputs(torch_device), output="images") assert np.isnan(to_np(output)).sum() == 0, "Accelerator Inference returns NaN" def test_num_images_per_prompt(self): pipe = self.get_pipeline() 
if "num_images_per_prompt" not in pipe.blocks.input_names: return pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt, output="images") assert images.shape[0] == batch_size * num_images_per_prompt @require_accelerator def test_components_auto_cpu_offload_inference_consistent(self): base_pipe = self.get_pipeline().to(torch_device) cm = ComponentsManager() cm.enable_auto_cpu_offload(device=torch_device) offload_pipe = self.get_pipeline(components_manager=cm) image_slices = [] for pipe in [base_pipe, offload_pipe]: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs, output="images") image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 def test_save_from_pretrained(self): pipes = [] base_pipe = self.get_pipeline().to(torch_device) pipes.append(base_pipe) with tempfile.TemporaryDirectory() as tmpdirname: base_pipe.save_pretrained(tmpdirname) pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device) pipe.load_default_components(torch_dtype=torch.float32) pipe.to(torch_device) pipes.append(pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs, output="images") image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
diffusers/tests/modular_pipelines/test_modular_pipelines_common.py/0
{ "file_path": "diffusers/tests/modular_pipelines/test_modular_pipelines_common.py", "repo_id": "diffusers", "token_count": 5871 }
185
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily based on: import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetInpaintPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class ControlNetInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS 
image_params = frozenset({"control_image"}) # skip `image` and `mask` for now, only test for control_image image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * 
controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, 
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components class MultiControlNetInpaintPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), 
cross_attention_dim=32, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal_(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) 
controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert 
np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) @slow @require_torch_accelerator class ControlNetInpaintPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "botp/stable-diffusion-v1-5-inpainting", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = 
load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) prompt = "pitch black hole" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe( prompt, image=image, mask_image=mask_image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=3, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/inpaint.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_inpaint(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(33) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" ) init_image = init_image.resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" ) mask_image = mask_image.resize((512, 512)) prompt = "a handsome man with ray-ban sunglasses" def make_inpaint_condition(image, image_mask): image = np.array(image.convert("RGB")).astype(np.float32) / 255.0 image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0 assert image.shape[0:1] == image_mask.shape[0:1], 
"image and image_mask must have the same image size" image[image_mask > 0.5] = -1.0 # set as masked pixel image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) image = torch.from_numpy(image) return image control_image = make_inpaint_condition(init_image, mask_image) output = pipe( prompt, image=init_image, mask_image=mask_image, control_image=control_image, guidance_scale=9.0, eta=1.0, generator=generator, num_inference_steps=20, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.npy" ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 1e-2
diffusers/tests/pipelines/controlnet/test_controlnet_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet_inpaint.py", "repo_id": "diffusers", "token_count": 10114 }
186
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import json
import os
import tempfile
import unittest

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import AutoencoderKLCosmos, CosmosTextToWorldPipeline, CosmosTransformer3DModel, EDMEulerScheduler
from diffusers.utils.testing_utils import enable_full_determinism, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, to_np
from .cosmos_guardrail import DummyCosmosSafetyChecker


enable_full_determinism()


class CosmosTextToWorldPipelineWrapper(CosmosTextToWorldPipeline):
    """Thin wrapper that forces a lightweight dummy safety checker on `from_pretrained`.

    The real Cosmos Guardrail model is too large/slow for fast CI tests, so every
    pipeline instantiated through this wrapper gets a `DummyCosmosSafetyChecker`.
    """

    @staticmethod
    def from_pretrained(*args, **kwargs):
        # Inject the dummy checker before delegating to the real pipeline loader.
        kwargs["safety_checker"] = DummyCosmosSafetyChecker()
        return CosmosTextToWorldPipeline.from_pretrained(*args, **kwargs)


class CosmosTextToWorldPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) test suite for the Cosmos text-to-world pipeline."""

    pipeline_class = CosmosTextToWorldPipelineWrapper
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    supports_dduf = False
    test_xformers_attention = False
    test_layerwise_casting = True
    test_group_offloading = True

    def get_dummy_components(self):
        """Build minimal randomly-initialized pipeline components for fast tests.

        Each component is seeded with ``torch.manual_seed(0)`` immediately before
        construction so the random init is reproducible regardless of test order.
        """
        torch.manual_seed(0)
        transformer = CosmosTransformer3DModel(
            in_channels=4,
            out_channels=4,
            num_attention_heads=2,
            attention_head_dim=16,
            num_layers=2,
            mlp_ratio=2,
            text_embed_dim=32,
            adaln_lora_dim=4,
            max_size=(4, 32, 32),
            patch_size=(1, 2, 2),
            rope_scale=(2.0, 1.0, 1.0),
            concat_padding_mask=True,
            extra_pos_embed_type="learnable",
        )

        torch.manual_seed(0)
        vae = AutoencoderKLCosmos(
            in_channels=3,
            out_channels=3,
            latent_channels=4,
            encoder_block_out_channels=(8, 8, 8, 8),
            decode_block_out_channels=(8, 8, 8, 8),
            attention_resolutions=(8,),
            resolution=64,
            num_layers=2,
            patch_size=4,
            patch_type="haar",
            scaling_factor=1.0,
            spatial_compression_ratio=4,
            temporal_compression_ratio=4,
        )

        torch.manual_seed(0)
        scheduler = EDMEulerScheduler(
            sigma_min=0.002,
            sigma_max=80,
            sigma_data=0.5,
            sigma_schedule="karras",
            num_train_timesteps=1000,
            prediction_type="epsilon",
            rho=7.0,
            final_sigmas_type="sigma_min",
        )
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            # We cannot run the Cosmos Guardrail for fast tests due to the large model size
            "safety_checker": DummyCosmosSafetyChecker(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a small, deterministic set of call kwargs for the pipeline."""
        # MPS does not support device-local generators, so fall back to the global seed.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "dance monkey",
            "negative_prompt": "bad quality",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 3.0,
            "height": 32,
            "width": 32,
            "num_frames": 9,
            "max_sequence_length": 16,
            "output_type": "pt",
        }

        return inputs

    def test_inference(self):
        """Smoke test: output shape and a fixed 16-value slice of the generated video."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]
        self.assertEqual(generated_video.shape, (9, 3, 32, 32))

        # fmt: off
        expected_slice = torch.tensor([0.0, 0.9686, 0.8549, 0.8078, 0.0, 0.8431, 1.0, 0.4863, 0.7098, 0.1098, 0.8157, 0.4235, 0.6353, 0.2549, 0.5137, 0.5333])
        # fmt: on

        # Compare only the first and last 8 flattened values against the recorded slice.
        generated_slice = generated_video.flatten()
        generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]])
        self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3))

    def test_callback_inputs(self):
        """Verify step-end callbacks receive only declared tensor inputs and can mutate them."""
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        # Nothing to test if the pipeline's __call__ does not expose the callback API.
        if not (has_callback_tensor_inputs and has_callback_step_end):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_subset(pipe, i, t, callback_kwargs):
            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        def callback_inputs_all(pipe, i, t, callback_kwargs):
            for tensor_name in pipe._callback_tensor_inputs:
                assert tensor_name in callback_kwargs

            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        # Test passing in a subset
        inputs["callback_on_step_end"] = callback_inputs_subset
        inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
        output = pipe(**inputs)[0]

        # Test passing in a everything
        inputs["callback_on_step_end"] = callback_inputs_all
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        output = pipe(**inputs)[0]

        def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
            # Zero out the latents on the final step so the callback's effect is observable.
            is_last = i == (pipe.num_timesteps - 1)
            if is_last:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs["callback_on_step_end"] = callback_inputs_change_tensor
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        output = pipe(**inputs)[0]
        # Sanity bound only: ensures the run produced finite values, not an exact result.
        assert output.abs().sum() < 1e10

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-2)

    def test_attention_slicing_forward_pass(
        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
    ):
        """Check that enabling attention slicing does not change the pipeline output."""
        if not self.test_attention_slicing:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        output_without_slicing = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=1)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing1 = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=2)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing2 = pipe(**inputs)[0]

        if test_max_difference:
            max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
            max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
            self.assertLess(
                max(max_diff1, max_diff2),
                expected_max_diff,
                "Attention slicing should not affect the inference results",
            )

    def test_vae_tiling(self, expected_diff_max: float = 0.2):
        """Check that VAE tiling yields (approximately) the same output as no tiling."""
        generator_device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe.to("cpu")
        pipe.set_progress_bar_config(disable=None)

        # Without tiling
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_without_tiling = pipe(**inputs)[0]

        # With tiling
        pipe.vae.enable_tiling(
            tile_sample_min_height=96,
            tile_sample_min_width=96,
            tile_sample_stride_height=64,
            tile_sample_stride_width=64,
        )
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_with_tiling = pipe(**inputs)[0]

        self.assertLess(
            (to_np(output_without_tiling) - to_np(output_with_tiling)).max(),
            expected_diff_max,
            "VAE tiling should not affect the inference results",
        )

    def test_save_load_optional_components(self, expected_max_difference=1e-4):
        # Temporarily treat the safety checker as required so the base test does not
        # try to drop it (the pipeline cannot run without one); restore afterwards.
        self.pipeline_class._optional_components.remove("safety_checker")
        super().test_save_load_optional_components(expected_max_difference=expected_max_difference)
        self.pipeline_class._optional_components.append("safety_checker")

    def test_serialization_with_variants(self):
        """Saving with a weight variant should produce variant-named files per model subfolder."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        model_components = [
            component_name
            for component_name, component in pipe.components.items()
            if isinstance(component, torch.nn.Module)
        ]
        # The dummy safety checker is not serialized, so exclude it from the check.
        model_components.remove("safety_checker")
        variant = "fp16"

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False)

            with open(f"{tmpdir}/model_index.json", "r") as f:
                config = json.load(f)

            for subfolder in os.listdir(tmpdir):
                if not os.path.isfile(subfolder) and subfolder in model_components:
                    folder_path = os.path.join(tmpdir, subfolder)
                    is_folder = os.path.isdir(folder_path) and subfolder in config
                    assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path))

    def test_torch_dtype_dict(self):
        """Loading with a per-component `torch_dtype` dict should apply the mapped dtypes."""
        components = self.get_dummy_components()
        if not components:
            self.skipTest("No dummy components defined.")

        pipe = self.pipeline_class(**components)
        specified_key = next(iter(components.keys()))

        with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname:
            pipe.save_pretrained(tmpdirname, safe_serialization=False)
            torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16}
            loaded_pipe = self.pipeline_class.from_pretrained(
                tmpdirname, safety_checker=DummyCosmosSafetyChecker(), torch_dtype=torch_dtype_dict
            )

        for name, component in loaded_pipe.components.items():
            # The injected dummy safety checker is not subject to the dtype mapping.
            if name == "safety_checker":
                continue
            if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"):
                expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32))
                self.assertEqual(
                    component.dtype,
                    expected_dtype,
                    f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}",
                )

    @unittest.skip(
        "The pipeline should not be runnable without a safety checker. The test creates a pipeline without passing in "
        "a safety checker, which makes the pipeline default to the actual Cosmos Guardrail. The Cosmos Guardrail is "
        "too large and slow to run on CI."
    )
    def test_encode_prompt_works_in_isolation(self):
        pass
diffusers/tests/pipelines/cosmos/test_cosmos.py/0
{ "file_path": "diffusers/tests/pipelines/cosmos/test_cosmos.py", "repo_id": "diffusers", "token_count": 6304 }
187
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DiTTransformer2DModel, DPMSolverMultistepScheduler
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    load_numpy,
    nightly,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    torch_device,
)

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) test suite for the class-conditioned DiT pipeline."""

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    # DiT's __call__ does not take these common optional params, so drop them
    # from the required set inherited from PipelineTesterMixin.
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS

    def get_dummy_components(self):
        """Build a tiny seeded transformer plus default VAE/scheduler for fast tests."""
        torch.manual_seed(0)
        transformer = DiTTransformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        # eval() puts the modules in inference mode (e.g. disables dropout) for determinism.
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs (single class label, 2 steps)."""
        # MPS does not support device-local generators, so fall back to the global seed.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_inference(self):
        """Smoke test: output shape and a fixed 3x3 corner slice of the image."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@nightly
@require_torch_accelerator
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Nightly integration tests against the full pretrained DiT-XL checkpoints."""

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_dit_256(self):
        """Generate four 256px images and compare each to a stored reference array."""
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to(torch_device)

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        """Generate two 512px images with DPMSolver and compare via cosine distance."""
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}_512.npy"
            )

            expected_slice = expected_image.flatten()
            output_slice = image.flatten()

            assert numpy_cosine_similarity_distance(expected_slice, output_slice) < 1e-2
diffusers/tests/pipelines/dit/test_dit.py/0
{ "file_path": "diffusers/tests/pipelines/dit/test_dit.py", "repo_id": "diffusers", "token_count": 2488 }
188
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from diffusers import (
    KandinskyV22CombinedPipeline,
    KandinskyV22Img2ImgCombinedPipeline,
    KandinskyV22InpaintCombinedPipeline,
)
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, torch_device

from ..test_pipelines_common import PipelineTesterMixin
from .test_kandinsky import Dummies
from .test_kandinsky_img2img import Dummies as Img2ImgDummies
from .test_kandinsky_inpaint import Dummies as InpaintDummies
from .test_kandinsky_prior import Dummies as PriorDummies


enable_full_determinism()


class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast test suite for the combined (prior + decoder) Kandinsky 2.2 text-to-image pipeline."""

    pipeline_class = KandinskyV22CombinedPipeline
    params = [
        "prompt",
    ]
    batch_params = ["prompt", "negative_prompt"]
    # Optional __call__ params checked by the tester mixin (duplicates removed).
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = True
    callback_cfg_params = ["image_embds"]
    supports_dduf = False

    def get_dummy_components(self):
        """Merge decoder dummy components with prior components under a `prior_` prefix."""
        dummy = Dummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Prior inputs plus the output resolution for the combined pipeline."""
        prior_dummy = PriorDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(
            {
                "height": 64,
                "width": 64,
            }
        )
        return inputs

    def test_kandinsky(self):
        """Smoke test: output shape and a fixed corner slice, for dict and tuple returns."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.3076, 0.2729, 0.5668, 0.0522, 0.3384, 0.7028, 0.4908, 0.3659, 0.6243])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
            f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        )
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, (
            f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
        )

    @require_torch_accelerator
    def test_offloads(self):
        """Outputs must match across no-offload, model offload, and sequential offload."""
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload(device=torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload(device=torch_device)
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-3)

    def test_callback_inputs(self):
        # Callback input plumbing is not supported by the combined pipeline.
        pass

    def test_callback_cfg(self):
        pass


class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast test suite for the combined Kandinsky 2.2 image-to-image pipeline."""

    pipeline_class = KandinskyV22Img2ImgCombinedPipeline
    params = ["prompt", "image"]
    batch_params = ["prompt", "negative_prompt", "image"]
    # Optional __call__ params checked by the tester mixin (duplicates removed).
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False
    callback_cfg_params = ["image_embds"]
    supports_dduf = False

    def get_dummy_components(self):
        """Merge img2img decoder components with prior components under a `prior_` prefix."""
        dummy = Img2ImgDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Combine prior and img2img inputs; the combined pipeline computes embeds itself."""
        prior_dummy = PriorDummies()
        dummy = Img2ImgDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        # The combined pipeline derives the image embeddings internally from the prompt.
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        """Smoke test: output shape and a fixed corner slice, for dict and tuple returns."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4445, 0.4287, 0.4596, 0.3919, 0.3730, 0.5039, 0.4834, 0.4269, 0.5521])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
            f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        )
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, (
            f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
        )

    @require_torch_accelerator
    def test_offloads(self):
        """Outputs must match across no-offload, model offload, and sequential offload."""
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload(device=torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload(device=torch_device)
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=2e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-4)

    # Fix: this override was previously named `save_load_local` (no `test_` prefix),
    # so it was never collected and the looser 5e-3 tolerance was never applied —
    # the base mixin's stricter default ran instead. Renamed to match the sibling
    # classes' `test_save_load_local` overrides.
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    def test_callback_inputs(self):
        pass

    def test_callback_cfg(self):
        pass


class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast test suite for the combined Kandinsky 2.2 inpainting pipeline."""

    pipeline_class = KandinskyV22InpaintCombinedPipeline
    params = ["prompt", "image", "mask_image"]
    batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
    # Optional __call__ params checked by the tester mixin (duplicates removed).
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        """Merge inpaint decoder components with prior components under a `prior_` prefix."""
        dummy = InpaintDummies()
        prior_dummy = PriorDummies()
        components = dummy.get_dummy_components()

        components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Combine prior and inpaint inputs; the combined pipeline computes embeds itself."""
        prior_dummy = PriorDummies()
        dummy = InpaintDummies()
        inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
        inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
        # The combined pipeline derives the image embeddings internally from the prompt.
        inputs.pop("image_embeds")
        inputs.pop("negative_image_embeds")
        return inputs

    def test_kandinsky(self):
        """Smoke test: output shape and a fixed corner slice, for dict and tuple returns."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
            f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        )
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, (
            f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
        )

    @require_torch_accelerator
    def test_offloads(self):
        """Outputs must match across no-offload, model offload, and sequential offload."""
        pipes = []
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components).to(torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_model_cpu_offload(device=torch_device)
        pipes.append(sd_pipe)

        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.enable_sequential_cpu_offload(device=torch_device)
        pipes.append(sd_pipe)

        image_slices = []
        for pipe in pipes:
            inputs = self.get_dummy_inputs(torch_device)
            image = pipe(**inputs).images

            image_slices.append(image[0, -3:, -3:, -1].flatten())

        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=8e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

    def test_model_cpu_offload_forward_pass(self):
        super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=5e-4)

    def test_sequential_cpu_offload_forward_pass(self):
        super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_callback_inputs(self):
        pass

    def test_callback_cfg(self):
        pass
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py", "repo_id": "diffusers", "token_count": 6197 }
189
import unittest

import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    Lumina2Pipeline,
    Lumina2Transformer2DModel,
)

from ..test_pipelines_common import PipelineTesterMixin


class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
    """Fast (tiny-model) test suite for the Lumina 2 text-to-image pipeline.

    Only the dummy-component/input builders are defined here; the actual test
    methods come from `PipelineTesterMixin`.
    """

    pipeline_class = Lumina2Pipeline
    params = frozenset(
        [
            "prompt",
            "height",
            "width",
            "guidance_scale",
            "negative_prompt",
            "prompt_embeds",
            "negative_prompt_embeds",
        ]
    )
    batch_params = frozenset(["prompt", "negative_prompt"])
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    supports_dduf = False
    test_xformers_attention = False
    test_layerwise_casting = True

    def get_dummy_components(self):
        """Build minimal seeded components: tiny transformer, VAE, scheduler, Gemma2 encoder."""
        torch.manual_seed(0)
        transformer = Lumina2Transformer2DModel(
            sample_size=4,
            patch_size=2,
            in_channels=4,
            hidden_size=8,
            num_layers=2,
            num_attention_heads=1,
            num_kv_heads=1,
            multiple_of=16,
            ffn_dim_multiplier=None,
            norm_eps=1e-5,
            scaling_factor=1.0,
            axes_dim_rope=[4, 2, 2],
            cap_feat_dim=8,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL(
            sample_size=32,
            in_channels=3,
            out_channels=3,
            block_out_channels=(4,),
            layers_per_block=1,
            latent_channels=4,
            norm_num_groups=1,
            use_quant_conv=False,
            use_post_quant_conv=False,
            shift_factor=0.0609,
            scaling_factor=1.5035,
        )

        scheduler = FlowMatchEulerDiscreteScheduler()
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")

        torch.manual_seed(0)
        config = Gemma2Config(
            head_dim=4,
            hidden_size=8,
            intermediate_size=8,
            num_attention_heads=2,
            num_hidden_layers=2,
            num_key_value_heads=2,
            sliding_window=2,
        )
        text_encoder = Gemma2Model(config)

        components = {
            "transformer": transformer,
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs for a tiny 32x32, 2-step generation."""
        if str(device).startswith("mps"):
            # MPS does not support device-local generators; use the global seed.
            generator = torch.manual_seed(seed)
        else:
            # NOTE(review): generator is pinned to "cpu" rather than `device` here,
            # unlike sibling test files — presumably intentional for determinism; confirm.
            generator = torch.Generator(device="cpu").manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "height": 32,
            "width": 32,
            "output_type": "np",
        }

        return inputs
diffusers/tests/pipelines/lumina2/test_pipeline_lumina2.py/0
{ "file_path": "diffusers/tests/pipelines/lumina2/test_pipeline_lumina2.py", "repo_id": "diffusers", "token_count": 1756 }
190
# Copyright 2024 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKLWan,
    SkyReelsV2Pipeline,
    SkyReelsV2Transformer3DModel,
    UniPCMultistepScheduler,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineTesterMixin,
)


enable_full_determinism()


class SkyReelsV2PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) test suite for the SkyReels V2 text-to-video pipeline."""

    pipeline_class = SkyReelsV2Pipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        """Build minimal seeded components: tiny Wan VAE, UniPC scheduler, T5, transformer."""
        torch.manual_seed(0)
        vae = AutoencoderKLWan(
            base_dim=3,
            z_dim=16,
            dim_mult=[1, 1, 1, 1],
            num_res_blocks=1,
            temperal_downsample=[False, True, True],
        )

        torch.manual_seed(0)
        scheduler = UniPCMultistepScheduler(flow_shift=8.0, use_flow_sigmas=True)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        transformer = SkyReelsV2Transformer3DModel(
            patch_size=(1, 2, 2),
            num_attention_heads=2,
            attention_head_dim=12,
            in_channels=16,
            out_channels=16,
            text_dim=32,
            freq_dim=256,
            ffn_dim=32,
            num_layers=2,
            cross_attn_norm=True,
            qk_norm="rms_norm_across_heads",
            rope_max_seq_len=32,
        )

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs for a tiny 16x16, 9-frame generation."""
        # MPS does not support device-local generators, so fall back to the global seed.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "dance monkey",
            "negative_prompt": "negative",  # TODO
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "height": 16,
            "width": 16,
            "num_frames": 9,
            "max_sequence_length": 16,
            "output_type": "pt",
        }

        return inputs

    def test_inference(self):
        """Smoke test: the pipeline runs end-to-end and yields the expected video shape."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]

        self.assertEqual(generated_video.shape, (9, 3, 16, 16))
        # NOTE(review): the comparison below is vacuous — `expected_video` is fresh
        # random noise and the 1e10 bound always holds. It only checks the output is
        # finite/comparable; presumably a placeholder until reference values are
        # recorded. Consider replacing with an expected slice like sibling suites.
        expected_video = torch.randn(9, 3, 16, 16)
        max_diff = np.abs(generated_video - expected_video).max()
        self.assertLessEqual(max_diff, 1e10)

    @unittest.skip("Test not supported")
    def test_attention_slicing_forward_pass(self):
        pass
diffusers/tests/pipelines/skyreels_v2/test_skyreels_v2.py/0
{ "file_path": "diffusers/tests/pipelines/skyreels_v2/test_skyreels_v2.py", "repo_id": "diffusers", "token_count": 2017 }
191
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import time import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTextConfig, CLIPTextModel, CLIPTokenizer, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, logging, ) from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, enable_full_determinism, load_numpy, nightly, numpy_cosine_similarity_distance, require_accelerate_version_greater, require_torch_accelerator, require_torch_multi_accelerator, skip_mps, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class StableDiffusionPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionPipeline params = TEXT_TO_IMAGE_PARAMS 
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self, time_cond_proj_dim=None): cross_attention_dim = 8 torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, time_cond_proj_dim=time_cond_proj_dim, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=cross_attention_dim, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=cross_attention_dim, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, 
"output_type": "np", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.1763, 0.4776, 0.4986, 0.2566, 0.3802, 0.4596, 0.5363, 0.3277, 0.3949]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.2368, 0.4900, 0.5019, 0.2723, 0.4473, 0.4578, 0.4551, 0.3532, 0.4133]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = 
np.array([0.2368, 0.4900, 0.5019, 0.2723, 0.4473, 0.4578, 0.4551, 0.3532, 0.4133]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_ays(self): from diffusers.schedulers import AysSchedules timestep_schedule = AysSchedules["StableDiffusionTimesteps"] sigma_schedule = AysSchedules["StableDiffusionSigmas"] device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 10 output = sd_pipe(**inputs).images inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = None inputs["timesteps"] = timestep_schedule output_ts = sd_pipe(**inputs).images inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = None inputs["sigmas"] = sigma_schedule output_sigmas = sd_pipe(**inputs).images assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, ( "ays timesteps and ays sigmas should have the same outputs" ) assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, ( "use ays timesteps should have different outputs" ) assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, ( "use ays sigmas should have different outputs" ) def test_stable_diffusion_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] 
text_inputs = sd_pipe.tokenizer( prompt, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = sd_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = sd_pipe.tokenizer( p, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) embeds.append(sd_pipe.text_encoder(text_inputs)[0]) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_ddim_factor_8(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, height=136, width=136) image = output.images 
image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 136, 136, 3) expected_slice = np.array([0.4720, 0.5426, 0.5160, 0.3961, 0.4696, 0.4296, 0.5738, 0.5888, 0.5481]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.1941, 0.4748, 0.4880, 0.2222, 0.4221, 0.4545, 0.5604, 0.3488, 0.3902]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_no_safety_checker(self): pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None ) assert isinstance(pipe, StableDiffusionPipeline) assert isinstance(pipe.scheduler, LMSDiscreteScheduler) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe 
= sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.2681, 0.4785, 0.4857, 0.2426, 0.4473, 0.4481, 0.5610, 0.3676, 0.3855]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.2682, 0.4782, 0.4855, 0.2424, 0.4472, 0.4479, 0.5612, 0.3676, 0.3854]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.2681, 0.4785, 0.4857, 0.2426, 0.4473, 0.4481, 0.5610, 0.3676, 0.3855]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_vae_slicing(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = 
LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) image_count = 4 inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count output_1 = sd_pipe(**inputs) # make sure sliced vae decode yields the same result sd_pipe.enable_vae_slicing() inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count output_2 = sd_pipe(**inputs) # there is a small discrepancy at image borders vs. full batch decode assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3 def test_stable_diffusion_vae_tiling(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # make sure here that pndm scheduler skips prk components["safety_checker"] = None sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" # Test that tiled decode at 512x512 yields the same result as the non-tiled decode generator = torch.Generator(device=device).manual_seed(0) output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") # make sure tiled vae decode yields the same result sd_pipe.enable_vae_tiling() generator = torch.Generator(device=device).manual_seed(0) output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 5e-1 # test that tiled decode works with various shapes shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)] for shape in shapes: zeros = torch.zeros(shape).to(device) sd_pipe.vae.decode(zeros) def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the 
device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.1907, 0.4709, 0.4858, 0.2224, 0.4223, 0.4539, 0.5606, 0.3489, 0.3900]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_long_prompt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) do_classifier_free_guidance = True negative_prompt = None num_images_per_prompt = 1 logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") logger.setLevel(logging.WARNING) prompt = 100 * "@" with CaptureLogger(logger) as cap_logger: negative_text_embeddings, text_embeddings = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings is not None: text_embeddings = torch.cat([negative_text_embeddings, text_embeddings]) # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 assert cap_logger.out.count("@") == 25 negative_prompt = "Hello" with CaptureLogger(logger) as cap_logger_2: negative_text_embeddings_2, text_embeddings_2 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_2 is not None: text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) assert cap_logger.out == cap_logger_2.out prompt = 25 
* "@" with CaptureLogger(logger) as cap_logger_3: negative_text_embeddings_3, text_embeddings_3 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_3 is not None: text_embeddings_3 = torch.cat([negative_text_embeddings_3, text_embeddings_3]) assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape assert text_embeddings.shape[1] == 77 assert cap_logger_3.out == "" def test_stable_diffusion_height_width_opt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (64, 64) output = sd_pipe(prompt, num_inference_steps=1, height=96, width=96, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (96, 96) config = dict(sd_pipe.unet.config) config["sample_size"] = 96 sd_pipe.unet = UNet2DConditionModel.from_config(config).to(torch_device) output = sd_pipe(prompt, num_inference_steps=1, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (192, 192) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # MPS currently doesn't support ComplexFloats, which are required for freeU - see https://github.com/huggingface/diffusers/issues/7569. 
@skip_mps def test_freeu_enabled(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) output_freeu = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), ( "Enabling of FreeU should lead to different results." ) def test_freeu_disabled(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) sd_pipe.disable_freeu() freeu_keys = {"s1", "s2", "b1", "b2"} for upsample_block in sd_pipe.unet.up_blocks: for key in freeu_keys: assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None." output_no_freeu = sd_pipe( prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0) ).images assert np.allclose(output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1]), ( "Disabling of FreeU should lead to results similar to the default pipeline results." 
) def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] sd_pipe.fuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] sd_pipe.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( "Fusion of QKV projections shouldn't affect the outputs." ) assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." ) assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Original outputs should match when fused QKV projections are disabled." 
) def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) def test_pipeline_accept_tuple_type_unet_sample_size(self): # the purpose of this test is to see whether the pipeline would accept a unet with the tuple-typed sample size sd_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" sample_size = [60, 80] customised_unet = UNet2DConditionModel(sample_size=sample_size) pipe = StableDiffusionPipeline.from_pretrained(sd_repo_id, unet=customised_unet) assert pipe.unet.config.sample_size == sample_size def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, 
"do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) @slow @require_torch_accelerator class StableDiffusionPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_1_1_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.4363, 0.4355, 0.3667, 0.4066, 0.3970, 0.3866, 0.4394, 0.4356, 0.4059]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_v1_4_with_freeu(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) image = sd_pipe(**inputs).images image = image[0, -3:, -3:, -1].flatten() expected_image = [0.0721, 0.0588, 0.0268, 0.0384, 0.0636, 0.0, 0.0429, 0.0344, 0.0309] max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_1_4_pndm(self): sd_pipe = 
        # NOTE(review): this chunk begins mid-method — the `def` line and the
        # `sd_pipe = ` assignment that this `from_pretrained` call continues are
        # outside the visible range of the file.
        StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5740, 0.4784, 0.3162, 0.6358, 0.5831, 0.5505, 0.5082, 0.5631, 0.5575])
        assert np.abs(image_slice - expected_slice).max() < 3e-3

    def test_stable_diffusion_ddim(self):
        """Full SD v1.4 run with the DDIM scheduler; checks a 3x3 corner slice of the output."""
        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239])
        assert np.abs(image_slice - expected_slice).max() < 1e-4

    def test_stable_diffusion_lms(self):
        """Full SD v1.4 run with the LMS discrete scheduler; checks a 3x3 corner slice."""
        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.10542, 0.09620, 0.07332, 0.09015, 0.09382, 0.07597, 0.08496, 0.07806, 0.06455])
        assert np.abs(image_slice - expected_slice).max() < 3e-3

    def test_stable_diffusion_dpm(self):
        """Full SD v1.4 run with the DPM-Solver multistep scheduler; checks a 3x3 corner slice."""
        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
        sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            sd_pipe.scheduler.config,
            final_sigmas_type="sigma_min",
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.03503, 0.03494, 0.01087, 0.03128, 0.02552, 0.00803, 0.00742, 0.00372, 0.00000])
        assert np.abs(image_slice - expected_slice).max() < 3e-3

    def test_stable_diffusion_attention_slicing(self):
        """Attention slicing must cut peak memory below 3.75 GB while keeping the image near-identical."""
        backend_reset_peak_memory_stats(torch_device)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.unet.set_default_attn_processor()
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # enable attention slicing
        pipe.enable_attention_slicing()
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        image_sliced = pipe(**inputs).images
        mem_bytes = backend_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)
        # make sure that less than 3.75 GB is allocated
        assert mem_bytes < 3.75 * 10**9
        # disable slicing
        pipe.disable_attention_slicing()
        pipe.unet.set_default_attn_processor()
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        image = pipe(**inputs).images
        # make sure that more than 3.75 GB is allocated
        mem_bytes = backend_max_memory_allocated(torch_device)
        assert mem_bytes > 3.75 * 10**9
        max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten())
        assert max_diff < 1e-3

    def test_stable_diffusion_vae_slicing(self):
        """VAE slicing on a batch of 4 must stay under 4 GB; unsliced decode exceeds it."""
        backend_reset_peak_memory_stats(torch_device)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        # enable vae slicing
        pipe.enable_vae_slicing()
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        inputs["prompt"] = [inputs["prompt"]] * 4
        inputs["latents"] = torch.cat([inputs["latents"]] * 4)
        image_sliced = pipe(**inputs).images
        mem_bytes = backend_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)
        # make sure that less than 4 GB is allocated
        assert mem_bytes < 4e9
        # disable vae slicing
        pipe.disable_vae_slicing()
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        inputs["prompt"] = [inputs["prompt"]] * 4
        inputs["latents"] = torch.cat([inputs["latents"]] * 4)
        image = pipe(**inputs).images
        # make sure that more than 4 GB is allocated
        mem_bytes = backend_max_memory_allocated(torch_device)
        assert mem_bytes > 4e9
        # There is a small discrepancy at the image borders vs. a fully batched version.
        max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten())
        assert max_diff < 1e-2

    def test_stable_diffusion_vae_tiling(self):
        """VAE tiling at 1024x1024 must keep peak memory under 10 GB with near-identical output."""
        backend_reset_peak_memory_stats(torch_device)
        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id, variant="fp16", torch_dtype=torch.float16, safety_checker=None
        )
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
        pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
        prompt = "a photograph of an astronaut riding a horse"
        # enable vae tiling
        pipe.enable_vae_tiling()
        pipe.enable_model_cpu_offload(device=torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output_chunked = pipe(
            [prompt],
            width=1024,
            height=1024,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="np",
        )
        image_chunked = output_chunked.images
        mem_bytes = backend_max_memory_allocated(torch_device)
        # disable vae tiling
        pipe.disable_vae_tiling()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(
            [prompt],
            width=1024,
            height=1024,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert mem_bytes < 1e10
        max_diff = numpy_cosine_similarity_distance(image_chunked.flatten(), image.flatten())
        assert max_diff < 1e-2

    def test_stable_diffusion_fp16_vs_autocast(self):
        # this test makes sure that the original model with autocast
        # and the new model with fp16 yield the same result
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        image_fp16 = pipe(**inputs).images
        with torch.autocast(torch_device):
            inputs = self.get_inputs(torch_device)
            image_autocast = pipe(**inputs).images
        # Make sure results are close enough
        diff = np.abs(image_fp16.flatten() - image_autocast.flatten())
        # They ARE different since ops are not run always at the same precision
        # however, they should be extremely close.
        assert diff.mean() < 2e-2

    def test_stable_diffusion_intermediate_state(self):
        """Uses the legacy step callback to check intermediate latents at steps 1 and 2."""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
            # Invoked once per scheduler step; inspects latents at two fixed steps.
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.5693, -0.3018, -0.9746, 0.0518, -0.8770, 0.7559, -1.7402, 0.1022, 1.1582]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.1958, -0.2993, -1.0166, -0.5005, -0.4810, 0.6162, -0.9492, 0.6621, 1.4492]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == inputs["num_inference_steps"]

    def test_stable_diffusion_low_cpu_mem_usage(self):
        """low_cpu_mem_usage loading (the default) should be at least 2x faster than eager loading."""
        pipeline_id = "CompVis/stable-diffusion-v1-4"
        start_time = time.time()
        pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16)
        pipeline_low_cpu_mem_usage.to(torch_device)
        low_cpu_mem_usage_time = time.time() - start_time
        start_time = time.time()
        _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False)
        normal_load_time = time.time() - start_time
        assert 2 * low_cpu_mem_usage_time < normal_load_time

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """Sequential CPU offload plus attention slicing must keep peak memory under 2.8 GB."""
        backend_empty_cache(torch_device)
        backend_reset_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload(device=torch_device)
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        _ = pipe(**inputs)
        mem_bytes = backend_max_memory_allocated(torch_device)
        # make sure that less than 2.8 GB is allocated
        assert mem_bytes < 2.8 * 10**9

    def test_stable_diffusion_pipeline_with_model_offloading(self):
        """Model offloading must lower peak memory without changing outputs; components end on CPU."""
        backend_empty_cache(torch_device)
        backend_reset_peak_memory_stats(torch_device)
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        # Normal inference
        pipe = StableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            torch_dtype=torch.float16,
        )
        pipe.unet.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        outputs = pipe(**inputs)
        mem_bytes = backend_max_memory_allocated(torch_device)
        # With model offloading
        # Reload but don't move to cuda
        pipe = StableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            torch_dtype=torch.float16,
        )
        pipe.unet.set_default_attn_processor()
        backend_empty_cache(torch_device)
        backend_reset_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        outputs_offloaded = pipe(**inputs)
        mem_bytes_offloaded = backend_max_memory_allocated(torch_device)
        images = outputs.images
        offloaded_images = outputs_offloaded.images
        max_diff = numpy_cosine_similarity_distance(images.flatten(), offloaded_images.flatten())
        assert max_diff < 1e-3
        assert mem_bytes_offloaded < mem_bytes
        assert mem_bytes_offloaded < 3.5 * 10**9
        for module in pipe.text_encoder, pipe.unet, pipe.vae:
            assert module.device == torch.device("cpu")
        # With attention slicing
        backend_empty_cache(torch_device)
        backend_reset_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)
        pipe.enable_attention_slicing()
        _ = pipe(**inputs)
        mem_bytes_slicing = backend_max_memory_allocated(torch_device)
        assert mem_bytes_slicing < mem_bytes_offloaded
        assert mem_bytes_slicing < 3 * 10**9

    def test_stable_diffusion_textual_inversion(self):
        """Loads diffusers-format and A1111-format textual inversion embeddings and compares to a reference image."""
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons")
        a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt")
        a111_file_neg = hf_hub_download(
            "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt"
        )
        pipe.load_textual_inversion(a111_file)
        pipe.load_textual_inversion(a111_file_neg)
        pipe.to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(1)
        prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>"
        neg_prompt = "Style-Winter-neg"
        # NOTE(review): this call is cut at the chunk boundary; its remaining
        # keyword arguments continue on the next visible line of the file.
        image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator,
output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 def test_stable_diffusion_textual_inversion_with_model_cpu_offload(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.enable_model_cpu_offload(device=torch_device) pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 def test_stable_diffusion_textual_inversion_with_sequential_cpu_offload(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.enable_sequential_cpu_offload(device=torch_device) pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons").to(torch_device) a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An 
logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 @slow @require_torch_accelerator class StableDiffusionPipelineCkptTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_download_from_hub(self): ckpt_paths = [ "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors", ] for ckpt_path in ckpt_paths: pipe = StableDiffusionPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (512, 512, 3) def test_download_local(self): ckpt_filename = hf_hub_download( "stable-diffusion-v1-5/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.safetensors" ) config_filename = hf_hub_download("stable-diffusion-v1-5/stable-diffusion-v1-5", filename="v1-inference.yaml") pipe = StableDiffusionPipeline.from_single_file( ckpt_filename, config_files={"v1": config_filename}, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (512, 512, 3) @nightly @require_torch_accelerator class StableDiffusionPipelineNightlyTests(unittest.TestCase): def setUp(self): 
super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_1_4_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_1_5_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5").to( torch_device ) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_5_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = 
load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 3e-3 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_euler(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_euler.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 # (sayakpaul): This test suite was run in the DGX with two GPUs (1, 2). 
@slow
@require_torch_multi_accelerator
@require_accelerate_version_greater("0.27.0")
class StableDiffusionPipelineDeviceMapTests(unittest.TestCase):
    """Checks `device_map="balanced"` loading: output parity with single-device
    inference, component placement across accelerators, and that
    `reset_device_map()` restores a pipeline that can be moved / offloaded."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, generator_device="cpu", seed=0):
        """Deterministic pipeline kwargs driven by a seeded CPU generator."""
        return {
            "prompt": "a photograph of an astronaut riding a horse",
            "generator": torch.Generator(device=generator_device).manual_seed(seed),
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "np",
        }

    def _balanced_pipeline(self, **extra_kwargs):
        # Shared loader for the device-mapped pipeline used by several tests.
        return StableDiffusionPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5",
            device_map="balanced",
            torch_dtype=torch.float16,
            **extra_kwargs,
        )

    def get_pipeline_output_without_device_map(self):
        """Reference images produced without any device map."""
        plain_pipe = StableDiffusionPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to(torch_device)
        plain_pipe.set_progress_bar_config(disable=True)
        baseline_images = plain_pipe(**self.get_inputs()).images
        del plain_pipe
        return baseline_images

    def test_forward_pass_balanced_device_map(self):
        baseline_images = self.get_pipeline_output_without_device_map()
        mapped_pipe = self._balanced_pipeline()
        mapped_pipe.set_progress_bar_config(disable=True)
        mapped_images = mapped_pipe(**self.get_inputs()).images
        # Device-mapped inference must match the single-device reference.
        assert np.abs(mapped_images - baseline_images).max() < 1e-3

    def test_components_put_in_right_devices(self):
        mapped_pipe = self._balanced_pipeline()
        # A balanced map over multiple accelerators must use at least two devices.
        assert len(set(mapped_pipe.hf_device_map.values())) >= 2

    def test_max_memory(self):
        baseline_images = self.get_pipeline_output_without_device_map()
        mapped_pipe = self._balanced_pipeline(max_memory={0: "1GB", 1: "1GB"})
        mapped_pipe.set_progress_bar_config(disable=True)
        mapped_images = mapped_pipe(**self.get_inputs()).images
        # Constrained per-device memory must not change the result.
        assert np.abs(mapped_images - baseline_images).max() < 1e-3

    def test_reset_device_map(self):
        mapped_pipe = self._balanced_pipeline()
        mapped_pipe.reset_device_map()
        assert mapped_pipe.hf_device_map is None
        # After the reset every torch module should sit on the CPU.
        for _, component in mapped_pipe.components.items():
            if isinstance(component, torch.nn.Module):
                assert component.device.type == "cpu"

    def test_reset_device_map_to(self):
        mapped_pipe = self._balanced_pipeline()
        mapped_pipe.reset_device_map()
        assert mapped_pipe.hf_device_map is None
        # Make sure `to()` can be used and the pipeline can be called.
        moved_pipe = mapped_pipe.to(torch_device)
        _ = moved_pipe("hello", num_inference_steps=2)

    def test_reset_device_map_enable_model_cpu_offload(self):
        mapped_pipe = self._balanced_pipeline()
        mapped_pipe.reset_device_map()
        assert mapped_pipe.hf_device_map is None
        # Make sure `enable_model_cpu_offload()` can be used and the pipeline can be called.
        mapped_pipe.enable_model_cpu_offload(device=torch_device)
        _ = mapped_pipe("hello", num_inference_steps=2)

    def test_reset_device_map_enable_sequential_cpu_offload(self):
        mapped_pipe = self._balanced_pipeline()
        mapped_pipe.reset_device_map()
        assert mapped_pipe.hf_device_map is None
        # Make sure `enable_sequential_cpu_offload()` can be used and the pipeline can be called.
        mapped_pipe.enable_sequential_cpu_offload(device=torch_device)
        _ = mapped_pipe("hello", num_inference_steps=2)
diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion.py", "repo_id": "diffusers", "token_count": 27034 }
192
import random
import unittest

import numpy as np
import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    SD3Transformer2DModel,
    StableDiffusion3InpaintPipeline,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    torch_device,
)

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
    TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion3InpaintPipelineFastTests(PipelineLatentTesterMixin, unittest.TestCase, PipelineTesterMixin):
    """Fast (tiny-model) tests for the Stable Diffusion 3 inpainting pipeline."""

    pipeline_class = StableDiffusion3InpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"})

    def get_dummy_components(self):
        """Build a minimal SD3 component set (tiny transformer, CLIP/T5 encoders, small VAE)."""
        torch.manual_seed(0)
        transformer = SD3Transformer2DModel(
            sample_size=32,
            patch_size=1,
            in_channels=16,
            num_layers=1,
            attention_head_dim=8,
            num_attention_heads=4,
            joint_attention_dim=32,
            caption_projection_dim=32,
            pooled_projection_dim=64,
            out_channels=16,
        )
        clip_text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        # Re-seed before each model so each gets deterministic initial weights.
        torch.manual_seed(0)
        text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)
        torch.manual_seed(0)
        text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)
        text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        vae = AutoencoderKL(
            sample_size=32,
            in_channels=3,
            out_channels=3,
            block_out_channels=(4,),
            layers_per_block=1,
            latent_channels=16,
            norm_num_groups=1,
            use_quant_conv=False,
            use_post_quant_conv=False,
            shift_factor=0.0609,
            scaling_factor=1.5035,
        )
        scheduler = FlowMatchEulerDiscreteScheduler()
        return {
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "text_encoder_2": text_encoder_2,
            "text_encoder_3": text_encoder_3,
            "tokenizer": tokenizer,
            "tokenizer_2": tokenizer_2,
            "tokenizer_3": tokenizer_3,
            "transformer": transformer,
            "vae": vae,
            "image_encoder": None,
            "feature_extractor": None,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs: seeded 32x32 input image plus an all-ones mask."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = torch.ones((1, 1, 32, 32)).to(device)
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators; fall back to global seeding.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device="cpu").manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "height": 32,
            "width": 32,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "np",
            "strength": 0.8,
        }
        return inputs

    def test_inference(self):
        """End-to-end tiny-model run; compares the first/last 8 pixels against a pinned slice."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        inputs = self.get_dummy_inputs(torch_device)
        image = pipe(**inputs).images[0]
        generated_slice = image.flatten()
        generated_slice = np.concatenate([generated_slice[:8], generated_slice[-8:]])
        # fmt: off
        expected_slice = np.array([0.5035, 0.6661, 0.5859, 0.413, 0.4224, 0.4234, 0.7181, 0.5062, 0.5183, 0.6877, 0.5074, 0.585, 0.6111, 0.5422, 0.5306, 0.5891])
        # fmt: on
        self.assertTrue(
            np.allclose(generated_slice, expected_slice, atol=1e-3),
            "Output does not match expected slice.",
        )

    @unittest.skip("Skip for now.")
    def test_multi_vae(self):
        pass
diffusers/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py", "repo_id": "diffusers", "token_count": 2565 }
193
import contextlib import io import re import unittest import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AnimateDiffPipeline, AnimateDiffVideoToVideoPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.pipelines.pipeline_loading_utils import is_safetensors_compatible, variant_compatible_siblings from diffusers.utils.testing_utils import require_torch_accelerator, torch_device class IsSafetensorsCompatibleTests(unittest.TestCase): def test_all_is_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_diffusers_model_is_compatible(self): filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_diffusers_model_is_not_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(filenames)) def test_transformer_model_is_compatible(self): filenames = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_transformer_model_is_not_compatible(self): filenames = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", 
"vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) def test_all_is_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_diffusers_model_is_compatible_variant(self): filenames = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_diffusers_model_is_compatible_variant_mixed(self): filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_diffusers_model_is_not_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] self.assertFalse(is_safetensors_compatible(filenames)) def test_transformer_model_is_compatible_variant(self): filenames = [ "text_encoder/pytorch_model.fp16.bin", 
"text_encoder/model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_transformer_model_is_not_compatible_variant(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) def test_transformer_model_is_compatible_variant_extra_folder(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames, folder_names={"vae", "unet"})) self.assertTrue(is_safetensors_compatible(filenames, folder_names={"vae", "unet"}, variant="fp16")) def test_transformer_model_is_not_compatible_variant_extra_folder(self): filenames = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames, folder_names={"text_encoder"})) def test_transformers_is_compatible_sharded(self): filenames = [ "text_encoder/pytorch_model.bin", "text_encoder/model-00001-of-00002.safetensors", "text_encoder/model-00002-of-00002.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_transformers_is_compatible_variant_sharded(self): filenames = [ "text_encoder/pytorch_model.bin", 
"text_encoder/model.fp16-00001-of-00002.safetensors", "text_encoder/model.fp16-00001-of-00002.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_diffusers_is_compatible_sharded(self): filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model-00001-of-00002.safetensors", "unet/diffusion_pytorch_model-00002-of-00002.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames)) def test_diffusers_is_compatible_variant_sharded(self): filenames = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.fp16-00001-of-00002.safetensors", "unet/diffusion_pytorch_model.fp16-00001-of-00002.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_diffusers_is_compatible_only_variants(self): filenames = [ "unet/diffusion_pytorch_model.fp16.safetensors", ] self.assertFalse(is_safetensors_compatible(filenames)) self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_diffusers_is_compatible_no_components(self): filenames = [ "diffusion_pytorch_model.bin", ] self.assertFalse(is_safetensors_compatible(filenames)) def test_diffusers_is_compatible_no_components_only_variants(self): filenames = [ "diffusion_pytorch_model.fp16.bin", ] self.assertFalse(is_safetensors_compatible(filenames)) def test_is_compatible_mixed_variants(self): filenames = [ "unet/diffusion_pytorch_model.fp16.safetensors", "vae/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) def test_is_compatible_variant_and_non_safetensors(self): filenames = [ "unet/diffusion_pytorch_model.fp16.safetensors", "vae/diffusion_pytorch_model.bin", ] self.assertFalse(is_safetensors_compatible(filenames, variant="fp16")) class VariantCompatibleSiblingsTest(unittest.TestCase): def test_only_non_variants_downloaded(self): 
ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ f"vae/diffusion_pytorch_model.{variant}.safetensors", "vae/diffusion_pytorch_model.safetensors", f"text_encoder/model.{variant}.safetensors", "text_encoder/model.safetensors", f"unet/diffusion_pytorch_model.{variant}.safetensors", "unet/diffusion_pytorch_model.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=None, ignore_patterns=ignore_patterns ) assert all(variant not in f for f in model_filenames) def test_only_variants_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ f"vae/diffusion_pytorch_model.{variant}.safetensors", "vae/diffusion_pytorch_model.safetensors", f"text_encoder/model.{variant}.safetensors", "text_encoder/model.safetensors", f"unet/diffusion_pytorch_model.{variant}.safetensors", "unet/diffusion_pytorch_model.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f for f in model_filenames) def test_mixed_variants_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" non_variant_file = "text_encoder/model.safetensors" filenames = [ f"vae/diffusion_pytorch_model.{variant}.safetensors", "vae/diffusion_pytorch_model.safetensors", "text_encoder/model.safetensors", f"unet/diffusion_pytorch_model.{variant}.safetensors", "unet/diffusion_pytorch_model.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f if f != non_variant_file else variant not in f for f in model_filenames) def test_non_variants_in_main_dir_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ f"diffusion_pytorch_model.{variant}.safetensors", "diffusion_pytorch_model.safetensors", "model.safetensors", f"model.{variant}.safetensors", ] model_filenames, variant_filenames = 
variant_compatible_siblings( filenames, variant=None, ignore_patterns=ignore_patterns ) assert all(variant not in f for f in model_filenames) def test_variants_in_main_dir_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ f"diffusion_pytorch_model.{variant}.safetensors", "diffusion_pytorch_model.safetensors", "model.safetensors", f"model.{variant}.safetensors", f"diffusion_pytorch_model.{variant}.safetensors", "diffusion_pytorch_model.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f for f in model_filenames) def test_mixed_variants_in_main_dir_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" non_variant_file = "model.safetensors" filenames = [ f"diffusion_pytorch_model.{variant}.safetensors", "diffusion_pytorch_model.safetensors", "model.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f if f != non_variant_file else variant not in f for f in model_filenames) def test_sharded_variants_in_main_dir_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ "diffusion_pytorch_model.safetensors.index.json", "diffusion_pytorch_model-00001-of-00003.safetensors", "diffusion_pytorch_model-00002-of-00003.safetensors", "diffusion_pytorch_model-00003-of-00003.safetensors", f"diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", f"diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", f"diffusion_pytorch_model.safetensors.index.{variant}.json", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f for f in model_filenames) def test_mixed_sharded_and_variant_in_main_dir_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ 
"diffusion_pytorch_model.safetensors.index.json", "diffusion_pytorch_model-00001-of-00003.safetensors", "diffusion_pytorch_model-00002-of-00003.safetensors", "diffusion_pytorch_model-00003-of-00003.safetensors", f"diffusion_pytorch_model.{variant}.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f for f in model_filenames) def test_mixed_sharded_non_variants_in_main_dir_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ f"diffusion_pytorch_model.safetensors.index.{variant}.json", "diffusion_pytorch_model.safetensors.index.json", "diffusion_pytorch_model-00001-of-00003.safetensors", "diffusion_pytorch_model-00002-of-00003.safetensors", "diffusion_pytorch_model-00003-of-00003.safetensors", f"diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", f"diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=None, ignore_patterns=ignore_patterns ) assert all(variant not in f for f in model_filenames) def test_sharded_non_variants_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ f"unet/diffusion_pytorch_model.safetensors.index.{variant}.json", "unet/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model-00001-of-00003.safetensors", "unet/diffusion_pytorch_model-00002-of-00003.safetensors", "unet/diffusion_pytorch_model-00003-of-00003.safetensors", f"unet/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", f"unet/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=None, ignore_patterns=ignore_patterns ) assert all(variant not in f for f in model_filenames) def test_sharded_variants_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ 
f"unet/diffusion_pytorch_model.safetensors.index.{variant}.json", "unet/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model-00001-of-00003.safetensors", "unet/diffusion_pytorch_model-00002-of-00003.safetensors", "unet/diffusion_pytorch_model-00003-of-00003.safetensors", f"unet/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", f"unet/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f for f in model_filenames) assert model_filenames == variant_filenames def test_single_variant_with_sharded_non_variant_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ "unet/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model-00001-of-00003.safetensors", "unet/diffusion_pytorch_model-00002-of-00003.safetensors", "unet/diffusion_pytorch_model-00003-of-00003.safetensors", f"unet/diffusion_pytorch_model.{variant}.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f for f in model_filenames) def test_mixed_single_variant_with_sharded_non_variant_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" allowed_non_variant = "unet" filenames = [ "vae/diffusion_pytorch_model.safetensors.index.json", "vae/diffusion_pytorch_model-00001-of-00003.safetensors", "vae/diffusion_pytorch_model-00002-of-00003.safetensors", "vae/diffusion_pytorch_model-00003-of-00003.safetensors", f"vae/diffusion_pytorch_model.{variant}.safetensors", "unet/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model-00001-of-00003.safetensors", "unet/diffusion_pytorch_model-00002-of-00003.safetensors", "unet/diffusion_pytorch_model-00003-of-00003.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( 
filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) def test_sharded_mixed_variants_downloaded(self): ignore_patterns = ["*.bin"] variant = "fp16" allowed_non_variant = "unet" filenames = [ f"vae/diffusion_pytorch_model.safetensors.index.{variant}.json", "vae/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model-00001-of-00003.safetensors", "unet/diffusion_pytorch_model-00002-of-00003.safetensors", "unet/diffusion_pytorch_model-00003-of-00003.safetensors", f"vae/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", f"vae/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", "vae/diffusion_pytorch_model-00001-of-00003.safetensors", "vae/diffusion_pytorch_model-00002-of-00003.safetensors", "vae/diffusion_pytorch_model-00003-of-00003.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) def test_downloading_when_no_variant_exists(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = ["model.safetensors", "diffusion_pytorch_model.safetensors"] with self.assertRaisesRegex(ValueError, "but no such modeling files are available. 
"): model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) def test_downloading_use_safetensors_false(self): ignore_patterns = ["*.safetensors"] filenames = [ "text_encoder/model.bin", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=None, ignore_patterns=ignore_patterns ) assert all(".safetensors" not in f for f in model_filenames) def test_non_variant_in_main_dir_with_variant_in_subfolder(self): ignore_patterns = ["*.bin"] variant = "fp16" allowed_non_variant = "diffusion_pytorch_model.safetensors" filenames = [ f"unet/diffusion_pytorch_model.{variant}.safetensors", "diffusion_pytorch_model.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) def test_download_variants_when_component_has_no_safetensors_variant(self): ignore_patterns = None variant = "fp16" filenames = [ f"unet/diffusion_pytorch_model.{variant}.bin", "vae/diffusion_pytorch_model.safetensors", f"vae/diffusion_pytorch_model.{variant}.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert { f"unet/diffusion_pytorch_model.{variant}.bin", f"vae/diffusion_pytorch_model.{variant}.safetensors", } == model_filenames def test_error_when_download_sharded_variants_when_component_has_no_safetensors_variant(self): ignore_patterns = ["*.bin"] variant = "fp16" filenames = [ f"vae/diffusion_pytorch_model.bin.index.{variant}.json", "vae/diffusion_pytorch_model.safetensors.index.json", f"vae/diffusion_pytorch_model.{variant}-00002-of-00002.bin", "vae/diffusion_pytorch_model-00001-of-00003.safetensors", 
"vae/diffusion_pytorch_model-00002-of-00003.safetensors", "vae/diffusion_pytorch_model-00003-of-00003.safetensors", "unet/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model-00001-of-00003.safetensors", "unet/diffusion_pytorch_model-00002-of-00003.safetensors", "unet/diffusion_pytorch_model-00003-of-00003.safetensors", f"vae/diffusion_pytorch_model.{variant}-00001-of-00002.bin", ] with self.assertRaisesRegex(ValueError, "but no such modeling files are available. "): model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) def test_download_sharded_variants_when_component_has_no_safetensors_variant_and_safetensors_false(self): ignore_patterns = ["*.safetensors"] allowed_non_variant = "unet" variant = "fp16" filenames = [ f"vae/diffusion_pytorch_model.bin.index.{variant}.json", "vae/diffusion_pytorch_model.safetensors.index.json", f"vae/diffusion_pytorch_model.{variant}-00002-of-00002.bin", "vae/diffusion_pytorch_model-00001-of-00003.safetensors", "vae/diffusion_pytorch_model-00002-of-00003.safetensors", "vae/diffusion_pytorch_model-00003-of-00003.safetensors", "unet/diffusion_pytorch_model.safetensors.index.json", "unet/diffusion_pytorch_model-00001-of-00003.safetensors", "unet/diffusion_pytorch_model-00002-of-00003.safetensors", "unet/diffusion_pytorch_model-00003-of-00003.safetensors", f"vae/diffusion_pytorch_model.{variant}-00001-of-00002.bin", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) def test_download_sharded_legacy_variants(self): ignore_patterns = None variant = "fp16" filenames = [ f"vae/transformer/diffusion_pytorch_model.safetensors.{variant}.index.json", "vae/diffusion_pytorch_model.safetensors.index.json", 
f"vae/diffusion_pytorch_model-00002-of-00002.{variant}.safetensors", "vae/diffusion_pytorch_model-00001-of-00003.safetensors", "vae/diffusion_pytorch_model-00002-of-00003.safetensors", "vae/diffusion_pytorch_model-00003-of-00003.safetensors", f"vae/diffusion_pytorch_model-00001-of-00002.{variant}.safetensors", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=variant, ignore_patterns=ignore_patterns ) assert all(variant in f for f in model_filenames) def test_download_onnx_models(self): ignore_patterns = ["*.safetensors"] filenames = [ "vae/model.onnx", "unet/model.onnx", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=None, ignore_patterns=ignore_patterns ) assert model_filenames == set(filenames) def test_download_flax_models(self): ignore_patterns = ["*.safetensors", "*.bin"] filenames = [ "vae/diffusion_flax_model.msgpack", "unet/diffusion_flax_model.msgpack", ] model_filenames, variant_filenames = variant_compatible_siblings( filenames, variant=None, ignore_patterns=ignore_patterns ) assert model_filenames == set(filenames) class ProgressBarTests(unittest.TestCase): def get_dummy_components_image_generation(self): cross_attention_dim = 8 torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=cross_attention_dim, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = 
CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=cross_attention_dim, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_components_video_generation(self): cross_attention_dim = 8 block_out_channels = (8, 8) torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=block_out_channels, layers_per_block=2, sample_size=8, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=cross_attention_dim, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=block_out_channels, in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=cross_attention_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) motion_adapter = MotionAdapter( block_out_channels=block_out_channels, motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": 
motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def test_text_to_image(self): components = self.get_dummy_components_image_generation() pipe = StableDiffusionPipeline(**components) pipe.to(torch_device) inputs = {"prompt": "a cute cat", "num_inference_steps": 2} with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) ", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") def test_image_to_image(self): components = self.get_dummy_components_image_generation() pipe = StableDiffusionImg2ImgPipeline(**components) pipe.to(torch_device) image = Image.new("RGB", (32, 32)) inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "strength": 0.5, "image": image} with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) 
", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") def test_inpainting(self): components = self.get_dummy_components_image_generation() pipe = StableDiffusionInpaintPipeline(**components) pipe.to(torch_device) image = Image.new("RGB", (32, 32)) mask = Image.new("RGB", (32, 32)) inputs = { "prompt": "a cute cat", "num_inference_steps": 2, "strength": 0.5, "image": image, "mask_image": mask, } with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) ", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") def test_text_to_video(self): components = self.get_dummy_components_video_generation() pipe = AnimateDiffPipeline(**components) pipe.to(torch_device) inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "num_frames": 2} with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) 
", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") def test_video_to_video(self): components = self.get_dummy_components_video_generation() pipe = AnimateDiffVideoToVideoPipeline(**components) pipe.to(torch_device) num_frames = 2 video = [Image.new("RGB", (32, 32))] * num_frames inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "video": video} with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) ", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") @require_torch_accelerator class PipelineDeviceAndDtypeStabilityTests(unittest.TestCase): expected_pipe_device = torch.device(f"{torch_device}:0") expected_pipe_dtype = torch.float64 def get_dummy_components_image_generation(self): cross_attention_dim = 8 torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=cross_attention_dim, norm_num_groups=2, ) scheduler = 
DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=cross_attention_dim, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def test_deterministic_device(self): components = self.get_dummy_components_image_generation() pipe = StableDiffusionPipeline(**components) pipe.to(device=torch_device, dtype=torch.float32) pipe.unet.to(device="cpu") pipe.vae.to(device=torch_device) pipe.text_encoder.to(device=f"{torch_device}:0") pipe_device = pipe.device self.assertEqual( self.expected_pipe_device, pipe_device, f"Wrong expected device. Expected {self.expected_pipe_device}. Got {pipe_device}.", ) def test_deterministic_dtype(self): components = self.get_dummy_components_image_generation() pipe = StableDiffusionPipeline(**components) pipe.to(device=torch_device, dtype=torch.float32) pipe.unet.to(dtype=torch.float16) pipe.vae.to(dtype=torch.float32) pipe.text_encoder.to(dtype=torch.float64) pipe_dtype = pipe.dtype self.assertEqual( self.expected_pipe_dtype, pipe_dtype, f"Wrong expected dtype. Expected {self.expected_pipe_dtype}. Got {pipe_dtype}.", )
diffusers/tests/pipelines/test_pipeline_utils.py/0
{ "file_path": "diffusers/tests/pipelines/test_pipeline_utils.py", "repo_id": "diffusers", "token_count": 20188 }
194
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch
from PIL import Image
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, WanTransformer3DModel, WanVideoToVideoPipeline
from diffusers.utils.testing_utils import (
    enable_full_determinism,
)

from ..pipeline_params import TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineTesterMixin,
)


enable_full_determinism()


class WanVideoToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU smoke tests for `WanVideoToVideoPipeline` using tiny randomly-initialized components.

    The expected output slice in `test_inference` is pinned to the exact seeds set in
    `get_dummy_components` / `get_dummy_inputs`; any change to component construction order
    or RNG consumption will invalidate it.
    """

    pipeline_class = WanVideoToVideoPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = frozenset(["video", "prompt", "negative_prompt"])
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    # Disabled for this pipeline by the common tester mixin contract.
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        """Build the smallest set of pipeline components that still exercises the full forward path."""
        # Re-seed before each randomly-initialized module so each one gets a reproducible init.
        torch.manual_seed(0)
        vae = AutoencoderKLWan(
            base_dim=3,
            z_dim=16,
            dim_mult=[1, 1, 1, 1],
            num_res_blocks=1,
            temperal_downsample=[False, True, True],
        )

        torch.manual_seed(0)
        scheduler = UniPCMultistepScheduler(flow_shift=3.0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        transformer = WanTransformer3DModel(
            patch_size=(1, 2, 2),
            num_attention_heads=2,
            attention_head_dim=12,
            in_channels=16,
            out_channels=16,
            text_dim=32,
            freq_dim=256,
            ffn_dim=32,
            num_layers=2,
            cross_attn_norm=True,
            qk_norm="rms_norm_across_heads",
            rope_max_seq_len=32,
        )

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return minimal call kwargs for the pipeline, with a device-appropriate seeded generator."""
        # MPS does not support device-bound generators; fall back to the global CPU generator.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # 17 frames matches the temporal compression expectations of the tiny VAE config above.
        video = [Image.new("RGB", (16, 16))] * 17

        inputs = {
            "video": video,
            "prompt": "dance monkey",
            "negative_prompt": "negative",  # TODO
            "generator": generator,
            "num_inference_steps": 4,
            "guidance_scale": 6.0,
            "height": 16,
            "width": 16,
            "max_sequence_length": 16,
            "output_type": "pt",
        }

        return inputs

    def test_inference(self):
        """End-to-end run on CPU; checks output shape and a pinned slice of the generated video."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]
        self.assertEqual(generated_video.shape, (17, 3, 16, 16))

        # fmt: off
        expected_slice = torch.tensor([0.4522, 0.4534, 0.4532, 0.4553, 0.4526, 0.4538, 0.4533, 0.4547, 0.513, 0.5176, 0.5286, 0.4958, 0.4955, 0.5381, 0.5154, 0.5195])
        # fmt:on

        # Compare only the first and last 8 values rather than the whole tensor.
        generated_slice = generated_video.flatten()
        generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]])
        self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3))

    @unittest.skip("Test not supported")
    def test_attention_slicing_forward_pass(self):
        pass

    @unittest.skip(
        "WanVideoToVideoPipeline has to run in mixed precision. Casting the entire pipeline will result in errors"
    )
    def test_float16_inference(self):
        pass

    @unittest.skip(
        "WanVideoToVideoPipeline has to run in mixed precision. Save/Load the entire pipeline in FP16 will result in errors"
    )
    def test_save_load_float16(self):
        pass
diffusers/tests/pipelines/wan/test_wan_video_to_video.py/0
{ "file_path": "diffusers/tests/pipelines/wan/test_wan_video_to_video.py", "repo_id": "diffusers", "token_count": 2254 }
195
import inspect
import tempfile
import unittest
from typing import Dict, List, Tuple

import torch

from diffusers import EDMEulerScheduler

from .test_schedulers import SchedulerCommonTest


class EDMEulerSchedulerTest(SchedulerCommonTest):
    """Tests for `EDMEulerScheduler`.

    Several common tests are overridden because this scheduler requires
    `scale_model_input` to be applied before `step`, unlike the generic flow
    exercised by `SchedulerCommonTest`.
    """

    scheduler_classes = (EDMEulerScheduler,)
    forward_default_kwargs = (("num_inference_steps", 10),)

    def get_scheduler_config(self, **kwargs):
        """Return the default EDM config, overridden by any keyword arguments."""
        config = {
            "num_train_timesteps": 256,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self, num_inference_steps=10, seed=0):
        """Run a full deterministic denoising loop and pin the output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(scheduler.timesteps):
            scaled_sample = scheduler.scale_model_input(sample, t)
            model_output = model(scaled_sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 34.1855) < 1e-3
        assert abs(result_mean.item() - 0.044) < 1e-3

    def test_full_loop_device(self, num_inference_steps=10, seed=0):
        # NOTE(review): this body is identical to test_full_loop_no_noise and never moves the
        # scheduler, model, or sample to a non-CPU device — confirm whether a `.to(device)` was
        # intended here, as in other schedulers' device tests.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(scheduler.timesteps):
            scaled_sample = scheduler.scale_model_input(sample, t)
            model_output = model(scaled_sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 34.1855) < 1e-3
        assert abs(result_mean.item() - 0.044) < 1e-3

    # Override test_from_save_pretrained to use EDMEulerScheduler-specific logic
    def test_from_save_pretrained(self):
        """Check that a scheduler reloaded from disk produces identical step outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)

            scheduler.set_timesteps(num_inference_steps)
            new_scheduler.set_timesteps(num_inference_steps)
            timestep = scheduler.timesteps[0]

            sample = self.dummy_sample

            scaled_sample = scheduler.scale_model_input(sample, timestep)
            residual = 0.1 * scaled_sample

            new_scaled_sample = new_scheduler.scale_model_input(sample, timestep)
            new_residual = 0.1 * new_scaled_sample

            # Re-seed before each step so stochastic schedulers produce comparable outputs.
            if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
                kwargs["generator"] = torch.manual_seed(0)
            output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample

            if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
                kwargs["generator"] = torch.manual_seed(0)
            new_output = new_scheduler.step(new_residual, timestep, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    # Override test_step_shape to use EDMEulerScheduler-specific logic (scale_model_input before step)
    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        scaled_sample = scheduler.scale_model_input(sample, timestep_0)
        residual = 0.1 * scaled_sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    # Override test_scheduler_outputs_equivalence to use EDMEulerScheduler-specific logic
    def test_scheduler_outputs_equivalence(self):
        """Check that `step(..., return_dict=False)` and the dict output carry equal tensors."""

        def set_nan_tensor_to_zero(t):
            # Zero out NaNs so allclose can compare tensors that legitimately contain them.
            t[t != t] = 0
            return t

        def recursive_check(tuple_object, dict_object):
            if isinstance(tuple_object, (List, Tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, Dict):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                    ),
                    msg=(
                        "Tuple and dict output are not equal. Difference:"
                        f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                        f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                        f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                    ),
                )

        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", 50)

        timestep = 0

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)
            timestep = scheduler.timesteps[0]

            sample = self.dummy_sample
            scaled_sample = scheduler.scale_model_input(sample, timestep)
            residual = 0.1 * scaled_sample

            # Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
            if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
                kwargs["generator"] = torch.manual_seed(0)
            outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)

            scheduler.set_timesteps(num_inference_steps)

            scaled_sample = scheduler.scale_model_input(sample, timestep)
            residual = 0.1 * scaled_sample

            # Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
            if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
                kwargs["generator"] = torch.manual_seed(0)
            outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)

            recursive_check(outputs_tuple, outputs_dict)

    @unittest.skip(reason="EDMEulerScheduler does not support beta schedules.")
    def test_trained_betas(self):
        pass
diffusers/tests/schedulers/test_scheduler_edm_euler.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_edm_euler.py", "repo_id": "diffusers", "token_count": 3799 }
196
import unittest

import torch
import torch.nn.functional as F

from diffusers import VQDiffusionScheduler

from .test_schedulers import SchedulerCommonTest


class VQDiffusionSchedulerTest(SchedulerCommonTest):
    """Configuration-sweep tests for `VQDiffusionScheduler` (discrete/categorical diffusion)."""

    scheduler_classes = (VQDiffusionScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, with any keyword arguments overriding the defaults."""
        return {
            "num_vec_classes": 4097,
            "num_train_timesteps": 100,
            **kwargs,
        }

    def dummy_sample(self, num_vec_classes):
        """Random batch of discrete latent codes, shape (batch, height * width)."""
        # 4 samples over an 8x8 latent grid flattened to 64 positions each.
        return torch.randint(0, num_vec_classes, (4, 8 * 8))

    @property
    def dummy_sample_deter(self):
        # A deterministic continuous sample is meaningless for discrete diffusion; fail loudly.
        assert False

    def dummy_model(self, num_vec_classes):
        """Return a stand-in model that emits normalized log-probabilities over the non-mask classes."""

        def model(sample, t, *args):
            batch, pixels = sample.shape
            raw = torch.rand((batch, num_vec_classes - 1, pixels))
            # Normalize in double precision, then cast back to float.
            return F.log_softmax(raw.double(), dim=1).float()

        return model

    def test_timesteps(self):
        for steps in [2, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=steps)

    def test_num_vec_classes(self):
        for classes in [5, 100, 1000, 4000]:
            self.check_over_configs(num_vec_classes=classes)

    def test_time_indices(self):
        for step in [0, 50, 99]:
            self.check_over_forward(time_step=step)

    @unittest.skip("Test not supported.")
    def test_add_noise_device(self):
        pass
diffusers/tests/schedulers/test_scheduler_vq_diffusion.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_vq_diffusion.py", "repo_id": "diffusers", "token_count": 715 }
197
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
"""
Utility that checks that modules like attention processors are listed in the documentation file.

```bash
python utils/check_support_list.py
```

It has no auto-fix mode.
"""

import os
import re


# All paths are set with the intent that you run this script from the root of the repo
REPO_PATH = "."


def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"):
    """
    Read documented classes from a doc file using a regex to find lines like `[[autodoc]] my.module.Class`.

    Args:
        doc_path (`str`): Path to the doc file, relative to `REPO_PATH`.
        autodoc_regex (`str`): Regex whose first group captures the dotted class path.

    Returns:
        `List[str]`: The documented class names (just the class-name portion of each dotted path).
    """
    # Read as UTF-8 explicitly: doc files may contain non-ASCII characters and the platform
    # default encoding (e.g. cp1252 on Windows) would fail or garble them.
    with open(os.path.join(REPO_PATH, doc_path), "r", encoding="utf-8") as f:
        doctext = f.read()
        matches = re.findall(autodoc_regex, doctext)
        return [match.split(".")[-1] for match in matches]


def read_source_classes(src_path, class_regex, exclude_conditions=None):
    """
    Read class names from a source file using a regex that captures class definitions.

    Args:
        src_path (`str`): Path to the source file, relative to `REPO_PATH`.
        class_regex (`str`): Regex whose first group captures the class name.
        exclude_conditions (`List[Callable[[str], bool]]`, *optional*):
            Predicates; a class is dropped when any predicate returns `True` for its name.

    Returns:
        `List[str]`: The class names found in the source file, after exclusions.
    """
    if exclude_conditions is None:
        exclude_conditions = []
    with open(os.path.join(REPO_PATH, src_path), "r", encoding="utf-8") as f:
        doctext = f.read()
        classes = re.findall(class_regex, doctext)
        # Filter out classes that meet any of the exclude conditions
        filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)]
        return filtered_classes


def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None):
    """
    Check whether all classes defined in `src_path` are documented in `doc_path`.

    Args:
        doc_path (`str`): Path to the doc file, relative to `REPO_PATH`.
        src_path (`str`): Path to the source file, relative to `REPO_PATH`.
        doc_regex (`str`): Regex capturing documented class paths in the doc file.
        src_regex (`str`): Regex capturing class names in the source file.
        exclude_conditions (`List[Callable[[str], bool]]`, *optional*): See `read_source_classes`.

    Returns:
        `List[str]`: Sorted list of undocumented class names (sorted so output is deterministic).
    """
    documented = set(read_documented_classes(doc_path, doc_regex))
    source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions))

    # Find which classes in source are not documented, in a deterministic order.
    undocumented = sorted(source_classes - documented)
    return undocumented


if __name__ == "__main__":
    # Define the checks we need to perform
    checks = {
        "Attention Processors": {
            "doc_path": "docs/source/en/api/attnprocessor.md",
            "src_path": "src/diffusers/models/attention_processor.py",
            "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
            "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
            "exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"],
        },
        "Image Processors": {
            "doc_path": "docs/source/en/api/image_processor.md",
            "src_path": "src/diffusers/image_processor.py",
            "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
            "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
        },
        "Activations": {
            "doc_path": "docs/source/en/api/activations.md",
            "src_path": "src/diffusers/models/activations.py",
            "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
            "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
        },
        "Normalizations": {
            "doc_path": "docs/source/en/api/normalization.md",
            "src_path": "src/diffusers/models/normalization.py",
            "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
            "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
            "exclude_conditions": [
                # Exclude LayerNorm as it's an intentional exception
                lambda c: c == "LayerNorm"
            ],
        },
        "LoRA Mixins": {
            "doc_path": "docs/source/en/api/loaders/lora.md",
            "src_path": "src/diffusers/loaders/lora_pipeline.py",
            "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
            "src_regex": r"class\s+(\w+LoraLoaderMixin(?:\d*_?\d*))[:(]",
        },
    }

    missing_items = {}
    for category, params in checks.items():
        undocumented = check_documentation(
            doc_path=params["doc_path"],
            src_path=params["src_path"],
            doc_regex=params["doc_regex"],
            src_regex=params["src_regex"],
            exclude_conditions=params.get("exclude_conditions"),
        )
        if undocumented:
            missing_items[category] = undocumented

    # If we have any missing items, raise a single combined error
    if missing_items:
        error_msg = ["Some classes are not documented properly:\n"]
        for category, classes in missing_items.items():
            error_msg.append(f"- {category}: {', '.join(sorted(classes))}")
        raise ValueError("\n".join(error_msg))
diffusers/utils/check_support_list.py/0
{ "file_path": "diffusers/utils/check_support_list.py", "repo_id": "diffusers", "token_count": 2093 }
198
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Welcome to tests_fetcher V2.

This util is designed to fetch tests to run on a PR so that only the tests impacted by the modifications are run, and
when too many models are being impacted, only run the tests of a subset of core models. It works like this.

Stage 1: Identify the modified files. For jobs that run on the main branch, it's just the diff with the last commit.
On a PR, this takes all the files from the branching point to the current commit (so all modifications in a PR, not
just the last commit) but excludes modifications that are on docstrings or comments only.

Stage 2: Extract the tests to run. This is done by looking at the imports in each module and test file: if module A
imports module B, then changing module B impacts module A, so the tests using module A should be run. We thus get the
dependencies of each model and then recursively builds the 'reverse' map of dependencies to get all modules and tests
impacted by a given file. We then only keep the tests (and only the core models tests if there are too many modules).

Caveats:
  - This module only filters tests by files (not individual tests) so it's better to have tests for different things
    in different files.
  - This module assumes inits are just importing things, not really building objects, so it's better to structure
    them this way and move objects building in separate submodules.

Usage:

Base use to fetch the tests in a pull request

```bash
python utils/tests_fetcher.py
```

Base use to fetch the tests on the main branch (with diff from the last commit):

```bash
python utils/tests_fetcher.py --diff_with_last_commit
```
"""

import argparse
import collections
import json
import os
import re
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union

from git import Repo


PATH_TO_REPO = Path(__file__).parent.parent.resolve()
PATH_TO_EXAMPLES = PATH_TO_REPO / "examples"
PATH_TO_DIFFUSERS = PATH_TO_REPO / "src/diffusers"
PATH_TO_TESTS = PATH_TO_REPO / "tests"

# Ignore fixtures in tests folder
# Ignore lora since they are always tested
MODULES_TO_IGNORE = ["fixtures", "lora"]

# Pipelines whose tests are always kept when the impacted-test set gets too large.
IMPORTANT_PIPELINES = [
    "controlnet",
    "stable_diffusion",
    "stable_diffusion_2",
    "stable_diffusion_xl",
    "stable_video_diffusion",
    "deepfloyd_if",
    "kandinsky",
    "kandinsky2_2",
    "text_to_video_synthesis",
    "wuerstchen",
]


@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
    """
    Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        commit_id (`str`): The commit reference to checkout inside the context manager.
    """
    current_head = repo.head.commit if repo.head.is_detached else repo.head.ref

    try:
        repo.git.checkout(commit_id)
        yield

    finally:
        # Always restore the original HEAD, even if the body raised.
        repo.git.checkout(current_head)


def clean_code(content: str) -> str:
    """
    Remove docstrings, empty line or comments from some code (used to detect if a diff is real or only concern
    comments or docstrings).

    Args:
        content (`str`): The code to clean

    Returns:
        `str`: The cleaned code.
    """
    # We need to deactivate autoformatting here to write escaped triple quotes (we cannot use real triple quotes or
    # this would mess up the result if this function applied to this particular file).
    # fmt: off
    # Remove docstrings by splitting on triple " then triple ':
    splits = content.split('\"\"\"')
    content = "".join(splits[::2])
    splits = content.split("\'\'\'")
    # fmt: on
    content = "".join(splits[::2])

    # Remove empty lines and comments
    lines_to_keep = []
    for line in content.split("\n"):
        # remove anything that is after a # sign.
        line = re.sub("#.*$", "", line)
        # remove white lines
        if len(line) != 0 and not line.isspace():
            lines_to_keep.append(line)
    return "\n".join(lines_to_keep)


def keep_doc_examples_only(content: str) -> str:
    """
    Remove everything from the code content except the doc examples (used to determined if a diff should trigger doc
    tests or not).

    Args:
        content (`str`): The code to clean

    Returns:
        `str`: The cleaned code.
    """
    # Keep doc examples only by splitting on triple "`"
    splits = content.split("```")
    # Add leading and trailing "```" so the navigation is easier when compared to the original input `content`
    content = "```" + "```".join(splits[1::2]) + "```"

    # Remove empty lines and comments
    lines_to_keep = []
    for line in content.split("\n"):
        # remove anything that is after a # sign.
        line = re.sub("#.*$", "", line)
        # remove white lines
        if len(line) != 0 and not line.isspace():
            lines_to_keep.append(line)
    return "\n".join(lines_to_keep)


def get_all_tests() -> List[str]:
    """
    Walks the `tests` folder to return a list of files/subfolders. This is used to split the tests to run when using
    parallelism. The split is:

    - folders under `tests`: (`tokenization`, `pipelines`, etc) except the subfolder `models` is excluded.
    - folders under `tests/models`: `bert`, `gpt2`, etc.
    - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.
    """
    # test folders/files directly under `tests` folder
    tests = os.listdir(PATH_TO_TESTS)
    tests = [f"tests/{f}" for f in tests if "__pycache__" not in f]
    # Keep only directories and top-level test_*.py files.
    tests = sorted([f for f in tests if (PATH_TO_REPO / f).is_dir() or f.startswith("tests/test_")])
    return tests


def diff_is_docstring_only(repo: Repo, branching_point: str, filename: str) -> bool:
    """
    Check if the diff is only in docstrings (or comments and whitespace) in a filename.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        branching_point (`str`): The commit reference of where to compare for the diff.
        filename (`str`): The filename where we want to know if the diff is only in docstrings/comments.

    Returns:
        `bool`: Whether the diff is docstring/comments only or not.
    """
    folder = Path(repo.working_dir)
    with checkout_commit(repo, branching_point):
        with open(folder / filename, "r", encoding="utf-8") as f:
            old_content = f.read()

    with open(folder / filename, "r", encoding="utf-8") as f:
        new_content = f.read()

    # Compare the stripped-down code: equal means only docstrings/comments/blank lines changed.
    old_content_clean = clean_code(old_content)
    new_content_clean = clean_code(new_content)

    return old_content_clean == new_content_clean


def diff_contains_doc_examples(repo: Repo, branching_point: str, filename: str) -> bool:
    """
    Check if the diff is only in code examples of the doc in a filename.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        branching_point (`str`): The commit reference of where to compare for the diff.
        filename (`str`): The filename where we want to know if the diff is only in codes examples.

    Returns:
        `bool`: Whether the diff is only in code examples of the doc or not.
    """
    folder = Path(repo.working_dir)
    with checkout_commit(repo, branching_point):
        with open(folder / filename, "r", encoding="utf-8") as f:
            old_content = f.read()

    with open(folder / filename, "r", encoding="utf-8") as f:
        new_content = f.read()

    # Compare only the fenced code examples: unequal means at least one example changed.
    old_content_clean = keep_doc_examples_only(old_content)
    new_content_clean = keep_doc_examples_only(new_content)

    return old_content_clean != new_content_clean


def get_diff(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:
    """
    Get the diff between a base commit and one or several commits.

    Args:
        repo (`git.Repo`):
            A git repository (for instance the Transformers repo).
        base_commit (`str`):
            The commit reference of where to compare for the diff. This is the current commit, not the branching
            point!
        commits (`List[str]`):
            The list of commits with which to compare the repo at `base_commit` (so the branching point).

    Returns:
        `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files
        modified are returned if the diff in the file is not only in docstrings or comments, see
        `diff_is_docstring_only`).
    """
    print("\n### DIFF ###\n")
    code_diff = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            # We always add new python files
            if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"):
                code_diff.append(diff_obj.b_path)
            # We check that deleted python files won't break corresponding tests.
            elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"):
                code_diff.append(diff_obj.a_path)
            # Now for modified files
            elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"):
                # In case of renames, we'll look at the tests using both the old and new name.
                if diff_obj.a_path != diff_obj.b_path:
                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])
                else:
                    # Otherwise, we check modifications are in code and not docstrings.
                    if diff_is_docstring_only(repo, commit, diff_obj.b_path):
                        print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.")
                    else:
                        code_diff.append(diff_obj.a_path)

    return code_diff


def get_modified_python_files(diff_with_last_commit: bool = False) -> List[str]:
    """
    Return a list of python files that have been modified between:

    - the current head and the main branch if `diff_with_last_commit=False` (default)
    - the current head and its parent commit otherwise.

    Returns:
        `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files
        modified are returned if the diff in the file is not only in docstrings or comments, see
        `diff_is_docstring_only`).
    """
    repo = Repo(PATH_TO_REPO)

    if not diff_with_last_commit:
        # Need to fetch refs for main using remotes when running with github actions.
        upstream_main = repo.remotes.origin.refs.main

        print(f"main is at {upstream_main.commit}")
        print(f"Current head is at {repo.head.commit}")

        branching_commits = repo.merge_base(upstream_main, repo.head)
        for commit in branching_commits:
            print(f"Branching commit: {commit}")
        return get_diff(repo, repo.head.commit, branching_commits)
    else:
        print(f"main is at {repo.head.commit}")
        parent_commits = repo.head.commit.parents
        for commit in parent_commits:
            print(f"Parent commit: {commit}")
        return get_diff(repo, repo.head.commit, parent_commits)


def get_diff_for_doctesting(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:
    """
    Get the diff in doc examples between a base commit and one or several commits.

    Args:
        repo (`git.Repo`):
            A git repository (for instance the Transformers repo).
        base_commit (`str`):
            The commit reference of where to compare for the diff. This is the current commit, not the branching
            point!
        commits (`List[str]`):
            The list of commits with which to compare the repo at `base_commit` (so the branching point).

    Returns:
        `List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned,
        files modified are returned if the diff in the file is only in doctest examples).
    """
    print("\n### DIFF ###\n")
    code_diff = []
    for commit in commits:
        for diff_obj in commit.diff(base_commit):
            # We only consider Python files and doc files.
            if not diff_obj.b_path.endswith(".py") and not diff_obj.b_path.endswith(".md"):
                continue
            # We always add new python/md files
            if diff_obj.change_type in ["A"]:
                code_diff.append(diff_obj.b_path)
            # Now for modified files
            elif diff_obj.change_type in ["M", "R"]:
                # In case of renames, we'll look at the tests using both the old and new name.
                if diff_obj.a_path != diff_obj.b_path:
                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])
                else:
                    # Otherwise, we check modifications contain some doc example(s).
                    if diff_contains_doc_examples(repo, commit, diff_obj.b_path):
                        code_diff.append(diff_obj.a_path)
                    else:
                        print(f"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.")

    return code_diff


def get_all_doctest_files() -> List[str]:
    """
    Return the complete list of python and Markdown files on which we run doctest.

    At this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in
    `utils/not_doctested.txt`.

    Returns:
        `List[str]`: The complete list of Python and Markdown files on which we run doctest.
    """
    py_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.py")]
    md_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.md")]

    test_files_to_run = py_files + md_files

    # only include files in `src` or `docs/source/en/`
    test_files_to_run = [x for x in test_files_to_run if x.startswith(("src/", "docs/source/en/"))]
    # not include init files
    test_files_to_run = [x for x in test_files_to_run if not x.endswith(("__init__.py",))]

    # These are files not doctested yet.
    with open("utils/not_doctested.txt") as fp:
        not_doctested = {x.split(" ")[0] for x in fp.read().strip().split("\n")}

    # So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%.
    test_files_to_run = [x for x in test_files_to_run if x not in not_doctested]

    return sorted(test_files_to_run)


def get_new_doctest_files(repo, base_commit, branching_commit) -> List[str]:
    """
    Get the list of files that were removed from "utils/not_doctested.txt", between `base_commit` and
    `branching_commit`.

    Returns:
        `List[str]`: List of files that were removed from "utils/not_doctested.txt".
    """
    for diff_obj in branching_commit.diff(base_commit):
        # Ignores all but the "utils/not_doctested.txt" file.
        if diff_obj.a_path != "utils/not_doctested.txt":
            continue
        # Loads the two versions
        folder = Path(repo.working_dir)
        with checkout_commit(repo, branching_commit):
            with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f:
                old_content = f.read()
        with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f:
            new_content = f.read()
        # Compute the removed lines and return them
        removed_content = {x.split(" ")[0] for x in old_content.split("\n")} - {
            x.split(" ")[0] for x in new_content.split("\n")
        }
        return sorted(removed_content)

    return []


def get_doctest_files(diff_with_last_commit: bool = False) -> List[str]:
    """
    Return a list of python and Markdown files where doc example have been modified between:

    - the current head and the main branch if `diff_with_last_commit=False` (default)
    - the current head and its parent commit otherwise.

    Returns:
        `List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned,
        files modified are returned if the diff in the file is only in doctest examples).
    """
    repo = Repo(PATH_TO_REPO)

    test_files_to_run = []  # noqa
    if not diff_with_last_commit:
        upstream_main = repo.remotes.origin.refs.main

        print(f"main is at {upstream_main.commit}")
        print(f"Current head is at {repo.head.commit}")

        branching_commits = repo.merge_base(upstream_main, repo.head)
        for commit in branching_commits:
            print(f"Branching commit: {commit}")
        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits)
    else:
        print(f"main is at {repo.head.commit}")
        parent_commits = repo.head.commit.parents
        for commit in parent_commits:
            print(f"Parent commit: {commit}")
        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits)

    all_test_files_to_run = get_all_doctest_files()

    # Add to the test files to run any removed entry from "utils/not_doctested.txt".
    # NOTE(review): `upstream_main` is only bound in the `not diff_with_last_commit` branch above;
    # this line would raise NameError when called with diff_with_last_commit=True — confirm intent.
    new_test_files = get_new_doctest_files(repo, repo.head.commit, upstream_main.commit)
    test_files_to_run = list(set(test_files_to_run + new_test_files))

    # Do not run slow doctest tests on CircleCI
    with open("utils/slow_documentation_tests.txt") as fp:
        slow_documentation_tests = set(fp.read().strip().split("\n"))
    test_files_to_run = [
        x for x in test_files_to_run if x in all_test_files_to_run and x not in slow_documentation_tests
    ]

    # Make sure we did not end up with a test file that was removed
    test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]

    return sorted(test_files_to_run)


# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+([^\n]+) -> Line only contains from .xxx import yyy and we catch .xxx and yyy
# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every
# other import.
_re_single_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+([^\n]+)(?=\n)")
# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line.
# \s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\) -> Line continues with from .xxx import (yyy) and we catch .xxx and yyy # yyy will take multiple lines otherwise there wouldn't be parenthesis. _re_multi_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\)") # (:?^|\n) -> Non-catching group for the beginning of the doc or a new line. # \s*from\s+transformers(\S*)\s+import\s+([^\n]+) -> Line only contains from transformers.xxx import yyy and we catch # .xxx and yyy # (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every # other import. _re_single_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+diffusers(\S*)\s+import\s+([^\n]+)(?=\n)") # (:?^|\n) -> Non-catching group for the beginning of the doc or a new line. # \s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\) -> Line continues with from transformers.xxx import (yyy) and we # catch .xxx and yyy. yyy will take multiple lines otherwise there wouldn't be parenthesis. _re_multi_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+diffusers(\S*)\s+import\s+\(([^\)]+)\)") def extract_imports(module_fname: str, cache: Dict[str, List[str]] = None) -> List[str]: """ Get the imports a given module makes. Args: module_fname (`str`): The name of the file of the module where we want to look at the imports (given relative to the root of the repo). cache (Dictionary `str` to `List[str]`, *optional*): To speed up this function if it was previously called on `module_fname`, the cache of all previously computed results. Returns: `List[str]`: The list of module filenames imported in the input `module_fname` (a submodule we import from that is a subfolder will give its init file). """ if cache is not None and module_fname in cache: return cache[module_fname] with open(PATH_TO_REPO / module_fname, "r", encoding="utf-8") as f: content = f.read() # Filter out all docstrings to not get imports in code examples. 
    # As before we need to deactivate formatting to
    # keep this as escaped quotes and avoid this function failing on this file.
    # fmt: off
    splits = content.split('\"\"\"')
    # fmt: on
    # Keeping the even-indexed splits drops everything inside triple-quoted strings.
    content = "".join(splits[::2])

    module_parts = str(module_fname).split(os.path.sep)
    imported_modules = []

    # Let's start with relative imports
    relative_imports = _re_single_line_relative_imports.findall(content)
    relative_imports = [
        (mod, imp) for mod, imp in relative_imports if "# tests_ignore" not in imp and imp.strip() != "("
    ]
    multiline_relative_imports = _re_multi_line_relative_imports.findall(content)
    relative_imports += [(mod, imp) for mod, imp in multiline_relative_imports if "# tests_ignore" not in imp]

    # We need to remove parts of the module name depending on the depth of the relative imports.
    for module, imports in relative_imports:
        # Count the leading dots: each dot goes one package level up.
        level = 0
        while module.startswith("."):
            module = module[1:]
            level += 1

        if len(module) > 0:
            dep_parts = module_parts[: len(module_parts) - level] + module.split(".")
        else:
            # `from . import yyy` style: the dependency is the package itself.
            dep_parts = module_parts[: len(module_parts) - level]
        imported_module = os.path.sep.join(dep_parts)
        imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))

    # Let's continue with direct imports
    direct_imports = _re_single_line_direct_imports.findall(content)
    direct_imports = [(mod, imp) for mod, imp in direct_imports if "# tests_ignore" not in imp and imp.strip() != "("]
    multiline_direct_imports = _re_multi_line_direct_imports.findall(content)
    direct_imports += [(mod, imp) for mod, imp in multiline_direct_imports if "# tests_ignore" not in imp]

    # We need to find the relative path of those imports.
    for module, imports in direct_imports:
        import_parts = module.split(".")[1:]  # ignore the name of the repo since we add it below.
        # Direct imports from the library always resolve under src/diffusers.
        dep_parts = ["src", "diffusers"] + import_parts
        imported_module = os.path.sep.join(dep_parts)
        imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")]))

    result = []
    # Double check we get proper modules (either a python file or a folder with an init).
    for module_file, imports in imported_modules:
        if (PATH_TO_REPO / f"{module_file}.py").is_file():
            module_file = f"{module_file}.py"
        elif (PATH_TO_REPO / module_file).is_dir() and (PATH_TO_REPO / module_file / "__init__.py").is_file():
            module_file = os.path.sep.join([module_file, "__init__.py"])
        # Only keep plain identifiers: drops empty strings and leftovers like "*" or stray parentheses.
        imports = [imp for imp in imports if len(imp) > 0 and re.match("^[A-Za-z0-9_]*$", imp)]
        if len(imports) > 0:
            result.append((module_file, imports))

    if cache is not None:
        cache[module_fname] = result

    return result


def get_module_dependencies(module_fname: str, cache: Dict[str, List[str]] = None) -> List[str]:
    """
    Refines the result of `extract_imports` to remove subfolders and get a proper list of module filenames: if a file
    as an import `from utils import Foo, Bar`, with `utils` being a subfolder containing many files, this will traverse
    the `utils` init file to check where those dependencies come from: for instance the files utils/foo.py and
    utils/bar.py.

    Warning: This presupposes that all intermediate inits are properly built (with imports from the respective
    submodules) and work better if objects are defined in submodules and not the intermediate init (otherwise the
    intermediate init is added, and inits usually have a lot of dependencies).

    Args:
        module_fname (`str`):
            The name of the file of the module where we want to look at the imports (given relative to the root of
            the repo).
        cache (Dictionary `str` to `List[str]`, *optional*):
            To speed up this function if it was previously called on `module_fname`, the cache of all previously
            computed results.

    Returns:
        `List[str]`: The list of module filenames imported in the input `module_fname` (with submodule imports
        refined).
""" dependencies = [] imported_modules = extract_imports(module_fname, cache=cache) # The while loop is to recursively traverse all inits we may encounter: we will add things as we go. while len(imported_modules) > 0: new_modules = [] for module, imports in imported_modules: # If we end up in an __init__ we are often not actually importing from this init (except in the case where # the object is fully defined in the __init__) if module.endswith("__init__.py"): # So we get the imports from that init then try to find where our objects come from. new_imported_modules = extract_imports(module, cache=cache) for new_module, new_imports in new_imported_modules: if any(i in new_imports for i in imports): if new_module not in dependencies: new_modules.append((new_module, [i for i in new_imports if i in imports])) imports = [i for i in imports if i not in new_imports] if len(imports) > 0: # If there are any objects lefts, they may be a submodule path_to_module = PATH_TO_REPO / module.replace("__init__.py", "") dependencies.extend( [ os.path.join(module.replace("__init__.py", ""), f"{i}.py") for i in imports if (path_to_module / f"{i}.py").is_file() ] ) imports = [i for i in imports if not (path_to_module / f"{i}.py").is_file()] if len(imports) > 0: # Then if there are still objects left, they are fully defined in the init, so we keep it as a # dependency. dependencies.append(module) else: dependencies.append(module) imported_modules = new_modules return dependencies def create_reverse_dependency_tree() -> List[Tuple[str, str]]: """ Create a list of all edges (a, b) which mean that modifying a impacts b with a going over all module and test files. 
""" cache = {} all_modules = list(PATH_TO_DIFFUSERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] edges = [(dep, mod) for mod in all_modules for dep in get_module_dependencies(mod, cache=cache)] return list(set(edges)) def get_tree_starting_at(module: str, edges: List[Tuple[str, str]]) -> List[Union[str, List[str]]]: """ Returns the tree starting at a given module following all edges. Args: module (`str`): The module that will be the root of the subtree we want. edges (`List[Tuple[str, str]]`): The list of all edges of the tree. Returns: `List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges starting at module], [list of edges starting at the preceding level], ...] """ vertices_seen = [module] new_edges = [edge for edge in edges if edge[0] == module and edge[1] != module and "__init__.py" not in edge[1]] tree = [module] while len(new_edges) > 0: tree.append(new_edges) final_vertices = list({edge[1] for edge in new_edges}) vertices_seen.extend(final_vertices) new_edges = [ edge for edge in edges if edge[0] in final_vertices and edge[1] not in vertices_seen and "__init__.py" not in edge[1] ] return tree def print_tree_deps_of(module, all_edges=None): """ Prints the tree of modules depending on a given module. Args: module (`str`): The module that will be the root of the subtree we want. all_edges (`List[Tuple[str, str]]`, *optional*): The list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed. """ if all_edges is None: all_edges = create_reverse_dependency_tree() tree = get_tree_starting_at(module, all_edges) # The list of lines is a list of tuples (line_to_be_printed, module) # Keeping the modules lets us know where to insert each new lines in the list. 
    lines = [(tree[0], tree[0])]
    for index in range(1, len(tree)):
        edges = tree[index]
        start_edges = {edge[0] for edge in edges}

        for start in start_edges:
            end_edges = {edge[1] for edge in edges if edge[0] == start}
            # We will insert all those edges just after the line showing start.
            pos = 0
            while lines[pos][1] != start:
                pos += 1
            # Children are indented two spaces per depth level.
            lines = lines[: pos + 1] + [(" " * (2 * index) + end, end) for end in end_edges] + lines[pos + 1 :]

    for line in lines:
        # We don't print the refs that were just here to help build lines.
        print(line[0])


def init_test_examples_dependencies() -> Tuple[Dict[str, List[str]], List[str]]:
    """
    The test examples do not import from the examples (which are just scripts, not modules) so we need some extra care
    initializing the dependency map, which is the goal of this function. It initializes the dependency map for example
    files by linking each example to the example test file for the example framework.

    Returns:
        `Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map which is a
        dict test example file to list of example files potentially tested by that test file, and the list of all
        example files (to avoid recomputing it later).
    """
    test_example_deps = {}
    all_examples = []
    for framework in ["flax", "pytorch", "tensorflow"]:
        test_files = list((PATH_TO_EXAMPLES / framework).glob("test_*.py"))
        all_examples.extend(test_files)
        # Remove the files at the root of examples/framework since they are not proper examples (they are either utils
        # or example test files).
        examples = [
            f for f in (PATH_TO_EXAMPLES / framework).glob("**/*.py") if f.parent != PATH_TO_EXAMPLES / framework
        ]
        all_examples.extend(examples)
        for test_file in test_files:
            with open(test_file, "r", encoding="utf-8") as f:
                content = f.read()
            # Map all examples to the test files found in examples/framework.
test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [ str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content ] # Also map the test files to themselves. test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append( str(test_file.relative_to(PATH_TO_REPO)) ) return test_example_deps, all_examples def create_reverse_dependency_map() -> Dict[str, List[str]]: """ Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively. Returns: `Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames depending on it recursively. This way the tests impacted by a change in file A are the test files in the list corresponding to key A in this result. """ cache = {} # Start from the example deps init. example_deps, examples = init_test_examples_dependencies() # Add all modules and all tests to all examples all_modules = list(PATH_TO_DIFFUSERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) + examples all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] # Compute the direct dependencies of all modules. direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules} direct_deps.update(example_deps) # This recurses the dependencies something_changed = True while something_changed: something_changed = False for m in all_modules: for d in direct_deps[m]: # We stop recursing at an init (cause we always end up in the main init and we don't want to add all # files which the main init imports) if d.endswith("__init__.py"): continue if d not in direct_deps: raise ValueError(f"KeyError:{d}. From {m}") new_deps = set(direct_deps[d]) - set(direct_deps[m]) if len(new_deps) > 0: direct_deps[m].extend(list(new_deps)) something_changed = True # Finally we can build the reverse map. 
    reverse_map = collections.defaultdict(list)
    for m in all_modules:
        for d in direct_deps[m]:
            reverse_map[d].append(m)

    # For inits, we don't do the reverse deps but the direct deps: if modifying an init, we want to make sure we test
    # all the modules impacted by that init.
    for m in [f for f in all_modules if f.endswith("__init__.py")]:
        # NOTE(review): this rebinds `direct_deps` (previously the dict of all direct dependencies) to the list for a
        # single init; the dict is not used after this point, but the name reuse is confusing.
        direct_deps = get_module_dependencies(m, cache=cache)
        deps = sum([reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")], direct_deps)
        reverse_map[m] = list(set(deps) - {m})

    return reverse_map


def create_module_to_test_map(reverse_map: Dict[str, List[str]] = None) -> Dict[str, List[str]]:
    """
    Extract the tests from the reverse_dependency_map and potentially filters the model tests.

    Args:
        reverse_map (`Dict[str, List[str]]`, *optional*):
            The reverse dependency map as created by `create_reverse_dependency_map`. Will default to the result of
            that function if not provided.

    Returns:
        `Dict[str, List[str]]`: A dictionary that maps each file to the tests to execute if that file was modified.
    """
    if reverse_map is None:
        reverse_map = create_reverse_dependency_map()

    # Utility that tells us if a given file is a test (taking test examples into account)
    def is_test(fname):
        if fname.startswith("tests"):
            return True
        if fname.startswith("examples") and fname.split(os.path.sep)[-1].startswith("test"):
            return True
        return False

    # Build the test map
    test_map = {module: [f for f in deps if is_test(f)] for module, deps in reverse_map.items()}

    return test_map


def check_imports_all_exist():
    """
    Isn't used per se by the test fetcher but might be used later as a quality check. Putting this here for now so the
    code is not lost. This checks all imports in a given file do exist.
""" cache = {} all_modules = list(PATH_TO_DIFFUSERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules} for module, deps in direct_deps.items(): for dep in deps: if not (PATH_TO_REPO / dep).is_file(): print(f"{module} has dependency on {dep} which does not exist.") def _print_list(l) -> str: """ Pretty print a list of elements with one line per element and a - starting each line. """ return "\n".join([f"- {f}" for f in l]) def update_test_map_with_core_pipelines(json_output_file: str): print(f"\n### ADD CORE PIPELINE TESTS ###\n{_print_list(IMPORTANT_PIPELINES)}") with open(json_output_file, "rb") as fp: test_map = json.load(fp) # Add core pipelines as their own test group test_map["core_pipelines"] = " ".join( sorted([str(PATH_TO_TESTS / f"pipelines/{pipe}") for pipe in IMPORTANT_PIPELINES]) ) # If there are no existing pipeline tests save the map if "pipelines" not in test_map: with open(json_output_file, "w", encoding="UTF-8") as fp: json.dump(test_map, fp, ensure_ascii=False) pipeline_tests = test_map.pop("pipelines") pipeline_tests = pipeline_tests.split(" ") # Remove core pipeline tests from the fetched pipeline tests updated_pipeline_tests = [] for pipe in pipeline_tests: if pipe == "tests/pipelines" or Path(pipe).parts[2] in IMPORTANT_PIPELINES: continue updated_pipeline_tests.append(pipe) if len(updated_pipeline_tests) > 0: test_map["pipelines"] = " ".join(sorted(updated_pipeline_tests)) with open(json_output_file, "w", encoding="UTF-8") as fp: json.dump(test_map, fp, ensure_ascii=False) def create_json_map(test_files_to_run: List[str], json_output_file: Optional[str] = None): """ Creates a map from a list of tests to run to easily split them by category, when running parallelism of slow tests. Args: test_files_to_run (`List[str]`): The list of tests to run. 
        json_output_file (`str`): The path where to store the built json map.
    """
    # No output path requested: nothing to do.
    if json_output_file is None:
        return

    test_map = {}
    for test_file in test_files_to_run:
        # `test_file` is a path to a test folder/file, starting with `tests/`. For example,
        # - `tests/models/bert/test_modeling_bert.py` or `tests/models/bert`
        # - `tests/trainer/test_trainer.py` or `tests/trainer`
        # - `tests/test_modeling_common.py`
        names = test_file.split(os.path.sep)
        module = names[1]
        if module in MODULES_TO_IGNORE:
            continue

        if len(names) > 2 or not test_file.endswith(".py"):
            # test folders under `tests` or python files under them
            # take the part like tokenization, `pipeline`, etc. for other test categories
            key = os.path.sep.join(names[1:2])
        else:
            # common test files directly under `tests/`
            key = "common"

        if key not in test_map:
            test_map[key] = []
        test_map[key].append(test_file)

    # sort the keys & values
    keys = sorted(test_map.keys())
    test_map = {k: " ".join(sorted(test_map[k])) for k in keys}
    with open(json_output_file, "w", encoding="UTF-8") as fp:
        json.dump(test_map, fp, ensure_ascii=False)


def infer_tests_to_run(
    output_file: str,
    diff_with_last_commit: bool = False,
    json_output_file: Optional[str] = None,
):
    """
    The main function called by the test fetcher. Determines the tests to run from the diff.

    Args:
        output_file (`str`):
            The path where to store the summary of the test fetcher analysis. Other files will be stored in the same
            folder:

            - examples_test_list.txt: The list of examples tests to run.
            - test_repo_utils.txt: Will indicate if the repo utils tests should be run or not.
            - doctest_list.txt: The list of doctests to run.

        diff_with_last_commit (`bool`, *optional*, defaults to `False`):
            Whether to analyze the diff with the last commit (for use on the main branch after a PR is merged) or with
            the branching point from main (for use on each PR).
filter_models (`bool`, *optional*, defaults to `True`): Whether or not to filter the tests to core models only, when a file modified results in a lot of model tests. json_output_file (`str`, *optional*): The path where to store the json file mapping categories of tests to tests to run (used for parallelism or the slow tests). """ modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit) print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}") # Create the map that will give us all impacted modules. reverse_map = create_reverse_dependency_map() impacted_files = modified_files.copy() for f in modified_files: if f in reverse_map: impacted_files.extend(reverse_map[f]) # Remove duplicates impacted_files = sorted(set(impacted_files)) print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}") # Grab the corresponding test files: if any(x in modified_files for x in ["setup.py"]): test_files_to_run = ["tests", "examples"] # in order to trigger pipeline tests even if no code change at all if "tests/utils/tiny_model_summary.json" in modified_files: test_files_to_run = ["tests"] any(f.split(os.path.sep)[0] == "utils" for f in modified_files) else: # All modified tests need to be run. test_files_to_run = [ f for f in modified_files if f.startswith("tests") and f.split(os.path.sep)[-1].startswith("test") ] # Then we grab the corresponding test files. 
test_map = create_module_to_test_map(reverse_map=reverse_map) for f in modified_files: if f in test_map: test_files_to_run.extend(test_map[f]) test_files_to_run = sorted(set(test_files_to_run)) # Make sure we did not end up with a test file that was removed test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()] any(f.split(os.path.sep)[0] == "utils" for f in modified_files) examples_tests_to_run = [f for f in test_files_to_run if f.startswith("examples")] test_files_to_run = [f for f in test_files_to_run if not f.startswith("examples")] print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}") if len(test_files_to_run) > 0: with open(output_file, "w", encoding="utf-8") as f: f.write(" ".join(test_files_to_run)) # Create a map that maps test categories to test files, i.e. `models/bert` -> [...test_modeling_bert.py, ...] # Get all test directories (and some common test files) under `tests` and `tests/models` if `test_files_to_run` # contains `tests` (i.e. when `setup.py` is changed). if "tests" in test_files_to_run: test_files_to_run = get_all_tests() create_json_map(test_files_to_run, json_output_file) print(f"\n### EXAMPLES TEST TO RUN ###\n{_print_list(examples_tests_to_run)}") if len(examples_tests_to_run) > 0: # We use `all` in the case `commit_flags["test_all"]` as well as in `create_circleci_config.py` for processing if examples_tests_to_run == ["examples"]: examples_tests_to_run = ["all"] example_file = Path(output_file).parent / "examples_test_list.txt" with open(example_file, "w", encoding="utf-8") as f: f.write(" ".join(examples_tests_to_run)) def filter_tests(output_file: str, filters: List[str]): """ Reads the content of the output file and filters out all the tests in a list of given folders. Args: output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher. filters (`List[str]`): A list of folders to filter. 
""" if not os.path.isfile(output_file): print("No test file found.") return with open(output_file, "r", encoding="utf-8") as f: test_files = f.read().split(" ") if len(test_files) == 0 or test_files == [""]: print("No tests to filter.") return if test_files == ["tests"]: test_files = [os.path.join("tests", f) for f in os.listdir("tests") if f not in ["__init__.py"] + filters] else: test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters] with open(output_file, "w", encoding="utf-8") as f: f.write(" ".join(test_files)) def parse_commit_message(commit_message: str) -> Dict[str, bool]: """ Parses the commit message to detect if a command is there to skip, force all or part of the CI. Args: commit_message (`str`): The commit message of the current commit. Returns: `Dict[str, bool]`: A dictionary of strings to bools with keys the following keys: `"skip"`, `"test_all_models"` and `"test_all"`. """ if commit_message is None: return {"skip": False, "no_filter": False, "test_all": False} command_search = re.search(r"\[([^\]]*)\]", commit_message) if command_search is not None: command = command_search.groups()[0] command = command.lower().replace("-", " ").replace("_", " ") skip = command in ["ci skip", "skip ci", "circleci skip", "skip circleci"] no_filter = set(command.split(" ")) == {"no", "filter"} test_all = set(command.split(" ")) == {"test", "all"} return {"skip": skip, "no_filter": no_filter, "test_all": test_all} else: return {"skip": False, "no_filter": False, "test_all": False} if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run" ) parser.add_argument( "--json_output_file", type=str, default="test_map.json", help="Where to store the tests to run in a dictionary format mapping test categories to test files", ) parser.add_argument( "--diff_with_last_commit", action="store_true", help="To fetch the tests between the 
current commit and the last commit", ) parser.add_argument( "--filter_tests", action="store_true", help="Will filter the pipeline/repo utils tests outside of the generated list of tests.", ) parser.add_argument( "--print_dependencies_of", type=str, help="Will only print the tree of modules depending on the file passed.", default=None, ) parser.add_argument( "--commit_message", type=str, help="The commit message (which could contain a command to force all tests or skip the CI).", default=None, ) args = parser.parse_args() if args.print_dependencies_of is not None: print_tree_deps_of(args.print_dependencies_of) else: repo = Repo(PATH_TO_REPO) commit_message = repo.head.commit.message commit_flags = parse_commit_message(commit_message) if commit_flags["skip"]: print("Force-skipping the CI") quit() if commit_flags["no_filter"]: print("Running all tests fetched without filtering.") if commit_flags["test_all"]: print("Force-launching all tests") diff_with_last_commit = args.diff_with_last_commit if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main: print("main branch detected, fetching tests against last commit.") diff_with_last_commit = True if not commit_flags["test_all"]: try: infer_tests_to_run( args.output_file, diff_with_last_commit=diff_with_last_commit, json_output_file=args.json_output_file, ) filter_tests(args.output_file, ["repo_utils"]) update_test_map_with_core_pipelines(json_output_file=args.json_output_file) except Exception as e: print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.") commit_flags["test_all"] = True if commit_flags["test_all"]: with open(args.output_file, "w", encoding="utf-8") as f: f.write("tests") example_file = Path(args.output_file).parent / "examples_test_list.txt" with open(example_file, "w", encoding="utf-8") as f: f.write("all") test_files_to_run = get_all_tests() create_json_map(test_files_to_run, args.json_output_file) 
update_test_map_with_core_pipelines(json_output_file=args.json_output_file)
diffusers/utils/tests_fetcher.py/0
{ "file_path": "diffusers/utils/tests_fetcher.py", "repo_id": "diffusers", "token_count": 19563 }
199
# Video benchmark

## Questions
What is the optimal trade-off between:
- minimizing loading time with random access,
- minimizing memory space on disk,
- maximizing success rate of policies,
- compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).

How to encode videos?
- Which video codec (`-vcodec`) to use? h264, h265, AV1?
- What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
- How much compression (`-crf`)? No compression with `0`, intermediate compression with `25` or extreme with `50+`?
- Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?

How to decode videos?
- Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
- What scenarios to use for the requesting timestamps during benchmark? (`timestamps_mode`)

## Variables
**Image content & size**
We don't expect the same optimal settings for a dataset of images from a simulation, or from real-world in an apartment, or in a factory, or outdoor, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
For these reasons, we run this benchmark on four representative datasets:
- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
- `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera.

Note: The datasets used for this benchmark need to be image datasets, not video datasets.

**Data augmentations**
We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
### Encoding parameters | parameter | values | | ----------- | ------------------------------------------------------------ | | **vcodec** | `libx264`, `libx265`, `libsvtav1` | | **pix_fmt** | `yuv444p`, `yuv420p` | | **g** | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` | | **crf** | `0`, `5`, `10`, `15`, `20`, `25`, `30`, `40`, `50`, `None` | Note that `crf` value might be interpreted differently by various video codecs. In other words, the same value used with one codec doesn't necessarily translate into the same compression level with another codec. In fact, the default value (`None`) isn't the same amongst the different video codecs. Importantly, it is also the case for many other ffmpeg arguments like `g` which specifies the frequency of the key frames. For a comprehensive list and documentation of these parameters, see the ffmpeg documentation depending on the video codec used: - h264: https://trac.ffmpeg.org/wiki/Encode/H.264 - h265: https://trac.ffmpeg.org/wiki/Encode/H.265 - AV1: https://trac.ffmpeg.org/wiki/Encode/AV1 ### Decoding parameters **Decoder** We tested two video decoding backends from torchvision: - `pyav` - `video_reader` (requires to build torchvision from source) **Requested timestamps** Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast. This of course is affected by the `-g` parameter during encoding, which specifies the frequency of the keyframes. Given our typical use cases in robotics policies which might request a few timestamps in different random places, we want to replicate these use cases with the following scenarios: - `1_frame`: 1 frame, - `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`), - `6_frames`: 6 consecutive frames (e.g. 
`[t + i / fps for i in range(6)]`) Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`. Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario: - `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g `[t, t + 5 / fps]`), However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded. ## Metrics **Data compression ratio (lower is better)** `video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images. **Loading time ratio (lower is better)** `video_images_load_time_ratio` is the ratio of the time it takes to decode frames from the video at a given timestamps over the time it takes to load the exact same original images. Lower is better. For instance, `video_images_load_time_ratio=200%` means that decoding from video is 2 times slower than loading the original images. **Average Mean Square Error (lower is better)** `avg_mse` is the average mean square error between each decoded frame and its corresponding original image over all requested timestamps, and also divided by the number of pixels in the image to be comparable when switching to different image sizes. **Average Peak Signal to Noise Ratio (higher is better)** `avg_psnr` measures the ratio between the maximum possible power of a signal and the power of corrupting noise that affects the fidelity of its representation. Higher PSNR indicates better quality. 
**Average Structural Similarity Index Measure (higher is better)** `avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity. One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular on web browser, for visualization purposes. h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility: - `yuv420p` is more widely supported across various platforms, including web browsers. - `yuv444p` offers higher color fidelity but might not be supported as broadly. <!-- **Loss of a pretrained policy (higher is better)** (not available) `loss_pretrained` is the result of evaluating with the selected encoding/decoding settings a policy pretrained on original images. It is easier to understand than `avg_l2_error`. **Success rate after retraining (higher is better)** (not available) `success_rate` is the result of training and evaluating a policy with the selected encoding/decoding settings. It is the most difficult metric to get but also the very best. --> ## How the benchmark works The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset. **Encoding:** for each `vcodec` and `pix_fmt` pair, we use a default value for `g` and `crf` upon which we change a single value (either `g` or `crf`) to one of the specified values (we don't test every combination of those as this would be computationally too heavy). This gives a unique set of encoding parameters which is used to encode the episode. **Decoding:** Then, for each of those unique encodings, we iterate through every combination of the decoding parameters `backend` and `timestamps_mode`. For each of them, we record the metrics of a number of samples (given by `--num-samples`). 
This is parallelized for efficiency and the number of processes can be controlled with `--num-workers`. Ideally, it's best to have a `--num-samples` that is divisible by `--num-workers`. Intermediate results saved for each `vcodec` and `pix_fmt` combination in csv tables. These are then all concatenated to a single table ready for analysis. ## Caveats We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination. Additional encoding parameters exist that are not included in this benchmark. In particular: - `-preset` which allows for selecting encoding presets. This represents a collection of options that will provide a certain encoding speed to compression ratio. By leaving this parameter unspecified, it is considered to be `medium` for libx264 and libx265 and `8` for libsvtav1. - `-tune` which allows to optimize the encoding for certain aspects (e.g. film quality, fast decoding, etc.). See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters. Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few: - `torchaudio` - `ffmpegio` - `decord` - `nvc` Note as well that since we are mostly interested in the performance at decoding time (also because encoding is done only once before uploading a dataset), we did not measure encoding times nor have any metrics regarding encoding. However, besides the necessity to build ffmpeg from source, encoding did not pose any issue and it didn't take a significant amount of time during this benchmark. ## Install Building ffmpeg from source is required to include libx265 and libaom/libsvtav1 (av1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)). 
**Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another version which is custom-built with all the video codecs for encoding. For the script to then use that version, you can prepend the command above with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built. ## Adding a video decoder Right now, we're only benchmarking the two video decoder available with torchvision: `pyav` and `video_reader`. You can easily add a new decoder to benchmark by adding it to this function in the script: ```diff def decode_video_frames( video_path: str, timestamps: list[float], tolerance_s: float, backend: str, ) -> torch.Tensor: if backend in ["pyav", "video_reader"]: return decode_video_frames_torchvision( video_path, timestamps, tolerance_s, backend ) + elif backend == ["your_decoder"]: + return your_decoder_function( + video_path, timestamps, tolerance_s, backend + ) else: raise NotImplementedError(backend) ``` ## Example For a quick run, you can try these parameters: ```bash python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ aliberts/aloha_mobile_shrimp_image \ --vcodec libx264 libx265 \ --pix-fmt yuv444p yuv420p \ --g 2 20 None \ --crf 10 40 None \ --timestamps-modes 1_frame 2_frames \ --backends pyav video_reader \ --num-samples 5 \ --num-workers 5 \ --save-frames 0 ``` ## Results ### Reproduce We ran the benchmark with the following parameters: ```bash # h264 and h265 encodings python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ aliberts/aloha_mobile_shrimp_image \ aliberts/paris_street \ aliberts/kitchen \ --vcodec libx264 libx265 \ --pix-fmt yuv444p yuv420p \ --g 1 2 3 4 5 6 10 15 20 40 None \ --crf 0 5 10 15 20 25 30 40 50 None \ --timestamps-modes 1_frame 2_frames 
6_frames \ --backends pyav video_reader \ --num-samples 50 \ --num-workers 5 \ --save-frames 1 # av1 encoding (only compatible with yuv420p and pyav decoder) python benchmark/video/run_video_benchmark.py \ --output-dir outputs/video_benchmark \ --repo-ids \ lerobot/pusht_image \ aliberts/aloha_mobile_shrimp_image \ aliberts/paris_street \ aliberts/kitchen \ --vcodec libsvtav1 \ --pix-fmt yuv420p \ --g 1 2 3 4 5 6 10 15 20 40 None \ --crf 0 5 10 15 20 25 30 40 50 None \ --timestamps-modes 1_frame 2_frames 6_frames \ --backends pyav \ --num-samples 50 \ --num-workers 5 \ --save-frames 1 ``` The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing) ### Parameters selected for LeRobotDataset Considering these results, we chose what we think is the best set of encoding parameter: - vcodec: `libsvtav1` - pix-fmt: `yuv420p` - g: `2` - crf: `30` Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_reader` does not support it (and `pyav` doesn't require a custom build of `torchvision`). 
### Summary These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav` | video_images_size_ratio | vcodec | pix_fmt | | | | | ---------------------------------- | ---------- | ------- | --------- | --------- | --------- | | | libx264 | | libx265 | | libsvtav1 | | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | | lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% | | aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% | | aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% | | aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% | | video_images_load_time_ratio | vcodec | pix_fmt | | | | | ---------------------------------- | ------- | ------- | -------- | ------- | --------- | | | libx264 | | libx265 | | libsvtav1 | | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | | lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 | | aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** | | aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** | | aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** | | | | vcodec | pix_fmt | | | | | ---------------------------------- | -------- | -------- | ------------ | -------- | --------- | ------------ | | | | libx264 | | libx265 | | libsvtav1 | | repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p | | lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 | | | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 | | | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% | | aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** | | | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** | | | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** | | aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** | | | 
avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** | | | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** | | aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** | | | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** | | | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** |
lerobot/benchmarks/video/README.md/0
{ "file_path": "lerobot/benchmarks/video/README.md", "repo_id": "lerobot", "token_count": 6190 }
200
# Imitation Learning in Sim This tutorial will explain how to train a neural network to control a robot in simulation with imitation learning. **You'll learn:** 1. How to record a dataset in simulation with [gym-hil](https://github.com/huggingface/gym-hil) and visualize the dataset. 2. How to train a policy using your data. 3. How to evaluate your policy in simulation and visualize the results. For the simulation environment we use the same [repo](https://github.com/huggingface/gym-hil) that is also being used by the Human-In-the-Loop (HIL) reinforcement learning algorithm. This environment is based on [MuJoCo](https://mujoco.org) and allows you to record datasets in LeRobotDataset format. Teleoperation is easiest with a controller like the Logitech F710, but you can also use your keyboard if you are up for the challenge. ## Installation First, install the `gym_hil` package within the LeRobot environment, go to your LeRobot folder and run this command: ```bash pip install -e ".[hilserl]" ``` ## Teleoperate and Record a Dataset To use `gym_hil` with LeRobot, you need to use a configuration file. An example config file can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_gym_hil_il.json). To teleoperate and collect a dataset, we need to modify this config file and you should add your `repo_id` here: `"repo_id": "il_gym",` and `"num_episodes": 30,` and make sure you set `mode` to `record`, "mode": "record". If you do not have a Nvidia GPU also change `"device": "cuda"` parameter in the config file (for example to `mps` for MacOS). By default the config file assumes you use a controller. To use your keyboard please change the envoirment specified at `"task"` in the config file and set it to `"PandaPickCubeKeyboard-v0"`. 
Then we can run this command to start: <hfoptions id="teleop_sim"> <hfoption id="Linux"> ```bash python -m lerobot.scripts.rl.gym_manipulator --config_path path/to/env_config_gym_hil_il.json ``` </hfoption> <hfoption id="MacOS"> ```bash mjpython -m lerobot.scripts.rl.gym_manipulator --config_path path/to/env_config_gym_hil_il.json ``` </hfoption> </hfoptions> Once rendered you can teleoperate the robot with the gamepad or keyboard, below you can find the gamepad/keyboard controls. Note that to teleoperate the robot you have to hold the "Human Take Over Pause Policy" Button `RB` to enable control! **Gamepad Controls** <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/gamepad_guide.jpg?raw=true" alt="Figure shows the control mappings on a Logitech gamepad." title="Gamepad Control Mapping" width="100%" ></img> </p> <p align="center"> <i>Gamepad button mapping for robot control and episode management</i> </p> **Keyboard controls** For keyboard controls use the `spacebar` to enable control and the following keys to move the robot: ```bash Arrow keys: Move in X-Y plane Shift and Shift_R: Move in Z axis Right Ctrl and Left Ctrl: Open and close gripper ESC: Exit ``` ## Visualize a dataset If you uploaded your dataset to the hub you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id. <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/dataset_visualizer_sim.png" alt="Figure shows the dataset visualizer" title="Dataset visualization" width="100%" ></img> </p> <p align="center"> <i>Dataset visualizer</i> </p> ## Train a policy To train a policy to control your robot, use the [`lerobot-train`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/scripts/train.py) script. A few arguments are required. 
Here is an example command: ```bash lerobot-train \ --dataset.repo_id=${HF_USER}/il_gym \ --policy.type=act \ --output_dir=outputs/train/il_sim_test \ --job_name=il_sim_test \ --policy.device=cuda \ --wandb.enable=true ``` Let's explain the command: 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/il_gym`. 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](https://github.com/huggingface/lerobot/blob/main/src/lerobot/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 3. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. 4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. Training should take several hours, 100k steps (which is the default) will take about 1h on Nvidia A100. You will find checkpoints in `outputs/train/il_sim_test/checkpoints`. #### Train using Collab If your local computer doesn't have a powerful GPU you could utilize Google Collab to train your model by following the [ACT training notebook](./notebooks#training-act). 
#### Upload policy checkpoints Once training is done, upload the latest checkpoint with: ```bash huggingface-cli upload ${HF_USER}/il_sim_test \ outputs/train/il_sim_test/checkpoints/last/pretrained_model ``` You can also upload intermediate checkpoints with: ```bash CKPT=010000 huggingface-cli upload ${HF_USER}/il_sim_test${CKPT} \ outputs/train/il_sim_test/checkpoints/${CKPT}/pretrained_model ``` ## Evaluate your policy in Sim To evaluate your policy we have to use the config file that can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/eval_config_gym_hil.json). Make sure to replace the `repo_id` with the dataset you trained on, for example `pepijn223/il_sim_dataset` and replace the `pretrained_policy_name_or_path` with your model id, for example `pepijn223/il_sim_model` Then you can run this command to visualize your trained policy <hfoptions id="eval_policy"> <hfoption id="Linux"> ```bash python -m lerobot.scripts.rl.eval_policy --config_path=path/to/eval_config_gym_hil.json ``` </hfoption> <hfoption id="MacOS"> ```bash mjpython -m lerobot.scripts.rl.eval_policy --config_path=path/to/eval_config_gym_hil.json ``` </hfoption> </hfoptions> > [!WARNING] > While the main workflow of training ACT in simulation is straightforward, there is significant room for exploring how to set up the task, define the initial state of the environment, and determine the type of data required during collection to learn the most effective policy. If your trained policy doesn't perform well, investigate the quality of the dataset it was trained on using our visualizers, as well as the action values and various hyperparameters related to ACT and the simulation. Congrats 🎉, you have finished this tutorial. 
If you want to continue with using LeRobot in simulation follow this [Tutorial on reinforcement learning in sim with HIL-SERL](https://huggingface.co/docs/lerobot/hilserl_sim) > [!TIP] > If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
lerobot/docs/source/il_sim.mdx/0
{ "file_path": "lerobot/docs/source/il_sim.mdx", "repo_id": "lerobot", "token_count": 2338 }
201
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.

It requires the installation of the 'gym_pusht' simulation environment. Install it by running:
```bash
pip install -e ".[pusht]"
```
"""

from pathlib import Path

import gym_pusht  # noqa: F401
import gymnasium as gym
import imageio
import numpy
import torch

from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy

# Create a directory to store the video of the evaluation
output_directory = Path("outputs/eval/example_pusht_diffusion")
output_directory.mkdir(parents=True, exist_ok=True)

# Select your device
device = "cuda"

# Provide the [hugging face repo id](https://huggingface.co/lerobot/diffusion_pusht):
pretrained_policy_path = "lerobot/diffusion_pusht"
# OR a path to a local outputs/train folder.
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")

policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
# Fix: make sure the policy weights live on the same device as the observation
# tensors moved to `device` below (this is a no-op if `from_pretrained` already
# placed them there) — otherwise `select_action` would fail on a device mismatch.
policy.to(device)

# Initialize evaluation environment to render two observation types:
# an image of the scene and state/position of the agent. The environment
# also automatically stops running after 300 interactions/steps.
env = gym.make(
    "gym_pusht/PushT-v0",
    obs_type="pixels_agent_pos",
    max_episode_steps=300,
)

# We can verify that the shapes of the features expected by the policy match the ones from the observations
# produced by the environment
print(policy.config.input_features)
print(env.observation_space)

# Similarly, we can check that the actions produced by the policy will match the actions expected by the
# environment
print(policy.config.output_features)
print(env.action_space)

# Reset the policy and environments to prepare for rollout
policy.reset()
numpy_observation, info = env.reset(seed=42)

# Prepare to collect every rewards and all the frames of the episode,
# from initial state to final state.
rewards = []
frames = []

# Render frame of the initial state
frames.append(env.render())

step = 0
done = False
while not done:
    # Prepare observation for the policy running in Pytorch
    state = torch.from_numpy(numpy_observation["agent_pos"])
    image = torch.from_numpy(numpy_observation["pixels"])

    # Convert to float32 with image from channel first in [0,255]
    # to channel last in [0,1]
    state = state.to(torch.float32)
    image = image.to(torch.float32) / 255
    image = image.permute(2, 0, 1)

    # Send data tensors from CPU to GPU
    state = state.to(device, non_blocking=True)
    image = image.to(device, non_blocking=True)

    # Add extra (empty) batch dimension, required to forward the policy
    state = state.unsqueeze(0)
    image = image.unsqueeze(0)

    # Create the policy input dictionary
    observation = {
        "observation.state": state,
        "observation.image": image,
    }

    # Predict the next action with respect to the current observation
    with torch.inference_mode():
        action = policy.select_action(observation)

    # Prepare the action for the environment
    numpy_action = action.squeeze(0).to("cpu").numpy()

    # Step through the environment and receive a new observation
    numpy_observation, reward, terminated, truncated, info = env.step(numpy_action)
    print(f"{step=} {reward=} {terminated=}")

    # Keep track of all the rewards and frames
    rewards.append(reward)
    frames.append(env.render())

    # The rollout is considered done when the success state is reached (i.e. terminated is True),
    # or the maximum number of iterations is reached (i.e. truncated is True).
    # Fix: use boolean `or` instead of bitwise `|` — `terminated`/`truncated` are
    # plain bools from `env.step` here, and `or` is the idiomatic, short-circuiting
    # operator for combining them.
    done = done or terminated or truncated
    step += 1

if terminated:
    print("Success!")
else:
    print("Failure!")

# Get the speed of environment (i.e. its number of frames per second).
fps = env.metadata["render_fps"]

# Encode all frames into a mp4 video.
video_path = output_directory / "rollout.mp4"
imageio.mimsave(str(video_path), numpy.stack(frames), fps=fps)

print(f"Video of the evaluation is available in '{video_path}'.")
lerobot/examples/2_evaluate_pretrained_policy.py/0
{ "file_path": "lerobot/examples/2_evaluate_pretrained_policy.py", "repo_id": "lerobot", "token_count": 1462 }
202
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import logging import shutil from collections.abc import Callable from pathlib import Path import datasets import numpy as np import packaging.version import PIL.Image import torch import torch.utils from datasets import concatenate_datasets, load_dataset from huggingface_hub import HfApi, snapshot_download from huggingface_hub.constants import REPOCARD_NAME from huggingface_hub.errors import RevisionNotFoundError from lerobot.constants import HF_LEROBOT_HOME from lerobot.datasets.compute_stats import aggregate_stats, compute_episode_stats from lerobot.datasets.image_writer import AsyncImageWriter, write_image from lerobot.datasets.utils import ( DEFAULT_FEATURES, DEFAULT_IMAGE_PATH, INFO_PATH, TASKS_PATH, _validate_feature_names, append_jsonlines, backward_compatible_episodes_stats, check_delta_timestamps, check_timestamps_sync, check_version_compatibility, create_empty_dataset_info, create_lerobot_dataset_card, embed_images, get_delta_indices, get_episode_data_index, get_hf_features_from_features, get_safe_version, hf_transform_to_torch, is_valid_version, load_episodes, load_episodes_stats, load_info, load_stats, load_tasks, validate_episode_buffer, validate_frame, write_episode, write_episode_stats, write_info, write_json, ) from lerobot.datasets.video_utils import ( VideoFrame, decode_video_frames, encode_video_frames, 
get_safe_default_codec, get_video_info, ) CODEBASE_VERSION = "v2.1" class LeRobotDatasetMetadata: def __init__( self, repo_id: str, root: str | Path | None = None, revision: str | None = None, force_cache_sync: bool = False, ): self.repo_id = repo_id self.revision = revision if revision else CODEBASE_VERSION self.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id try: if force_cache_sync: raise FileNotFoundError self.load_metadata() except (FileNotFoundError, NotADirectoryError): if is_valid_version(self.revision): self.revision = get_safe_version(self.repo_id, self.revision) (self.root / "meta").mkdir(exist_ok=True, parents=True) self.pull_from_repo(allow_patterns="meta/") self.load_metadata() def load_metadata(self): self.info = load_info(self.root) check_version_compatibility(self.repo_id, self._version, CODEBASE_VERSION) self.tasks, self.task_to_task_index = load_tasks(self.root) self.episodes = load_episodes(self.root) if self._version < packaging.version.parse("v2.1"): self.stats = load_stats(self.root) self.episodes_stats = backward_compatible_episodes_stats(self.stats, self.episodes) else: self.episodes_stats = load_episodes_stats(self.root) self.stats = aggregate_stats(list(self.episodes_stats.values())) def pull_from_repo( self, allow_patterns: list[str] | str | None = None, ignore_patterns: list[str] | str | None = None, ) -> None: snapshot_download( self.repo_id, repo_type="dataset", revision=self.revision, local_dir=self.root, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, ) @property def _version(self) -> packaging.version.Version: """Codebase version used to create this dataset.""" return packaging.version.parse(self.info["codebase_version"]) def get_data_file_path(self, ep_index: int) -> Path: ep_chunk = self.get_episode_chunk(ep_index) fpath = self.data_path.format(episode_chunk=ep_chunk, episode_index=ep_index) return Path(fpath) def get_video_file_path(self, ep_index: int, vid_key: str) -> Path: ep_chunk = 
self.get_episode_chunk(ep_index) fpath = self.video_path.format(episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_index) return Path(fpath) def get_episode_chunk(self, ep_index: int) -> int: return ep_index // self.chunks_size @property def data_path(self) -> str: """Formattable string for the parquet files.""" return self.info["data_path"] @property def video_path(self) -> str | None: """Formattable string for the video files.""" return self.info["video_path"] @property def robot_type(self) -> str | None: """Robot type used in recording this dataset.""" return self.info["robot_type"] @property def fps(self) -> int: """Frames per second used during data collection.""" return self.info["fps"] @property def features(self) -> dict[str, dict]: """All features contained in the dataset.""" return self.info["features"] @property def image_keys(self) -> list[str]: """Keys to access visual modalities stored as images.""" return [key for key, ft in self.features.items() if ft["dtype"] == "image"] @property def video_keys(self) -> list[str]: """Keys to access visual modalities stored as videos.""" return [key for key, ft in self.features.items() if ft["dtype"] == "video"] @property def camera_keys(self) -> list[str]: """Keys to access visual modalities (regardless of their storage method).""" return [key for key, ft in self.features.items() if ft["dtype"] in ["video", "image"]] @property def names(self) -> dict[str, list | dict]: """Names of the various dimensions of vector modalities.""" return {key: ft["names"] for key, ft in self.features.items()} @property def shapes(self) -> dict: """Shapes for the different features.""" return {key: tuple(ft["shape"]) for key, ft in self.features.items()} @property def total_episodes(self) -> int: """Total number of episodes available.""" return self.info["total_episodes"] @property def total_frames(self) -> int: """Total number of frames saved in this dataset.""" return self.info["total_frames"] @property def 
total_tasks(self) -> int: """Total number of different tasks performed in this dataset.""" return self.info["total_tasks"] @property def total_chunks(self) -> int: """Total number of chunks (groups of episodes).""" return self.info["total_chunks"] @property def chunks_size(self) -> int: """Max number of episodes per chunk.""" return self.info["chunks_size"] def get_task_index(self, task: str) -> int | None: """ Given a task in natural language, returns its task_index if the task already exists in the dataset, otherwise return None. """ return self.task_to_task_index.get(task, None) def add_task(self, task: str): """ Given a task in natural language, add it to the dictionary of tasks. """ if task in self.task_to_task_index: raise ValueError(f"The task '{task}' already exists and can't be added twice.") task_index = self.info["total_tasks"] self.task_to_task_index[task] = task_index self.tasks[task_index] = task self.info["total_tasks"] += 1 task_dict = { "task_index": task_index, "task": task, } append_jsonlines(task_dict, self.root / TASKS_PATH) def save_episode( self, episode_index: int, episode_length: int, episode_tasks: list[str], episode_stats: dict[str, dict], ) -> None: self.info["total_episodes"] += 1 self.info["total_frames"] += episode_length chunk = self.get_episode_chunk(episode_index) if chunk >= self.total_chunks: self.info["total_chunks"] += 1 self.info["splits"] = {"train": f"0:{self.info['total_episodes']}"} self.info["total_videos"] += len(self.video_keys) write_info(self.info, self.root) episode_dict = { "episode_index": episode_index, "tasks": episode_tasks, "length": episode_length, } self.episodes[episode_index] = episode_dict write_episode(episode_dict, self.root) self.episodes_stats[episode_index] = episode_stats self.stats = aggregate_stats([self.stats, episode_stats]) if self.stats else episode_stats write_episode_stats(episode_index, episode_stats, self.root) def update_video_info(self) -> None: """ Warning: this function writes info from 
first episode videos, implicitly assuming that all videos have been encoded the same way. Also, this means it assumes the first episode exists. """ for key in self.video_keys: if not self.features[key].get("info", None): video_path = self.root / self.get_video_file_path(ep_index=0, vid_key=key) self.info["features"][key]["info"] = get_video_info(video_path) def __repr__(self): feature_keys = list(self.features) return ( f"{self.__class__.__name__}({{\n" f" Repository ID: '{self.repo_id}',\n" f" Total episodes: '{self.total_episodes}',\n" f" Total frames: '{self.total_frames}',\n" f" Features: '{feature_keys}',\n" "})',\n" ) @classmethod def create( cls, repo_id: str, fps: int, features: dict, robot_type: str | None = None, root: str | Path | None = None, use_videos: bool = True, ) -> "LeRobotDatasetMetadata": """Creates metadata for a LeRobotDataset.""" obj = cls.__new__(cls) obj.repo_id = repo_id obj.root = Path(root) if root is not None else HF_LEROBOT_HOME / repo_id obj.root.mkdir(parents=True, exist_ok=False) # TODO(aliberts, rcadene): implement sanity check for features features = {**features, **DEFAULT_FEATURES} _validate_feature_names(features) obj.tasks, obj.task_to_task_index = {}, {} obj.episodes_stats, obj.stats, obj.episodes = {}, {}, {} obj.info = create_empty_dataset_info(CODEBASE_VERSION, fps, features, use_videos, robot_type) if len(obj.video_keys) > 0 and not use_videos: raise ValueError() write_json(obj.info, obj.root / INFO_PATH) obj.revision = None return obj class LeRobotDataset(torch.utils.data.Dataset): def __init__( self, repo_id: str, root: str | Path | None = None, episodes: list[int] | None = None, image_transforms: Callable | None = None, delta_timestamps: dict[list[float]] | None = None, tolerance_s: float = 1e-4, revision: str | None = None, force_cache_sync: bool = False, download_videos: bool = True, video_backend: str | None = None, batch_encoding_size: int = 1, ): """ 2 modes are available for instantiating this class, depending on 
2 different use cases: 1. Your dataset already exists: - On your local disk in the 'root' folder. This is typically the case when you recorded your dataset locally and you may or may not have pushed it to the hub yet. Instantiating this class with 'root' will load your dataset directly from disk. This can happen while you're offline (no internet connection). - On the Hugging Face Hub at the address https://huggingface.co/datasets/{repo_id} and not on your local disk in the 'root' folder. Instantiating this class with this 'repo_id' will download the dataset from that address and load it, pending your dataset is compliant with codebase_version v2.0. If your dataset has been created before this new format, you will be prompted to convert it using our conversion script from v1.6 to v2.0, which you can find at lerobot/datasets/v2/convert_dataset_v1_to_v2.py. 2. Your dataset doesn't already exists (either on local disk or on the Hub): you can create an empty LeRobotDataset with the 'create' classmethod. This can be used for recording a dataset or port an existing dataset to the LeRobotDataset format. In terms of files, LeRobotDataset encapsulates 3 main things: - metadata: - info contains various information about the dataset like shapes, keys, fps etc. - stats stores the dataset statistics of the different modalities for normalization - tasks contains the prompts for each task of the dataset, which can be used for task-conditioned training. - hf_dataset (from datasets.Dataset), which will read any values from parquet files. - videos (optional) from which frames are loaded to be synchronous with data from parquet files. A typical LeRobotDataset looks like this from its root path: . ├── data │ ├── chunk-000 │ │ ├── episode_000000.parquet │ │ ├── episode_000001.parquet │ │ ├── episode_000002.parquet │ │ └── ... │ ├── chunk-001 │ │ ├── episode_001000.parquet │ │ ├── episode_001001.parquet │ │ ├── episode_001002.parquet │ │ └── ... │ └── ... 
├── meta │ ├── episodes.jsonl │ ├── info.json │ ├── stats.json │ └── tasks.jsonl └── videos ├── chunk-000 │ ├── observation.images.laptop │ │ ├── episode_000000.mp4 │ │ ├── episode_000001.mp4 │ │ ├── episode_000002.mp4 │ │ └── ... │ ├── observation.images.phone │ │ ├── episode_000000.mp4 │ │ ├── episode_000001.mp4 │ │ ├── episode_000002.mp4 │ │ └── ... ├── chunk-001 └── ... Note that this file-based structure is designed to be as versatile as possible. The files are split by episodes which allows a more granular control over which episodes one wants to use and download. The structure of the dataset is entirely described in the info.json file, which can be easily downloaded or viewed directly on the hub before downloading any actual data. The type of files used are very simple and do not need complex tools to be read, it only uses .parquet, .json and .mp4 files (and .md for the README). Args: repo_id (str): This is the repo id that will be used to fetch the dataset. Locally, the dataset will be stored under root/repo_id. root (Path | None, optional): Local directory to use for downloading/writing files. You can also set the LEROBOT_HOME environment variable to point to a different location. Defaults to '~/.cache/huggingface/lerobot'. episodes (list[int] | None, optional): If specified, this will only load episodes specified by their episode_index in this list. Defaults to None. image_transforms (Callable | None, optional): You can pass standard v2 image transforms from torchvision.transforms.v2 here which will be applied to visual modalities (whether they come from videos or images). Defaults to None. delta_timestamps (dict[list[float]] | None, optional): _description_. Defaults to None. tolerance_s (float, optional): Tolerance in seconds used to ensure data timestamps are actually in sync with the fps value. It is used at the init of the dataset to make sure that each timestamps is separated to the next by 1/fps +/- tolerance_s. 
This also applies to frames decoded from video files. It is also used to check that `delta_timestamps` (when provided) are multiples of 1/fps. Defaults to 1e-4. revision (str, optional): An optional Git revision id which can be a branch name, a tag, or a commit hash. Defaults to current codebase version tag. force_cache_sync (bool, optional): Flag to sync and refresh local files first. If True and files are already present in the local cache, this will be faster. However, files loaded might not be in sync with the version on the hub, especially if you specified 'revision'. Defaults to False. download_videos (bool, optional): Flag to download the videos. Note that when set to True but the video files are already present on local disk, they won't be downloaded again. Defaults to True. video_backend (str | None, optional): Video backend to use for decoding videos. Defaults to torchcodec when available int the platform; otherwise, defaults to 'pyav'. You can also use the 'pyav' decoder used by Torchvision, which used to be the default option, or 'video_reader' which is another decoder of Torchvision. batch_encoding_size (int, optional): Number of episodes to accumulate before batch encoding videos. Set to 1 for immediate encoding (default), or higher for batched encoding. Defaults to 1. 
""" super().__init__() self.repo_id = repo_id self.root = Path(root) if root else HF_LEROBOT_HOME / repo_id self.image_transforms = image_transforms self.delta_timestamps = delta_timestamps self.episodes = episodes self.tolerance_s = tolerance_s self.revision = revision if revision else CODEBASE_VERSION self.video_backend = video_backend if video_backend else get_safe_default_codec() self.delta_indices = None self.batch_encoding_size = batch_encoding_size self.episodes_since_last_encoding = 0 # Unused attributes self.image_writer = None self.episode_buffer = None self.root.mkdir(exist_ok=True, parents=True) # Load metadata self.meta = LeRobotDatasetMetadata( self.repo_id, self.root, self.revision, force_cache_sync=force_cache_sync ) if self.episodes is not None and self.meta._version >= packaging.version.parse("v2.1"): episodes_stats = [self.meta.episodes_stats[ep_idx] for ep_idx in self.episodes] self.stats = aggregate_stats(episodes_stats) # Load actual data try: if force_cache_sync: raise FileNotFoundError assert all((self.root / fpath).is_file() for fpath in self.get_episodes_file_paths()) self.hf_dataset = self.load_hf_dataset() except (AssertionError, FileNotFoundError, NotADirectoryError): self.revision = get_safe_version(self.repo_id, self.revision) self.download_episodes(download_videos) self.hf_dataset = self.load_hf_dataset() self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes) # Check timestamps timestamps = torch.stack(self.hf_dataset["timestamp"]).numpy() episode_indices = torch.stack(self.hf_dataset["episode_index"]).numpy() ep_data_index_np = {k: t.numpy() for k, t in self.episode_data_index.items()} check_timestamps_sync(timestamps, episode_indices, ep_data_index_np, self.fps, self.tolerance_s) # Setup delta_indices if self.delta_timestamps is not None: check_delta_timestamps(self.delta_timestamps, self.fps, self.tolerance_s) self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps) def push_to_hub( 
self, branch: str | None = None, tags: list | None = None, license: str | None = "apache-2.0", tag_version: bool = True, push_videos: bool = True, private: bool = False, allow_patterns: list[str] | str | None = None, upload_large_folder: bool = False, **card_kwargs, ) -> None: ignore_patterns = ["images/"] if not push_videos: ignore_patterns.append("videos/") hub_api = HfApi() hub_api.create_repo( repo_id=self.repo_id, private=private, repo_type="dataset", exist_ok=True, ) if branch: hub_api.create_branch( repo_id=self.repo_id, branch=branch, revision=self.revision, repo_type="dataset", exist_ok=True, ) upload_kwargs = { "repo_id": self.repo_id, "folder_path": self.root, "repo_type": "dataset", "revision": branch, "allow_patterns": allow_patterns, "ignore_patterns": ignore_patterns, } if upload_large_folder: hub_api.upload_large_folder(**upload_kwargs) else: hub_api.upload_folder(**upload_kwargs) if not hub_api.file_exists(self.repo_id, REPOCARD_NAME, repo_type="dataset", revision=branch): card = create_lerobot_dataset_card( tags=tags, dataset_info=self.meta.info, license=license, **card_kwargs ) card.push_to_hub(repo_id=self.repo_id, repo_type="dataset", revision=branch) if tag_version: with contextlib.suppress(RevisionNotFoundError): hub_api.delete_tag(self.repo_id, tag=CODEBASE_VERSION, repo_type="dataset") hub_api.create_tag(self.repo_id, tag=CODEBASE_VERSION, revision=branch, repo_type="dataset") def pull_from_repo( self, allow_patterns: list[str] | str | None = None, ignore_patterns: list[str] | str | None = None, ) -> None: snapshot_download( self.repo_id, repo_type="dataset", revision=self.revision, local_dir=self.root, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, ) def download_episodes(self, download_videos: bool = True) -> None: """Downloads the dataset from the given 'repo_id' at the provided version. If 'episodes' is given, this will only download those episodes (selected by their episode_index). 
If 'episodes' is None, the whole dataset will be downloaded. Thanks to the behavior of snapshot_download, if the files are already present in 'local_dir', they won't be downloaded again. """ # TODO(rcadene, aliberts): implement faster transfer # https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads files = None ignore_patterns = None if download_videos else "videos/" if self.episodes is not None: files = self.get_episodes_file_paths() self.pull_from_repo(allow_patterns=files, ignore_patterns=ignore_patterns) def get_episodes_file_paths(self) -> list[Path]: episodes = self.episodes if self.episodes is not None else list(range(self.meta.total_episodes)) fpaths = [str(self.meta.get_data_file_path(ep_idx)) for ep_idx in episodes] if len(self.meta.video_keys) > 0: video_files = [ str(self.meta.get_video_file_path(ep_idx, vid_key)) for vid_key in self.meta.video_keys for ep_idx in episodes ] fpaths += video_files return fpaths def load_hf_dataset(self) -> datasets.Dataset: """hf_dataset contains all the observations, states, actions, rewards, etc.""" if self.episodes is None: path = str(self.root / "data") hf_dataset = load_dataset("parquet", data_dir=path, split="train") else: files = [str(self.root / self.meta.get_data_file_path(ep_idx)) for ep_idx in self.episodes] hf_dataset = load_dataset("parquet", data_files=files, split="train") # TODO(aliberts): hf_dataset.set_format("torch") hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def create_hf_dataset(self) -> datasets.Dataset: features = get_hf_features_from_features(self.features) ft_dict = {col: [] for col in features} hf_dataset = datasets.Dataset.from_dict(ft_dict, features=features, split="train") # TODO(aliberts): hf_dataset.set_format("torch") hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset @property def fps(self) -> int: """Frames per second used during data collection.""" return self.meta.fps @property def num_frames(self) -> int: """Number of 
frames in selected episodes.""" return len(self.hf_dataset) if self.hf_dataset is not None else self.meta.total_frames @property def num_episodes(self) -> int: """Number of episodes selected.""" return len(self.episodes) if self.episodes is not None else self.meta.total_episodes @property def features(self) -> dict[str, dict]: return self.meta.features @property def hf_features(self) -> datasets.Features: """Features of the hf_dataset.""" if self.hf_dataset is not None: return self.hf_dataset.features else: return get_hf_features_from_features(self.features) def _get_query_indices(self, idx: int, ep_idx: int) -> tuple[dict[str, list[int | bool]]]: ep_start = self.episode_data_index["from"][ep_idx] ep_end = self.episode_data_index["to"][ep_idx] query_indices = { key: [max(ep_start.item(), min(ep_end.item() - 1, idx + delta)) for delta in delta_idx] for key, delta_idx in self.delta_indices.items() } padding = { # Pad values outside of current episode range f"{key}_is_pad": torch.BoolTensor( [(idx + delta < ep_start.item()) | (idx + delta >= ep_end.item()) for delta in delta_idx] ) for key, delta_idx in self.delta_indices.items() } return query_indices, padding def _get_query_timestamps( self, current_ts: float, query_indices: dict[str, list[int]] | None = None, ) -> dict[str, list[float]]: query_timestamps = {} for key in self.meta.video_keys: if query_indices is not None and key in query_indices: timestamps = self.hf_dataset.select(query_indices[key])["timestamp"] query_timestamps[key] = torch.stack(timestamps).tolist() else: query_timestamps[key] = [current_ts] return query_timestamps def _query_hf_dataset(self, query_indices: dict[str, list[int]]) -> dict: return { key: torch.stack(self.hf_dataset.select(q_idx)[key]) for key, q_idx in query_indices.items() if key not in self.meta.video_keys } def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict[str, torch.Tensor]: """Note: When using data workers (e.g. 
DataLoader with num_workers>0), do not call this function in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a Segmentation Fault. This probably happens because a memory reference to the video loader is created in the main process and a subprocess fails to access it. """ item = {} for vid_key, query_ts in query_timestamps.items(): video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key) frames = decode_video_frames(video_path, query_ts, self.tolerance_s, self.video_backend) item[vid_key] = frames.squeeze(0) return item def _add_padding_keys(self, item: dict, padding: dict[str, list[bool]]) -> dict: for key, val in padding.items(): item[key] = torch.BoolTensor(val) return item def __len__(self): return self.num_frames def __getitem__(self, idx) -> dict: item = self.hf_dataset[idx] ep_idx = item["episode_index"].item() query_indices = None if self.delta_indices is not None: query_indices, padding = self._get_query_indices(idx, ep_idx) query_result = self._query_hf_dataset(query_indices) item = {**item, **padding} for key, val in query_result.items(): item[key] = val if len(self.meta.video_keys) > 0: current_ts = item["timestamp"].item() query_timestamps = self._get_query_timestamps(current_ts, query_indices) video_frames = self._query_videos(query_timestamps, ep_idx) item = {**video_frames, **item} if self.image_transforms is not None: image_keys = self.meta.camera_keys for cam in image_keys: item[cam] = self.image_transforms(item[cam]) # Add task as a string task_idx = item["task_index"].item() item["task"] = self.meta.tasks[task_idx] return item def __repr__(self): feature_keys = list(self.features) return ( f"{self.__class__.__name__}({{\n" f" Repository ID: '{self.repo_id}',\n" f" Number of selected episodes: '{self.num_episodes}',\n" f" Number of selected samples: '{self.num_frames}',\n" f" Features: '{feature_keys}',\n" "})',\n" ) def create_episode_buffer(self, episode_index: int | None = None) -> dict: 
current_ep_idx = self.meta.total_episodes if episode_index is None else episode_index ep_buffer = {} # size and task are special cases that are not in self.features ep_buffer["size"] = 0 ep_buffer["task"] = [] for key in self.features: ep_buffer[key] = current_ep_idx if key == "episode_index" else [] return ep_buffer def _get_image_file_path(self, episode_index: int, image_key: str, frame_index: int) -> Path: fpath = DEFAULT_IMAGE_PATH.format( image_key=image_key, episode_index=episode_index, frame_index=frame_index ) return self.root / fpath def _save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path) -> None: if self.image_writer is None: if isinstance(image, torch.Tensor): image = image.cpu().numpy() write_image(image, fpath) else: self.image_writer.save_image(image=image, fpath=fpath) def add_frame(self, frame: dict, task: str, timestamp: float | None = None) -> None: """ This function only adds the frame to the episode_buffer. Apart from images — which are written in a temporary directory — nothing is written to disk. To save those frames, the 'save_episode()' method then needs to be called. """ # Convert torch to numpy if needed for name in frame: if isinstance(frame[name], torch.Tensor): frame[name] = frame[name].numpy() validate_frame(frame, self.features) if self.episode_buffer is None: self.episode_buffer = self.create_episode_buffer() # Automatically add frame_index and timestamp to episode buffer frame_index = self.episode_buffer["size"] if timestamp is None: timestamp = frame_index / self.fps self.episode_buffer["frame_index"].append(frame_index) self.episode_buffer["timestamp"].append(timestamp) self.episode_buffer["task"].append(task) # Add frame features to episode_buffer for key in frame: if key not in self.features: raise ValueError( f"An element of the frame is not in the features. '{key}' not in '{self.features.keys()}'." 
) if self.features[key]["dtype"] in ["image", "video"]: img_path = self._get_image_file_path( episode_index=self.episode_buffer["episode_index"], image_key=key, frame_index=frame_index ) if frame_index == 0: img_path.parent.mkdir(parents=True, exist_ok=True) self._save_image(frame[key], img_path) self.episode_buffer[key].append(str(img_path)) else: self.episode_buffer[key].append(frame[key]) self.episode_buffer["size"] += 1 def save_episode(self, episode_data: dict | None = None) -> None: """ This will save to disk the current episode in self.episode_buffer. Video encoding is handled automatically based on batch_encoding_size: - If batch_encoding_size == 1: Videos are encoded immediately after each episode - If batch_encoding_size > 1: Videos are encoded in batches. Args: episode_data (dict | None, optional): Dict containing the episode data to save. If None, this will save the current episode in self.episode_buffer, which is filled with 'add_frame'. Defaults to None. """ if not episode_data: episode_buffer = self.episode_buffer else: episode_buffer = episode_data validate_episode_buffer(episode_buffer, self.meta.total_episodes, self.features) # size and task are special cases that won't be added to hf_dataset episode_length = episode_buffer.pop("size") tasks = episode_buffer.pop("task") episode_tasks = list(set(tasks)) episode_index = episode_buffer["episode_index"] episode_buffer["index"] = np.arange(self.meta.total_frames, self.meta.total_frames + episode_length) episode_buffer["episode_index"] = np.full((episode_length,), episode_index) # Add new tasks to the tasks dictionary for task in episode_tasks: task_index = self.meta.get_task_index(task) if task_index is None: self.meta.add_task(task) # Given tasks in natural language, find their corresponding task indices episode_buffer["task_index"] = np.array([self.meta.get_task_index(task) for task in tasks]) for key, ft in self.features.items(): # index, episode_index, task_index are already processed above, and 
image and video # are processed separately by storing image path and frame info as meta data if key in ["index", "episode_index", "task_index"] or ft["dtype"] in ["image", "video"]: continue episode_buffer[key] = np.stack(episode_buffer[key]) self._wait_image_writer() self._save_episode_table(episode_buffer, episode_index) ep_stats = compute_episode_stats(episode_buffer, self.features) has_video_keys = len(self.meta.video_keys) > 0 use_batched_encoding = self.batch_encoding_size > 1 if has_video_keys and not use_batched_encoding: self.encode_episode_videos(episode_index) # `meta.save_episode` should be executed after encoding the videos self.meta.save_episode(episode_index, episode_length, episode_tasks, ep_stats) # Check if we should trigger batch encoding if has_video_keys and use_batched_encoding: self.episodes_since_last_encoding += 1 if self.episodes_since_last_encoding == self.batch_encoding_size: start_ep = self.num_episodes - self.batch_encoding_size end_ep = self.num_episodes logging.info( f"Batch encoding {self.batch_encoding_size} videos for episodes {start_ep} to {end_ep - 1}" ) self.batch_encode_videos(start_ep, end_ep) self.episodes_since_last_encoding = 0 # Episode data index and timestamp checking ep_data_index = get_episode_data_index(self.meta.episodes, [episode_index]) ep_data_index_np = {k: t.numpy() for k, t in ep_data_index.items()} check_timestamps_sync( episode_buffer["timestamp"], episode_buffer["episode_index"], ep_data_index_np, self.fps, self.tolerance_s, ) # Verify that we have one parquet file per episode and the number of video files matches the number of encoded episodes parquet_files = list(self.root.rglob("*.parquet")) assert len(parquet_files) == self.num_episodes video_files = list(self.root.rglob("*.mp4")) assert len(video_files) == (self.num_episodes - self.episodes_since_last_encoding) * len( self.meta.video_keys ) if not episode_data: # Reset the buffer self.episode_buffer = self.create_episode_buffer() def 
_save_episode_table(self, episode_buffer: dict, episode_index: int) -> None: episode_dict = {key: episode_buffer[key] for key in self.hf_features} ep_dataset = datasets.Dataset.from_dict(episode_dict, features=self.hf_features, split="train") ep_dataset = embed_images(ep_dataset) self.hf_dataset = concatenate_datasets([self.hf_dataset, ep_dataset]) self.hf_dataset.set_transform(hf_transform_to_torch) ep_data_path = self.root / self.meta.get_data_file_path(ep_index=episode_index) ep_data_path.parent.mkdir(parents=True, exist_ok=True) ep_dataset.to_parquet(ep_data_path) def clear_episode_buffer(self) -> None: episode_index = self.episode_buffer["episode_index"] # Clean up image files for the current episode buffer if self.image_writer is not None: for cam_key in self.meta.camera_keys: img_dir = self._get_image_file_path( episode_index=episode_index, image_key=cam_key, frame_index=0 ).parent if img_dir.is_dir(): shutil.rmtree(img_dir) # Reset the buffer self.episode_buffer = self.create_episode_buffer() def start_image_writer(self, num_processes: int = 0, num_threads: int = 4) -> None: if isinstance(self.image_writer, AsyncImageWriter): logging.warning( "You are starting a new AsyncImageWriter that is replacing an already existing one in the dataset." ) self.image_writer = AsyncImageWriter( num_processes=num_processes, num_threads=num_threads, ) def stop_image_writer(self) -> None: """ Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to remove the image_writer in order for the LeRobotDataset object to be picklable and parallelized. """ if self.image_writer is not None: self.image_writer.stop() self.image_writer = None def _wait_image_writer(self) -> None: """Wait for asynchronous image writer to finish.""" if self.image_writer is not None: self.image_writer.wait_until_done() def encode_episode_videos(self, episode_index: int) -> None: """ Use ffmpeg to convert frames stored as png into mp4 videos. 
Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding, since video encoding with ffmpeg is already using multithreading. This method handles video encoding steps: - Video encoding via ffmpeg - Video info updating in metadata - Raw image cleanup Args: episode_index (int): Index of the episode to encode. """ for key in self.meta.video_keys: video_path = self.root / self.meta.get_video_file_path(episode_index, key) if video_path.is_file(): # Skip if video is already encoded. Could be the case when resuming data recording. continue img_dir = self._get_image_file_path( episode_index=episode_index, image_key=key, frame_index=0 ).parent encode_video_frames(img_dir, video_path, self.fps, overwrite=True) shutil.rmtree(img_dir) # Update video info (only needed when first episode is encoded since it reads from episode 0) if len(self.meta.video_keys) > 0 and episode_index == 0: self.meta.update_video_info() write_info(self.meta.info, self.meta.root) # ensure video info always written properly def batch_encode_videos(self, start_episode: int = 0, end_episode: int | None = None) -> None: """ Batch encode videos for multiple episodes. Args: start_episode: Starting episode index (inclusive) end_episode: Ending episode index (exclusive). 
If None, encodes all episodes from start_episode """ if end_episode is None: end_episode = self.meta.total_episodes logging.info(f"Starting batch video encoding for episodes {start_episode} to {end_episode - 1}") # Encode all episodes with cleanup enabled for individual episodes for ep_idx in range(start_episode, end_episode): logging.info(f"Encoding videos for episode {ep_idx}") self.encode_episode_videos(ep_idx) logging.info("Batch video encoding completed") @classmethod def create( cls, repo_id: str, fps: int, features: dict, root: str | Path | None = None, robot_type: str | None = None, use_videos: bool = True, tolerance_s: float = 1e-4, image_writer_processes: int = 0, image_writer_threads: int = 0, video_backend: str | None = None, batch_encoding_size: int = 1, ) -> "LeRobotDataset": """Create a LeRobot Dataset from scratch in order to record data.""" obj = cls.__new__(cls) obj.meta = LeRobotDatasetMetadata.create( repo_id=repo_id, fps=fps, robot_type=robot_type, features=features, root=root, use_videos=use_videos, ) obj.repo_id = obj.meta.repo_id obj.root = obj.meta.root obj.revision = None obj.tolerance_s = tolerance_s obj.image_writer = None obj.batch_encoding_size = batch_encoding_size obj.episodes_since_last_encoding = 0 if image_writer_processes or image_writer_threads: obj.start_image_writer(image_writer_processes, image_writer_threads) # TODO(aliberts, rcadene, alexander-soare): Merge this with OnlineBuffer/DataBuffer obj.episode_buffer = obj.create_episode_buffer() obj.episodes = None obj.hf_dataset = obj.create_hf_dataset() obj.image_transforms = None obj.delta_timestamps = None obj.delta_indices = None obj.episode_data_index = None obj.video_backend = video_backend if video_backend is not None else get_safe_default_codec() return obj class MultiLeRobotDataset(torch.utils.data.Dataset): """A dataset consisting of multiple underlying `LeRobotDataset`s. 
The underlying `LeRobotDataset`s are effectively concatenated, and this class adopts much of the API structure of `LeRobotDataset`. """ def __init__( self, repo_ids: list[str], root: str | Path | None = None, episodes: dict | None = None, image_transforms: Callable | None = None, delta_timestamps: dict[list[float]] | None = None, tolerances_s: dict | None = None, download_videos: bool = True, video_backend: str | None = None, ): super().__init__() self.repo_ids = repo_ids self.root = Path(root) if root else HF_LEROBOT_HOME self.tolerances_s = tolerances_s if tolerances_s else dict.fromkeys(repo_ids, 0.0001) # Construct the underlying datasets passing everything but `transform` and `delta_timestamps` which # are handled by this class. self._datasets = [ LeRobotDataset( repo_id, root=self.root / repo_id, episodes=episodes[repo_id] if episodes else None, image_transforms=image_transforms, delta_timestamps=delta_timestamps, tolerance_s=self.tolerances_s[repo_id], download_videos=download_videos, video_backend=video_backend, ) for repo_id in repo_ids ] # Disable any data keys that are not common across all of the datasets. Note: we may relax this # restriction in future iterations of this class. For now, this is necessary at least for being able # to use PyTorch's default DataLoader collate function. self.disabled_features = set() intersection_features = set(self._datasets[0].features) for ds in self._datasets: intersection_features.intersection_update(ds.features) if len(intersection_features) == 0: raise RuntimeError( "Multiple datasets were provided but they had no keys common to all of them. " "The multi-dataset functionality currently only keeps common keys." ) for repo_id, ds in zip(self.repo_ids, self._datasets, strict=True): extra_keys = set(ds.features).difference(intersection_features) logging.warning( f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the " "other datasets." 
) self.disabled_features.update(extra_keys) self.image_transforms = image_transforms self.delta_timestamps = delta_timestamps # TODO(rcadene, aliberts): We should not perform this aggregation for datasets # with multiple robots of different ranges. Instead we should have one normalization # per robot. self.stats = aggregate_stats([dataset.meta.stats for dataset in self._datasets]) @property def repo_id_to_index(self): """Return a mapping from dataset repo_id to a dataset index automatically created by this class. This index is incorporated as a data key in the dictionary returned by `__getitem__`. """ return {repo_id: i for i, repo_id in enumerate(self.repo_ids)} @property def repo_index_to_id(self): """Return the inverse mapping if repo_id_to_index.""" return {v: k for k, v in self.repo_id_to_index} @property def fps(self) -> int: """Frames per second used during data collection. NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. """ return self._datasets[0].meta.info["fps"] @property def video(self) -> bool: """Returns True if this dataset loads video frames from mp4 files. Returns False if it only loads images from png files. NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. """ return self._datasets[0].meta.info.get("video", False) @property def features(self) -> datasets.Features: features = {} for dataset in self._datasets: features.update({k: v for k, v in dataset.hf_features.items() if k not in self.disabled_features}) return features @property def camera_keys(self) -> list[str]: """Keys to access image and video stream from cameras.""" keys = [] for key, feats in self.features.items(): if isinstance(feats, (datasets.Image, VideoFrame)): keys.append(key) return keys @property def video_frame_keys(self) -> list[str]: """Keys to access video frames that requires to be decoded into images. 
Note: It is empty if the dataset contains images only, or equal to `self.cameras` if the dataset contains videos only, or can even be a subset of `self.cameras` in a case of a mixed image/video dataset. """ video_frame_keys = [] for key, feats in self.features.items(): if isinstance(feats, VideoFrame): video_frame_keys.append(key) return video_frame_keys @property def num_frames(self) -> int: """Number of samples/frames.""" return sum(d.num_frames for d in self._datasets) @property def num_episodes(self) -> int: """Number of episodes.""" return sum(d.num_episodes for d in self._datasets) @property def tolerance_s(self) -> float: """Tolerance in seconds used to discard loaded frames when their timestamps are not close enough from the requested frames. It is only used when `delta_timestamps` is provided or when loading video frames from mp4 files. """ # 1e-4 to account for possible numerical error return 1 / self.fps - 1e-4 def __len__(self): return self.num_frames def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: if idx >= len(self): raise IndexError(f"Index {idx} out of bounds.") # Determine which dataset to get an item from based on the index. 
start_idx = 0 dataset_idx = 0 for dataset in self._datasets: if idx >= start_idx + dataset.num_frames: start_idx += dataset.num_frames dataset_idx += 1 continue break else: raise AssertionError("We expect the loop to break out as long as the index is within bounds.") item = self._datasets[dataset_idx][idx - start_idx] item["dataset_index"] = torch.tensor(dataset_idx) for data_key in self.disabled_features: if data_key in item: del item[data_key] return item def __repr__(self): return ( f"{self.__class__.__name__}(\n" f" Repository IDs: '{self.repo_ids}',\n" f" Number of Samples: {self.num_frames},\n" f" Number of Episodes: {self.num_episodes},\n" f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n" f" Recorded Frames per Second: {self.fps},\n" f" Camera Keys: {self.camera_keys},\n" f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n" f" Transformations: {self.image_transforms},\n" f")" )
lerobot/src/lerobot/datasets/lerobot_dataset.py/0
{ "file_path": "lerobot/src/lerobot/datasets/lerobot_dataset.py", "repo_id": "lerobot", "token_count": 22296 }
203
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any

import einops
import gymnasium as gym
import numpy as np
import torch
from torch import Tensor

from lerobot.configs.types import FeatureType, PolicyFeature
from lerobot.envs.configs import EnvConfig
from lerobot.utils.utils import get_channel_first_image_shape


def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Tensor]:
    # TODO(aliberts, rcadene): refactor this to use features from the environment (no hardcoding)
    """Convert environment observation to LeRobot format observation.

    Images are validated (channel-last uint8), converted to channel-first float32 in [0, 1],
    and a batch dimension is added to any unbatched entry.

    Args:
        observations: Dictionary of observation batches from a Gym vector environment.
            Expected keys: optionally "pixels" (a single array or a dict of named camera
            arrays), optionally "environment_state", and "agent_pos" (required).

    Returns:
        Dictionary of observation batches with keys renamed to LeRobot format
        ("observation.image(s).*", "observation.environment_state", "observation.state")
        and values as tensors.
    """
    # map to expected inputs for the policy
    return_observations = {}
    if "pixels" in observations:
        if isinstance(observations["pixels"], dict):
            imgs = {f"observation.images.{key}": img for key, img in observations["pixels"].items()}
        else:
            imgs = {"observation.image": observations["pixels"]}

        for imgkey, img in imgs.items():
            # TODO(aliberts, rcadene): use transforms.ToTensor()?
            img = torch.from_numpy(img)

            # When preprocessing observations in a non-vectorized environment, we need to add a
            # batch dimension. This is the case for human-in-the-loop RL where there is only one
            # environment.
            if img.ndim == 3:
                img = img.unsqueeze(0)

            # sanity check that images are channel last
            _, h, w, c = img.shape
            assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}"

            # sanity check that images are uint8
            assert img.dtype == torch.uint8, f"expect torch.uint8, but instead {img.dtype=}"

            # convert to channel first of type float32 in range [0,1]
            img = einops.rearrange(img, "b h w c -> b c h w").contiguous()
            img = img.type(torch.float32)
            img /= 255

            return_observations[imgkey] = img

    if "environment_state" in observations:
        env_state = torch.from_numpy(observations["environment_state"]).float()
        if env_state.dim() == 1:
            env_state = env_state.unsqueeze(0)
        return_observations["observation.environment_state"] = env_state

    # TODO(rcadene): enable pixels only baseline with `obs_type="pixels"` in environment by removing
    # the requirement for "agent_pos"
    agent_pos = torch.from_numpy(observations["agent_pos"]).float()
    if agent_pos.dim() == 1:
        agent_pos = agent_pos.unsqueeze(0)
    return_observations["observation.state"] = agent_pos

    return return_observations


def env_to_policy_features(env_cfg: EnvConfig) -> dict[str, PolicyFeature]:
    # TODO(aliberts, rcadene): remove this hardcoding of keys and just use the nested keys as is
    # (need to also refactor preprocess_observation and externalize normalization from policies)
    """Translate an environment config's features into policy features.

    Visual features are converted to channel-first shapes; all other features pass through
    unchanged. Keys are renamed according to `env_cfg.features_map`.

    Args:
        env_cfg: Environment configuration providing `features` and `features_map`.

    Returns:
        Mapping from policy feature name to `PolicyFeature`.

    Raises:
        ValueError: If a visual feature does not have exactly 3 dimensions.
    """
    policy_features = {}
    for key, ft in env_cfg.features.items():
        if ft.type is FeatureType.VISUAL:
            if len(ft.shape) != 3:
                raise ValueError(f"Number of dimensions of {key} != 3 (shape={ft.shape})")

            shape = get_channel_first_image_shape(ft.shape)
            feature = PolicyFeature(type=ft.type, shape=shape)
        else:
            feature = ft
        policy_key = env_cfg.features_map[key]
        policy_features[policy_key] = feature

    return policy_features


def are_all_envs_same_type(env: gym.vector.VectorEnv) -> bool:
    """Return True if every sub-environment in the vector env has the same concrete type."""
    first_type = type(env.envs[0])  # Get type of first env
    return all(type(e) is first_type for e in env.envs)  # Fast type check


def check_env_attributes_and_types(env: gym.vector.VectorEnv) -> None:
    """Warn (at most once each) about missing task attributes or mixed sub-env types."""
    with warnings.catch_warnings():
        warnings.simplefilter("once", UserWarning)  # Apply filter only in this function

        if not (hasattr(env.envs[0], "task_description") and hasattr(env.envs[0], "task")):
            warnings.warn(
                "The environment does not have 'task_description' and 'task'. Some policies require these features.",
                UserWarning,
                stacklevel=2,
            )
        if not are_all_envs_same_type(env):
            warnings.warn(
                "The environments have different types. Make sure you infer the right task from each environment. Empty task will be passed instead.",
                UserWarning,
                stacklevel=2,
            )


def add_envs_task(env: gym.vector.VectorEnv, observation: dict[str, Any]) -> dict[str, Any]:
    """Adds task feature to the observation dict with respect to the first environment attribute."""
    if hasattr(env.envs[0], "task_description"):
        observation["task"] = env.call("task_description")
    elif hasattr(env.envs[0], "task"):
        observation["task"] = env.call("task")
    else:
        # For envs without language instructions, e.g. aloha transfer cube and etc.
        # Recover the batch size from any observation entry without materializing a key list.
        num_envs = next(iter(observation.values())).shape[0]
        observation["task"] = [""] * num_envs
    return observation
lerobot/src/lerobot/envs/utils.py/0
{ "file_path": "lerobot/src/lerobot/envs/utils.py", "repo_id": "lerobot", "token_count": 2199 }
204
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import Any

import draccus
import torch
from safetensors.torch import load_file, save_file

from lerobot.constants import (
    OPTIMIZER_PARAM_GROUPS,
    OPTIMIZER_STATE,
)
from lerobot.datasets.utils import flatten_dict, unflatten_dict, write_json
from lerobot.utils.io_utils import deserialize_json_into_object


@dataclass
class OptimizerConfig(draccus.ChoiceRegistry, abc.ABC):
    """Abstract base for optimizer configurations, selectable by name via draccus."""

    # Hyperparameters shared by every optimizer choice. grad_clip_norm is NOT passed
    # to the torch optimizer; it is popped in build() and presumably applied by the
    # training loop — TODO confirm against the trainer.
    lr: float
    weight_decay: float
    grad_clip_norm: float

    @property
    def type(self) -> str:
        # Name under which this concrete subclass was registered in the choice registry.
        return self.get_choice_name(self.__class__)

    @classmethod
    def default_choice_name(cls) -> str | None:
        # Used by draccus when no optimizer type is specified explicitly.
        return "adam"

    @abc.abstractmethod
    def build(self) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]:
        """
        Build the optimizer. It can be a single optimizer or a dictionary of optimizers.

        NOTE: Multiple optimizers are useful when you have different models to optimize. For
        example, you can have one optimizer for the policy and another one for the value
        function in reinforcement learning settings.

        NOTE: Concrete subclasses take the parameters to optimize as an argument, even
        though this abstract signature declares none.

        Returns:
            The optimizer or a dictionary of optimizers.
        """
        raise NotImplementedError


@OptimizerConfig.register_subclass("adam")
@dataclass
class AdamConfig(OptimizerConfig):
    """Configuration for a single `torch.optim.Adam` optimizer."""

    lr: float = 1e-3
    betas: tuple[float, float] = (0.9, 0.999)
    eps: float = 1e-8
    weight_decay: float = 0.0
    grad_clip_norm: float = 10.0

    def build(self, params: dict) -> torch.optim.Optimizer:
        """Build an Adam optimizer over `params` from this config's fields."""
        kwargs = asdict(self)
        # grad_clip_norm is not an Adam argument; it is consumed by the training loop.
        kwargs.pop("grad_clip_norm")
        return torch.optim.Adam(params, **kwargs)


@OptimizerConfig.register_subclass("adamw")
@dataclass
class AdamWConfig(OptimizerConfig):
    """Configuration for a single `torch.optim.AdamW` optimizer."""

    lr: float = 1e-3
    betas: tuple[float, float] = (0.9, 0.999)
    eps: float = 1e-8
    weight_decay: float = 1e-2
    grad_clip_norm: float = 10.0

    def build(self, params: dict) -> torch.optim.Optimizer:
        """Build an AdamW optimizer over `params` from this config's fields."""
        kwargs = asdict(self)
        # grad_clip_norm is not an AdamW argument; it is consumed by the training loop.
        kwargs.pop("grad_clip_norm")
        return torch.optim.AdamW(params, **kwargs)


@OptimizerConfig.register_subclass("sgd")
@dataclass
class SGDConfig(OptimizerConfig):
    """Configuration for a single `torch.optim.SGD` optimizer."""

    lr: float = 1e-3
    momentum: float = 0.0
    dampening: float = 0.0
    nesterov: bool = False
    weight_decay: float = 0.0
    grad_clip_norm: float = 10.0

    def build(self, params: dict) -> torch.optim.Optimizer:
        """Build an SGD optimizer over `params` from this config's fields."""
        kwargs = asdict(self)
        # grad_clip_norm is not an SGD argument; it is consumed by the training loop.
        kwargs.pop("grad_clip_norm")
        return torch.optim.SGD(params, **kwargs)


@OptimizerConfig.register_subclass("multi_adam")
@dataclass
class MultiAdamConfig(OptimizerConfig):
    """Configuration for multiple Adam optimizers with different parameter groups.

    This creates a dictionary of Adam optimizers, each with its own hyperparameters.

    Args:
        lr: Default learning rate (used if not specified for a group)
        weight_decay: Default weight decay (used if not specified for a group)
        optimizer_groups: Dictionary mapping parameter group names to their hyperparameters
        grad_clip_norm: Gradient clipping norm
    """

    lr: float = 1e-3
    weight_decay: float = 0.0
    grad_clip_norm: float = 10.0
    optimizer_groups: dict[str, dict[str, Any]] = field(default_factory=dict)

    def build(self, params_dict: dict[str, list]) -> dict[str, torch.optim.Optimizer]:
        """Build multiple Adam optimizers.

        Args:
            params_dict: Dictionary mapping parameter group names to lists of parameters
                The keys should match the keys in optimizer_groups

        Returns:
            Dictionary mapping parameter group names to their optimizers
        """
        optimizers = {}

        for name, params in params_dict.items():
            # Get group-specific hyperparameters or use defaults
            group_config = self.optimizer_groups.get(name, {})

            # Create optimizer with merged parameters (defaults + group-specific)
            # NOTE(review): the fallback eps here is 1e-5, unlike AdamConfig's 1e-8 —
            # confirm this difference is intentional.
            optimizer_kwargs = {
                "lr": group_config.get("lr", self.lr),
                "betas": group_config.get("betas", (0.9, 0.999)),
                "eps": group_config.get("eps", 1e-5),
                "weight_decay": group_config.get("weight_decay", self.weight_decay),
            }

            optimizers[name] = torch.optim.Adam(params, **optimizer_kwargs)

        return optimizers


def save_optimizer_state(
    optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path
) -> None:
    """Save optimizer state to disk.

    For a dictionary of optimizers, each optimizer's state goes into its own
    subdirectory of `save_dir` keyed by its name.

    Args:
        optimizer: Either a single optimizer or a dictionary of optimizers.
        save_dir: Directory to save the optimizer state.
    """
    if isinstance(optimizer, dict):
        # Handle dictionary of optimizers
        for name, opt in optimizer.items():
            optimizer_dir = save_dir / name
            optimizer_dir.mkdir(exist_ok=True, parents=True)
            _save_single_optimizer_state(opt, optimizer_dir)
    else:
        # Handle single optimizer
        _save_single_optimizer_state(optimizer, save_dir)


def _save_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> None:
    """Save a single optimizer's state to disk."""
    state = optimizer.state_dict()
    # param_groups contains non-tensor values (lrs, betas, ...), which safetensors
    # cannot store, so it is written separately as JSON.
    param_groups = state.pop("param_groups")
    flat_state = flatten_dict(state)
    save_file(flat_state, save_dir / OPTIMIZER_STATE)
    write_json(param_groups, save_dir / OPTIMIZER_PARAM_GROUPS)


def load_optimizer_state(
    optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path
) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]:
    """Load optimizer state from disk.

    Mirrors `save_optimizer_state`: for a dictionary of optimizers, each one is loaded
    from its own subdirectory; an optimizer whose subdirectory is missing is returned
    unchanged.

    Args:
        optimizer: Either a single optimizer or a dictionary of optimizers.
        save_dir: Directory to load the optimizer state from.

    Returns:
        The updated optimizer(s) with loaded state.
    """
    if isinstance(optimizer, dict):
        # Handle dictionary of optimizers
        loaded_optimizers = {}
        for name, opt in optimizer.items():
            optimizer_dir = save_dir / name
            if optimizer_dir.exists():
                loaded_optimizers[name] = _load_single_optimizer_state(opt, optimizer_dir)
            else:
                loaded_optimizers[name] = opt
        return loaded_optimizers
    else:
        # Handle single optimizer
        return _load_single_optimizer_state(optimizer, save_dir)


def _load_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> torch.optim.Optimizer:
    """Load a single optimizer's state from disk."""
    current_state_dict = optimizer.state_dict()
    flat_state = load_file(save_dir / OPTIMIZER_STATE)
    state = unflatten_dict(flat_state)

    # Handle case where 'state' key might not exist (for newly created optimizers)
    if "state" in state:
        # safetensors keys are strings; per-parameter state is indexed by int in torch.
        loaded_state_dict = {"state": {int(k): v for k, v in state["state"].items()}}
    else:
        loaded_state_dict = {"state": {}}

    if "param_groups" in current_state_dict:
        # Deserialize the JSON sidecar against the current param_groups structure.
        param_groups = deserialize_json_into_object(
            save_dir / OPTIMIZER_PARAM_GROUPS, current_state_dict["param_groups"]
        )
        loaded_state_dict["param_groups"] = param_groups

    optimizer.load_state_dict(loaded_state_dict)
    return optimizer
lerobot/src/lerobot/optim/optimizers.py/0
{ "file_path": "lerobot/src/lerobot/optim/optimizers.py", "repo_id": "lerobot", "token_count": 3104 }
205
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn.functional as F  # noqa: N812
from packaging.version import Version

if Version(torch.__version__) >= Version("2.5.0"):
    # Flex attention is only available from torch 2.5 onwards, so guard the import.
    # NOTE: `>=` (not `>`) so that torch 2.5.0 itself — the first release shipping flex
    # attention — takes this branch; with `>` the names below would be undefined on
    # exactly 2.5.0 and flex_attention_forward would raise NameError.
    from torch.nn.attention.flex_attention import (
        _mask_mod_signature,
        _round_up_to_multiple,
        create_block_mask,
        create_mask,
        flex_attention,
    )


# @torch.compile(dynamic=False)
def flex_attention_forward(
    attention_mask: torch.Tensor,
    batch_size: int,
    head_dim: int,
    query_states: torch.Tensor,
    key_states: torch.Tensor,
    value_states: torch.Tensor,
    scaling=None,
):
    """Grouped-query flex-attention forward pass with a precomputed attention mask.

    This is defined out of classes to make compile happy.

    Args:
        attention_mask: mask indexed as (batch, q_len, kv_len); it is sliced and
            broadcast to 4D below. NOTE(review): passing None fails when unpacking
            `causal_mask.shape` — callers are expected to always provide a mask.
        batch_size: batch dimension of the query/key/value tensors.
        head_dim: per-head embedding dimension.
        query_states: query tensor, laid out (batch, seq, heads, head_dim) before the
            transpose below — assumed, confirm against caller.
        key_states: key tensor with a single KV head (expanded for GQA below).
        value_states: value tensor with a single KV head (expanded for GQA below).
        scaling: optional attention scale; defaults to head_dim**-0.5.

    Returns:
        Attention output reshaped to (batch, q_len, heads * head_dim), in the input dtype.
    """
    original_dtype = query_states.dtype
    # NOTE(review): head counts are hardcoded (8 query heads, 1 KV head) — presumably
    # matched to the model this module serves; confirm before reusing elsewhere.
    num_att_heads = 8
    num_key_value_heads = 1
    num_key_value_groups = num_att_heads // num_key_value_heads

    # Expand the single KV head across the query-head groups (GQA), then flatten the
    # (kv_heads, groups) axes back into one head axis.
    key_states = key_states[:, :, :, None, :]
    key_states = key_states.expand(
        batch_size, key_states.shape[1], num_key_value_heads, num_key_value_groups, head_dim
    )
    key_states = key_states.reshape(
        batch_size, key_states.shape[1], num_key_value_heads * num_key_value_groups, head_dim
    )

    value_states = value_states[:, :, :, None, :]
    value_states = value_states.expand(
        batch_size, value_states.shape[1], num_key_value_heads, num_key_value_groups, head_dim
    )
    value_states = value_states.reshape(
        batch_size, value_states.shape[1], num_key_value_heads * num_key_value_groups, head_dim
    )

    # (batch, seq, heads, head_dim) -> (batch, heads, seq, head_dim) as flex_attention expects.
    query_states = query_states.transpose(1, 2)
    key_states = key_states.transpose(1, 2)
    value_states = value_states.transpose(1, 2)

    # Compute attention in float32; cast back to the original dtype at the end.
    query_states = query_states.to(torch.float32)
    key_states = key_states.to(torch.float32)
    value_states = value_states.to(torch.float32)

    causal_mask = attention_mask
    if causal_mask is not None:
        causal_mask = causal_mask[:, None, :, : key_states.shape[2]]

        if causal_mask.shape[1] == 1 and query_states.shape[1] > 1:
            causal_mask = causal_mask.expand(-1, query_states.shape[1], -1, -1)

    def precomputed_mask_factory(precomputed_mask: torch.Tensor) -> _mask_mod_signature:
        def mask_mod(b, h, q_idx, kv_idx):
            # Danger zone: if b, h, q_idx, kv_idx exceed the mask's shape, a device-side
            # assert occurs — hence the padding to block-size multiples below.
            return precomputed_mask[b][h][q_idx][kv_idx]

        return mask_mod

    b_mask, h_mask, q_len, kv_len = causal_mask.shape  # The shape of your mask

    block_size = 128
    q_len_rounded = _round_up_to_multiple(q_len, block_size)
    kv_len_rounded = _round_up_to_multiple(kv_len, block_size)

    # *CRITICAL* we do need to expand here, else we get a CUDA index error
    pad_q = q_len_rounded - q_len
    pad_k = kv_len_rounded - kv_len

    padded_causal_mask = F.pad(causal_mask, (0, pad_k, 0, pad_q), value=0.0)
    mask_mod_fn_orig = precomputed_mask_factory(padded_causal_mask)

    mask_4d = create_mask(
        mod_fn=mask_mod_fn_orig,
        B=b_mask,
        H=h_mask,
        Q_LEN=q_len_rounded,
        KV_LEN=kv_len_rounded,
        device=causal_mask.device,
        _compile=False,
    )

    mask_mod_fn_padded = precomputed_mask_factory(mask_4d)
    block_mask = create_block_mask(
        mask_mod=mask_mod_fn_padded,
        B=b_mask,
        H=h_mask,
        Q_LEN=q_len_rounded,
        KV_LEN=kv_len_rounded,
        BLOCK_SIZE=block_size,
        device=causal_mask.device,
        _compile=False,
    )

    # mask is applied inside the kernel, ideally more efficiently than score_mod.
    # The log-sum-exp returned alongside the output is unused here.
    attn_output, _ = flex_attention(
        query_states,
        key_states,
        value_states,
        block_mask=block_mask,
        enable_gqa=True,  # because we shaped query/key states for GQA
        scale=head_dim**-0.5 if scaling is None else scaling,
        return_lse=True,
    )

    attn_output = attn_output.to(dtype=original_dtype)
    attn_output = attn_output.transpose(1, 2).contiguous()  # [B, Q_LEN, H, head_dim]
    attn_output = attn_output.reshape(
        batch_size,
        -1,
        attn_output.shape[2] * attn_output.shape[3],  # merges [H, head_dim]
    )
    return attn_output
lerobot/src/lerobot/policies/pi0/flex_attention.py/0
{ "file_path": "lerobot/src/lerobot/policies/pi0/flex_attention.py", "repo_id": "lerobot", "token_count": 2050 }
206
#!/usr/bin/env python # Copyright 2024 Nicklas Hansen, Xiaolong Wang, Hao Su, # and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of Finetuning Offline World Models in the Real World. The comments in this code may sometimes refer to these references: TD-MPC paper: Temporal Difference Learning for Model Predictive Control (https://huggingface.co/papers/2203.04955) FOWM paper: Finetuning Offline World Models in the Real World (https://huggingface.co/papers/2310.16029) """ # ruff: noqa: N806 from collections import deque from collections.abc import Callable from copy import deepcopy from functools import partial import einops import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # noqa: N812 from torch import Tensor from lerobot.constants import ACTION, OBS_ENV_STATE, OBS_IMAGE, OBS_STATE, REWARD from lerobot.policies.normalize import Normalize, Unnormalize from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.policies.utils import get_device_from_parameters, get_output_shape, populate_queues class TDMPCPolicy(PreTrainedPolicy): """Implementation of TD-MPC learning + inference. Please note several warnings for this policy. - Evaluation of pretrained weights created with the original FOWM code (https://github.com/fyhMer/fowm) works as expected. 
To be precise: we trained and evaluated a model with the FOWM code for the xarm_lift_medium_replay dataset. We ported the weights across to LeRobot, and were able to evaluate with the same success metric. BUT, we had to use inter- process communication to use the xarm environment from FOWM. This is because our xarm environment uses newer dependencies and does not match the environment in FOWM. See https://github.com/huggingface/lerobot/pull/103 for implementation details. - We have NOT checked that training on LeRobot reproduces the results from FOWM. - Nevertheless, we have verified that we can train TD-MPC for PushT. See `lerobot/configs/policy/tdmpc_pusht_keypoints.yaml`. - Our current xarm datasets were generated using the environment from FOWM. Therefore they do not match our xarm environment. """ config_class = TDMPCConfig name = "tdmpc" def __init__(self, config: TDMPCConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None): """ Args: config: Policy configuration class instance or None, in which case the default instantiation of the configuration class is used. dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. """ super().__init__(config) config.validate_features() self.config = config self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) self.normalize_targets = Normalize( config.output_features, config.normalization_mapping, dataset_stats ) self.unnormalize_outputs = Unnormalize( config.output_features, config.normalization_mapping, dataset_stats ) self.model = TDMPCTOLD(config) self.model_target = deepcopy(self.model) for param in self.model_target.parameters(): param.requires_grad = False self.reset() def get_optim_params(self) -> dict: return self.parameters() def reset(self): """ Clear observation and action queues. 
Clear previous means for warm starting of MPPI/CEM. Should be called on `env.reset()` """ self._queues = { "observation.state": deque(maxlen=1), "action": deque(maxlen=max(self.config.n_action_steps, self.config.n_action_repeats)), } if self.config.image_features: self._queues["observation.image"] = deque(maxlen=1) if self.config.env_state_feature: self._queues["observation.environment_state"] = deque(maxlen=1) # Previous mean obtained from the cross-entropy method (CEM) used during MPC. It is used to warm start # CEM for the next step. self._prev_mean: torch.Tensor | None = None @torch.no_grad() def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations.""" batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch if key in self._queues} # Remove the time dimensions as it is not handled yet. for key in batch: assert batch[key].shape[1] == 1 batch[key] = batch[key][:, 0] # NOTE: Order of observations matters here. encode_keys = [] if self.config.image_features: encode_keys.append(OBS_IMAGE) if self.config.env_state_feature: encode_keys.append(OBS_ENV_STATE) encode_keys.append(OBS_STATE) z = self.model.encode({k: batch[k] for k in encode_keys}) if self.config.use_mpc: # noqa: SIM108 actions = self.plan(z) # (horizon, batch, action_dim) else: # Plan with the policy (π) alone. This always returns one action so unsqueeze to get a # sequence dimension like in the MPC branch. 
actions = self.model.pi(z).unsqueeze(0) actions = torch.clamp(actions, -1, +1) actions = self.unnormalize_outputs({ACTION: actions})[ACTION] return actions @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations.""" # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out if ACTION in batch: batch.pop(ACTION) batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))] self._queues = populate_queues(self._queues, batch) # When the action queue is depleted, populate it again by querying the policy. if len(self._queues[ACTION]) == 0: actions = self.predict_action_chunk(batch) if self.config.n_action_repeats > 1: for _ in range(self.config.n_action_repeats): self._queues[ACTION].append(actions[0]) else: # Action queue is (n_action_steps, batch_size, action_dim), so we transpose the action. self._queues[ACTION].extend(actions[: self.config.n_action_steps]) action = self._queues[ACTION].popleft() return action @torch.no_grad() def plan(self, z: Tensor) -> Tensor: """Plan sequence of actions using TD-MPC inference. Args: z: (batch, latent_dim,) tensor for the initial state. Returns: (horizon, batch, action_dim,) tensor for the planned trajectory of actions. """ device = get_device_from_parameters(self) batch_size = z.shape[0] # Sample Nπ trajectories from the policy. pi_actions = torch.empty( self.config.horizon, self.config.n_pi_samples, batch_size, self.config.action_feature.shape[0], device=device, ) if self.config.n_pi_samples > 0: _z = einops.repeat(z, "b d -> n b d", n=self.config.n_pi_samples) for t in range(self.config.horizon): # Note: Adding a small amount of noise here doesn't hurt during inference and may even be # helpful for CEM. 
pi_actions[t] = self.model.pi(_z, self.config.min_std) _z = self.model.latent_dynamics(_z, pi_actions[t]) # In the CEM loop we will need this for a call to estimate_value with the gaussian sampled # trajectories. z = einops.repeat(z, "b d -> n b d", n=self.config.n_gaussian_samples + self.config.n_pi_samples) # Model Predictive Path Integral (MPPI) with the cross-entropy method (CEM) as the optimization # algorithm. # The initial mean and standard deviation for the cross-entropy method (CEM). mean = torch.zeros( self.config.horizon, batch_size, self.config.action_feature.shape[0], device=device ) # Maybe warm start CEM with the mean from the previous step. if self._prev_mean is not None: mean[:-1] = self._prev_mean[1:] std = self.config.max_std * torch.ones_like(mean) for _ in range(self.config.cem_iterations): # Randomly sample action trajectories for the gaussian distribution. std_normal_noise = torch.randn( self.config.horizon, self.config.n_gaussian_samples, batch_size, self.config.action_feature.shape[0], device=std.device, ) gaussian_actions = torch.clamp(mean.unsqueeze(1) + std.unsqueeze(1) * std_normal_noise, -1, 1) # Compute elite actions. actions = torch.cat([gaussian_actions, pi_actions], dim=1) value = self.estimate_value(z, actions).nan_to_num_(0) elite_idxs = torch.topk(value, self.config.n_elites, dim=0).indices # (n_elites, batch) elite_value = value.take_along_dim(elite_idxs, dim=0) # (n_elites, batch) # (horizon, n_elites, batch, action_dim) elite_actions = actions.take_along_dim(einops.rearrange(elite_idxs, "n b -> 1 n b 1"), dim=1) # Update gaussian PDF parameters to be the (weighted) mean and standard deviation of the elites. max_value = elite_value.max(0, keepdim=True)[0] # (1, batch) # The weighting is a softmax over trajectory values. Note that this is not the same as the usage # of Ω in eqn 4 of the TD-MPC paper. Instead it is the normalized version of it: s = Ω/ΣΩ. This # makes the equations: μ = Σ(s⋅Γ), σ = Σ(s⋅(Γ-μ)²). 
score = torch.exp(self.config.elite_weighting_temperature * (elite_value - max_value)) score /= score.sum(axis=0, keepdim=True) # (horizon, batch, action_dim) _mean = torch.sum(einops.rearrange(score, "n b -> n b 1") * elite_actions, dim=1) _std = torch.sqrt( torch.sum( einops.rearrange(score, "n b -> n b 1") * (elite_actions - einops.rearrange(_mean, "h b d -> h 1 b d")) ** 2, dim=1, ) ) # Update mean with an exponential moving average, and std with a direct replacement. mean = ( self.config.gaussian_mean_momentum * mean + (1 - self.config.gaussian_mean_momentum) * _mean ) std = _std.clamp_(self.config.min_std, self.config.max_std) # Keep track of the mean for warm-starting subsequent steps. self._prev_mean = mean # Randomly select one of the elite actions from the last iteration of MPPI/CEM using the softmax # scores from the last iteration. actions = elite_actions[:, torch.multinomial(score.T, 1).squeeze(), torch.arange(batch_size)] return actions @torch.no_grad() def estimate_value(self, z: Tensor, actions: Tensor): """Estimates the value of a trajectory as per eqn 4 of the FOWM paper. Args: z: (batch, latent_dim) tensor of initial latent states. actions: (horizon, batch, action_dim) tensor of action trajectories. Returns: (batch,) tensor of values. """ # Initialize return and running discount factor. G, running_discount = 0, 1 # Iterate over the actions in the trajectory to simulate the trajectory using the latent dynamics # model. Keep track of return. for t in range(actions.shape[0]): # We will compute the reward in a moment. First compute the uncertainty regularizer from eqn 4 # of the FOWM paper. if self.config.uncertainty_regularizer_coeff > 0: regularization = -( self.config.uncertainty_regularizer_coeff * self.model.Qs(z, actions[t]).std(0) ) else: regularization = 0 # Estimate the next state (latent) and reward. z, reward = self.model.latent_dynamics_and_reward(z, actions[t]) # Update the return and running discount. 
G += running_discount * (reward + regularization) running_discount *= self.config.discount # Add the estimated value of the final state (using the minimum for a conservative estimate). # Do so by predicting the next action, then taking a minimum over the ensemble of state-action value # estimators. # Note: This small amount of added noise seems to help a bit at inference time as observed by success # metrics over 50 episodes of xarm_lift_medium_replay. next_action = self.model.pi(z, self.config.min_std) # (batch, action_dim) terminal_values = self.model.Qs(z, next_action) # (ensemble, batch) # Randomly choose 2 of the Qs for terminal value estimation (as in App C. of the FOWM paper). if self.config.q_ensemble_size > 2: G += ( running_discount * torch.min(terminal_values[torch.randint(0, self.config.q_ensemble_size, size=(2,))], dim=0)[ 0 ] ) else: G += running_discount * torch.min(terminal_values, dim=0)[0] # Finally, also regularize the terminal value. if self.config.uncertainty_regularizer_coeff > 0: G -= running_discount * self.config.uncertainty_regularizer_coeff * terminal_values.std(0) return G def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: """Run the batch through the model and compute the loss. Returns a dictionary with loss as a tensor, and other information as native floats. """ device = get_device_from_parameters(self) batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))] batch = self.normalize_targets(batch) info = {} # (b, t) -> (t, b) for key in batch: if isinstance(batch[key], torch.Tensor) and batch[key].ndim > 1: batch[key] = batch[key].transpose(1, 0) action = batch[ACTION] # (t, b, action_dim) reward = batch[REWARD] # (t, b) observations = {k: v for k, v in batch.items() if k.startswith("observation.")} # Apply random image augmentations. 
if self.config.image_features and self.config.max_random_shift_ratio > 0: observations[OBS_IMAGE] = flatten_forward_unflatten( partial(random_shifts_aug, max_random_shift_ratio=self.config.max_random_shift_ratio), observations[OBS_IMAGE], ) # Get the current observation for predicting trajectories, and all future observations for use in # the latent consistency loss and TD loss. current_observation, next_observations = {}, {} for k in observations: current_observation[k] = observations[k][0] next_observations[k] = observations[k][1:] horizon, batch_size = next_observations[ OBS_IMAGE if self.config.image_features else OBS_ENV_STATE ].shape[:2] # Run latent rollout using the latent dynamics model and policy model. # Note this has shape `horizon+1` because there are `horizon` actions and a current `z`. Each action # gives us a next `z`. batch_size = batch["index"].shape[0] z_preds = torch.empty(horizon + 1, batch_size, self.config.latent_dim, device=device) z_preds[0] = self.model.encode(current_observation) reward_preds = torch.empty_like(reward, device=device) for t in range(horizon): z_preds[t + 1], reward_preds[t] = self.model.latent_dynamics_and_reward(z_preds[t], action[t]) # Compute Q and V value predictions based on the latent rollout. q_preds_ensemble = self.model.Qs(z_preds[:-1], action) # (ensemble, horizon, batch) v_preds = self.model.V(z_preds[:-1]) info.update({"Q": q_preds_ensemble.mean().item(), "V": v_preds.mean().item()}) # Compute various targets with stopgrad. with torch.no_grad(): # Latent state consistency targets. z_targets = self.model_target.encode(next_observations) # State-action value targets (or TD targets) as in eqn 3 of the FOWM. Unlike TD-MPC which uses the # learned state-action value function in conjunction with the learned policy: Q(z, π(z)), FOWM # uses a learned state value function: V(z). This means the TD targets only depend on in-sample # actions (not actions estimated by π). 
# Note: Here we do not use self.model_target, but self.model. This is to follow the original code # and the FOWM paper. q_targets = reward + self.config.discount * self.model.V(self.model.encode(next_observations)) # From eqn 3 of FOWM. These appear as Q(z, a). Here we call them v_targets to emphasize that we # are using them to compute loss for V. v_targets = self.model_target.Qs(z_preds[:-1].detach(), action, return_min=True) # Compute losses. # Exponentially decay the loss weight with respect to the timestep. Steps that are more distant in the # future have less impact on the loss. Note: unsqueeze will let us broadcast to (seq, batch). temporal_loss_coeffs = torch.pow( self.config.temporal_decay_coeff, torch.arange(horizon, device=device) ).unsqueeze(-1) # Compute consistency loss as MSE loss between latents predicted from the rollout and latents # predicted from the (target model's) observation encoder. consistency_loss = ( ( temporal_loss_coeffs * F.mse_loss(z_preds[1:], z_targets, reduction="none").mean(dim=-1) # `z_preds` depends on the current observation and the actions. * ~batch["observation.state_is_pad"][0] * ~batch["action_is_pad"] # `z_targets` depends on the next observation. * ~batch["observation.state_is_pad"][1:] ) .sum(0) .mean() ) # Compute the reward loss as MSE loss between rewards predicted from the rollout and the dataset # rewards. reward_loss = ( ( temporal_loss_coeffs * F.mse_loss(reward_preds, reward, reduction="none") * ~batch["next.reward_is_pad"] # `reward_preds` depends on the current observation and the actions. * ~batch["observation.state_is_pad"][0] * ~batch["action_is_pad"] ) .sum(0) .mean() ) # Compute state-action value loss (TD loss) for all of the Q functions in the ensemble. 
        # TD target regression for the Q ensemble (eqn 2 of FOWM-style objective).
        q_value_loss = (
            (
                temporal_loss_coeffs
                * F.mse_loss(
                    q_preds_ensemble,
                    einops.repeat(q_targets, "t b -> e t b", e=q_preds_ensemble.shape[0]),
                    reduction="none",
                ).sum(0)  # sum over ensemble
                # `q_preds_ensemble` depends on the first observation and the actions.
                * ~batch["observation.state_is_pad"][0]
                * ~batch["action_is_pad"]
                # q_targets depends on the reward and the next observations.
                * ~batch["next.reward_is_pad"]
                * ~batch["observation.state_is_pad"][1:]
            )
            .sum(0)
            .mean()
        )
        # Compute state value loss as in eqn 3 of FOWM.
        diff = v_targets - v_preds
        # Expectile loss penalizes:
        # - `v_preds < v_targets` with weighting `expectile_weight`
        # - `v_preds >= v_targets` with weighting `1 - expectile_weight`
        raw_v_value_loss = torch.where(
            diff > 0, self.config.expectile_weight, (1 - self.config.expectile_weight)
        ) * (diff**2)
        v_value_loss = (
            (
                temporal_loss_coeffs
                * raw_v_value_loss
                # `v_targets` depends on the first observation and the actions, as does `v_preds`.
                * ~batch["observation.state_is_pad"][0]
                * ~batch["action_is_pad"]
            )
            .sum(0)
            .mean()
        )

        # Calculate the advantage weighted regression loss for π as detailed in FOWM 3.1.
        # We won't need these gradients again so detach.
        z_preds = z_preds.detach()
        # Use stopgrad for the advantage calculation.
        with torch.no_grad():
            advantage = self.model_target.Qs(z_preds[:-1], action, return_min=True) - self.model.V(
                z_preds[:-1]
            )
            info["advantage"] = advantage[0]
            # (t, b)
            exp_advantage = torch.clamp(torch.exp(advantage * self.config.advantage_scaling), max=100.0)
        action_preds = self.model.pi(z_preds[:-1])  # (t, b, a)
        # Calculate the MSE between the actions and the action predictions.
        # Note: FOWM's original code calculates the log probability (wrt to a unit standard deviation
        # gaussian) and sums over the action dimension. Computing the (negative) log probability amounts to
        # multiplying the MSE by 0.5 and adding a constant offset (the log(2*pi)/2 term, times the action
        # dimension). Here we drop the constant offset as it doesn't change the optimization step, and we drop
        # the 0.5 as we instead make a configuration parameter for it (see below where we compute the total
        # loss).
        mse = F.mse_loss(action_preds, action, reduction="none").sum(-1)  # (t, b)
        # NOTE: The original implementation does not take the sum over the temporal dimension like with the
        # other losses.
        # TODO(alexander-soare): Take the sum over the temporal dimension and check that training still works
        # as well as expected.
        pi_loss = (
            exp_advantage
            * mse
            * temporal_loss_coeffs
            # `action_preds` depends on the first observation and the actions.
            * ~batch["observation.state_is_pad"][0]
            * ~batch["action_is_pad"]
        ).mean()

        # Weighted sum of all objectives; the coefficients come from the policy config.
        loss = (
            self.config.consistency_coeff * consistency_loss
            + self.config.reward_coeff * reward_loss
            + self.config.value_coeff * q_value_loss
            + self.config.value_coeff * v_value_loss
            + self.config.pi_coeff * pi_loss
        )

        info.update(
            {
                "consistency_loss": consistency_loss.item(),
                "reward_loss": reward_loss.item(),
                "Q_value_loss": q_value_loss.item(),
                "V_value_loss": v_value_loss.item(),
                "pi_loss": pi_loss.item(),
                "sum_loss": loss.item() * self.config.horizon,
            }
        )

        # Undo (b, t) -> (t, b).
        for key in batch:
            if isinstance(batch[key], torch.Tensor) and batch[key].ndim > 1:
                batch[key] = batch[key].transpose(1, 0)

        return loss, info

    def update(self):
        """Update the target model's parameters with an EMA step."""
        # Note a minor variation with respect to the original FOWM code. Here they do this based on an EMA
        # update frequency parameter which is set to 2 (every 2 steps an update is done). To simplify the code
        # we update every step and adjust the decay parameter `alpha` accordingly (0.99 -> 0.995)
        update_ema_parameters(self.model_target, self.model, self.config.target_model_momentum)


class TDMPCTOLD(nn.Module):
    """Task-Oriented Latent Dynamics (TOLD) model used in TD-MPC."""

    def __init__(self, config: TDMPCConfig):
        super().__init__()
        self.config = config
        self._encoder = TDMPCObservationEncoder(config)
        # Latent transition model: (z, a) -> z'. The final Sigmoid keeps latents in (0, 1),
        # matching the encoder's output range.
        self._dynamics = nn.Sequential(
            nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim),
            nn.LayerNorm(config.mlp_dim),
            nn.Mish(),
            nn.Linear(config.mlp_dim, config.mlp_dim),
            nn.LayerNorm(config.mlp_dim),
            nn.Mish(),
            nn.Linear(config.mlp_dim, config.latent_dim),
            nn.LayerNorm(config.latent_dim),
            nn.Sigmoid(),
        )
        # Reward head: (z, a) -> scalar reward.
        self._reward = nn.Sequential(
            nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim),
            nn.LayerNorm(config.mlp_dim),
            nn.Mish(),
            nn.Linear(config.mlp_dim, config.mlp_dim),
            nn.LayerNorm(config.mlp_dim),
            nn.Mish(),
            nn.Linear(config.mlp_dim, 1),
        )
        # Policy head: z -> pre-tanh action (tanh is applied in `pi`).
        self._pi = nn.Sequential(
            nn.Linear(config.latent_dim, config.mlp_dim),
            nn.LayerNorm(config.mlp_dim),
            nn.Mish(),
            nn.Linear(config.mlp_dim, config.mlp_dim),
            nn.LayerNorm(config.mlp_dim),
            nn.Mish(),
            nn.Linear(config.mlp_dim, config.action_feature.shape[0]),
        )
        # Ensemble of Q functions: (z, a) -> scalar value, one head per ensemble member.
        self._Qs = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Linear(config.latent_dim + config.action_feature.shape[0], config.mlp_dim),
                    nn.LayerNorm(config.mlp_dim),
                    nn.Tanh(),
                    nn.Linear(config.mlp_dim, config.mlp_dim),
                    nn.ELU(),
                    nn.Linear(config.mlp_dim, 1),
                )
                for _ in range(config.q_ensemble_size)
            ]
        )
        # State value head: z -> scalar value.
        self._V = nn.Sequential(
            nn.Linear(config.latent_dim, config.mlp_dim),
            nn.LayerNorm(config.mlp_dim),
            nn.Tanh(),
            nn.Linear(config.mlp_dim, config.mlp_dim),
            nn.ELU(),
            nn.Linear(config.mlp_dim, 1),
        )
        self._init_weights()

    def _init_weights(self):
        """Initialize model weights.

        Orthogonal initialization for all linear and convolutional layers' weights (apart from final layers
        of reward network and Q networks which get zero initialization).
        Zero initialization for all linear and convolutional layers' biases.
        """

        def _apply_fn(m):
            if isinstance(m, nn.Linear):
                nn.init.orthogonal_(m.weight.data)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Conv2d):
                gain = nn.init.calculate_gain("relu")
                nn.init.orthogonal_(m.weight.data, gain)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

        self.apply(_apply_fn)
        # Zero-init the final layers of the reward and Q heads so initial predictions are 0.
        for m in [self._reward, *self._Qs]:
            assert isinstance(m[-1], nn.Linear), (
                "Sanity check. The last linear layer needs 0 initialization on weights."
            )
            nn.init.zeros_(m[-1].weight)
            nn.init.zeros_(m[-1].bias)  # this has already been done, but keep this line here for good measure

    def encode(self, obs: dict[str, Tensor]) -> Tensor:
        """Encodes an observation into its latent representation."""
        return self._encoder(obs)

    def latent_dynamics_and_reward(self, z: Tensor, a: Tensor) -> tuple[Tensor, Tensor]:
        """Predict the next state's latent representation and the reward given a current latent and action.

        Args:
            z: (*, latent_dim) tensor for the current state's latent representation.
            a: (*, action_dim) tensor for the action to be applied.
        Returns:
            A tuple containing:
                - (*, latent_dim) tensor for the next state's latent representation.
                - (*,) tensor for the estimated reward.
        """
        x = torch.cat([z, a], dim=-1)
        return self._dynamics(x), self._reward(x).squeeze(-1)

    def latent_dynamics(self, z: Tensor, a: Tensor) -> Tensor:
        """Predict the next state's latent representation given a current latent and action.

        Args:
            z: (*, latent_dim) tensor for the current state's latent representation.
            a: (*, action_dim) tensor for the action to be applied.
        Returns:
            (*, latent_dim) tensor for the next state's latent representation.
        """
        x = torch.cat([z, a], dim=-1)
        return self._dynamics(x)

    def pi(self, z: Tensor, std: float = 0.0) -> Tensor:
        """Samples an action from the learned policy.

        The policy can also have added (truncated) Gaussian noise injected for encouraging exploration when
        generating rollouts for online training.

        Args:
            z: (*, latent_dim) tensor for the current state's latent representation.
            std: The standard deviation of the injected noise.
        Returns:
            (*, action_dim) tensor for the sampled action.
        """
        action = torch.tanh(self._pi(z))
        if std > 0:
            std = torch.ones_like(action) * std
            action += torch.randn_like(action) * std
        return action

    def V(self, z: Tensor) -> Tensor:  # noqa: N802
        """Predict state value (V).

        Args:
            z: (*, latent_dim) tensor for the current state's latent representation.
        Returns:
            (*,) tensor of estimated state values.
        """
        return self._V(z).squeeze(-1)

    def Qs(self, z: Tensor, a: Tensor, return_min: bool = False) -> Tensor:  # noqa: N802
        """Predict state-action value for all of the learned Q functions.

        Args:
            z: (*, latent_dim) tensor for the current state's latent representation.
            a: (*, action_dim) tensor for the action to be applied.
            return_min: Set to true for implementing the detail in App. C of the FOWM paper: randomly select
                2 of the Qs and return the minimum
        Returns:
            (q_ensemble, *) tensor for the value predictions of each learned Q function in the ensemble OR
            (*,) tensor if return_min=True.
        """
        x = torch.cat([z, a], dim=-1)
        if not return_min:
            return torch.stack([q(x).squeeze(-1) for q in self._Qs], dim=0)
        else:
            # NOTE(review): `np.random.choice` samples WITH replacement by default, so the same Q head can
            # be picked twice — confirm whether distinct heads were intended.
            if len(self._Qs) > 2:  # noqa: SIM108
                Qs = [self._Qs[i] for i in np.random.choice(len(self._Qs), size=2)]
            else:
                Qs = self._Qs
            return torch.stack([q(x).squeeze(-1) for q in Qs], dim=0).min(dim=0)[0]


class TDMPCObservationEncoder(nn.Module):
    """Encode image and/or state vector observations."""

    def __init__(self, config: TDMPCConfig):
        """
        Creates encoders for pixel and/or state modalities.
        TODO(alexander-soare): The original work allows for multiple images by concatenating them along the
            channel dimension. Re-implement this capability.
        """
        super().__init__()
        self.config = config

        if config.image_features:
            # CNN trunk; the flatten + linear projection is appended below once the conv
            # output shape is known (probed with a dummy forward pass).
            self.image_enc_layers = nn.Sequential(
                nn.Conv2d(
                    next(iter(config.image_features.values())).shape[0],
                    config.image_encoder_hidden_dim,
                    7,
                    stride=2,
                ),
                nn.ReLU(),
                nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 5, stride=2),
                nn.ReLU(),
                nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 3, stride=2),
                nn.ReLU(),
                nn.Conv2d(config.image_encoder_hidden_dim, config.image_encoder_hidden_dim, 3, stride=2),
                nn.ReLU(),
            )
            dummy_shape = (1, *next(iter(config.image_features.values())).shape)
            out_shape = get_output_shape(self.image_enc_layers, dummy_shape)[1:]
            self.image_enc_layers.extend(
                nn.Sequential(
                    nn.Flatten(),
                    nn.Linear(np.prod(out_shape), config.latent_dim),
                    nn.LayerNorm(config.latent_dim),
                    nn.Sigmoid(),
                )
            )
        if config.robot_state_feature:
            self.state_enc_layers = nn.Sequential(
                nn.Linear(config.robot_state_feature.shape[0], config.state_encoder_hidden_dim),
                nn.ELU(),
                nn.Linear(config.state_encoder_hidden_dim, config.latent_dim),
                nn.LayerNorm(config.latent_dim),
                nn.Sigmoid(),
            )
        if config.env_state_feature:
            self.env_state_enc_layers = nn.Sequential(
                nn.Linear(config.env_state_feature.shape[0], config.state_encoder_hidden_dim),
                nn.ELU(),
                nn.Linear(config.state_encoder_hidden_dim, config.latent_dim),
                nn.LayerNorm(config.latent_dim),
                nn.Sigmoid(),
            )

    def forward(self, obs_dict: dict[str, Tensor]) -> Tensor:
        """Encode the image and/or state vector.

        Each modality is encoded into a feature vector of size (latent_dim,) and then a uniform mean is
        taken over all features.
        """
        feat = []
        # NOTE: Order of observations matters here.
        if self.config.image_features:
            feat.append(
                flatten_forward_unflatten(
                    self.image_enc_layers, obs_dict[next(iter(self.config.image_features))]
                )
            )
        if self.config.env_state_feature:
            feat.append(self.env_state_enc_layers(obs_dict[OBS_ENV_STATE]))
        if self.config.robot_state_feature:
            feat.append(self.state_enc_layers(obs_dict[OBS_STATE]))
        return torch.stack(feat, dim=0).mean(0)


def random_shifts_aug(x: Tensor, max_random_shift_ratio: float) -> Tensor:
    """Randomly shifts images horizontally and vertically.

    Adapted from https://github.com/facebookresearch/drqv2
    """
    b, _, h, w = x.size()
    assert h == w, "non-square images not handled yet"
    pad = int(round(max_random_shift_ratio * h))
    x = F.pad(x, tuple([pad] * 4), "replicate")
    eps = 1.0 / (h + 2 * pad)
    # Normalized sampling grid in [-1, 1] as expected by `grid_sample`.
    arange = torch.linspace(
        -1.0 + eps,
        1.0 - eps,
        h + 2 * pad,
        device=x.device,
        dtype=torch.float32,
    )[:h]
    arange = einops.repeat(arange, "w -> h w 1", h=h)
    base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
    base_grid = einops.repeat(base_grid, "h w c -> b h w c", b=b)
    # A random shift in units of pixels and within the boundaries of the padding.
    shift = torch.randint(
        0,
        2 * pad + 1,
        size=(b, 1, 1, 2),
        device=x.device,
        dtype=torch.float32,
    )
    # Convert the pixel shift into normalized grid units.
    shift *= 2.0 / (h + 2 * pad)
    grid = base_grid + shift
    return F.grid_sample(x, grid, padding_mode="zeros", align_corners=False)


def update_ema_parameters(ema_net: nn.Module, net: nn.Module, alpha: float):
    """Update EMA parameters in place with ema_param <- alpha * ema_param + (1 - alpha) * param."""
    for ema_module, module in zip(ema_net.modules(), net.modules(), strict=True):
        for (n_p_ema, p_ema), (n_p, p) in zip(
            ema_module.named_parameters(recurse=False), module.named_parameters(recurse=False), strict=True
        ):
            assert n_p_ema == n_p, "Parameter names don't match for EMA model update"
            if isinstance(p, dict):
                raise RuntimeError("Dict parameter not supported")
            if isinstance(module, nn.modules.batchnorm._BatchNorm) or not p.requires_grad:
                # Copy BatchNorm parameters, and non-trainable parameters directly.
                # NOTE(review): this `copy_` runs outside `torch.no_grad()` — on a leaf parameter with
                # `requires_grad=True` (e.g. BatchNorm affine weights) an in-place copy would raise.
                # Presumably fine here because the TOLD model contains no BatchNorm — confirm.
                p_ema.copy_(p.to(dtype=p_ema.dtype).data)
            with torch.no_grad():
                p_ema.mul_(alpha)
                p_ema.add_(p.to(dtype=p_ema.dtype).data, alpha=1 - alpha)


def flatten_forward_unflatten(fn: Callable[[Tensor], Tensor], image_tensor: Tensor) -> Tensor:
    """Helper to temporarily flatten extra dims at the start of the image tensor.

    Args:
        fn: Callable that the image tensor will be passed to. It should accept (B, C, H, W) and return
            (B, *), where * is any number of dimensions.
        image_tensor: An image tensor of shape (**, C, H, W), where ** is any number of dimensions,
            generally different from *.
    Returns:
        A return value from the callable reshaped to (**, *).
    """
    if image_tensor.ndim == 4:
        return fn(image_tensor)
    start_dims = image_tensor.shape[:-3]
    inp = torch.flatten(image_tensor, end_dim=-4)
    flat_out = fn(inp)
    return torch.reshape(flat_out, (*start_dims, *flat_out.shape[1:]))
lerobot/src/lerobot/policies/tdmpc/modeling_tdmpc.py/0
{ "file_path": "lerobot/src/lerobot/policies/tdmpc/modeling_tdmpc.py", "repo_id": "lerobot", "token_count": 17313 }
207
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import time
from functools import cached_property
from typing import Any

from lerobot.cameras.utils import make_cameras_from_configs
from lerobot.robots.so100_follower import SO100Follower
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig

from ..robot import Robot
from .config_bi_so100_follower import BiSO100FollowerConfig

logger = logging.getLogger(__name__)


class BiSO100Follower(Robot):
    """
    [Bimanual SO-100 Follower Arms](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio

    A thin composition of two `SO100Follower` arms plus a shared set of cameras. All observation and
    action keys are namespaced with a "left_"/"right_" prefix so the two arms can share one flat
    feature dict. To adapt this class to SO-101 follower arms, swap `SO100Follower` for `SO101Follower`
    and `SO100FollowerConfig` for `SO101FollowerConfig`.
    """

    config_class = BiSO100FollowerConfig
    name = "bi_so100_follower"

    def __init__(self, config: BiSO100FollowerConfig):
        super().__init__(config)
        self.config = config

        # Each arm gets its own single-arm config derived from the bimanual one. Cameras are kept
        # at this level (cameras={}) so frames are read once for the whole robot, not per arm.
        self.left_arm = SO100Follower(
            SO100FollowerConfig(
                id=f"{config.id}_left" if config.id else None,
                calibration_dir=config.calibration_dir,
                port=config.left_arm_port,
                disable_torque_on_disconnect=config.left_arm_disable_torque_on_disconnect,
                max_relative_target=config.left_arm_max_relative_target,
                use_degrees=config.left_arm_use_degrees,
                cameras={},
            )
        )
        self.right_arm = SO100Follower(
            SO100FollowerConfig(
                id=f"{config.id}_right" if config.id else None,
                calibration_dir=config.calibration_dir,
                port=config.right_arm_port,
                disable_torque_on_disconnect=config.right_arm_disable_torque_on_disconnect,
                max_relative_target=config.right_arm_max_relative_target,
                use_degrees=config.right_arm_use_degrees,
                cameras={},
            )
        )
        self.cameras = make_cameras_from_configs(config.cameras)

    def _arms(self) -> tuple[tuple[str, SO100Follower], tuple[str, SO100Follower]]:
        """Return the (prefix, arm) pairs in their canonical left-then-right order."""
        return (("left", self.left_arm), ("right", self.right_arm))

    @property
    def _motors_ft(self) -> dict[str, type]:
        # One float feature per motor, per arm, e.g. "left_shoulder_pan.pos".
        features: dict[str, type] = {}
        for prefix, arm in self._arms():
            for motor in arm.bus.motors:
                features[f"{prefix}_{motor}.pos"] = float
        return features

    @property
    def _cameras_ft(self) -> dict[str, tuple]:
        # Each camera contributes an (H, W, 3) image feature.
        return {
            name: (self.config.cameras[name].height, self.config.cameras[name].width, 3)
            for name in self.cameras
        }

    @cached_property
    def observation_features(self) -> dict[str, type | tuple]:
        return self._motors_ft | self._cameras_ft

    @cached_property
    def action_features(self) -> dict[str, type]:
        return self._motors_ft

    @property
    def is_connected(self) -> bool:
        # Connected only when both arm buses and every camera are up.
        statuses = [self.left_arm.bus.is_connected, self.right_arm.bus.is_connected]
        statuses.extend(cam.is_connected for cam in self.cameras.values())
        return all(statuses)

    def connect(self, calibrate: bool = True) -> None:
        """Connect both arms (optionally calibrating) and then every camera."""
        for _, arm in self._arms():
            arm.connect(calibrate)
        for cam in self.cameras.values():
            cam.connect()

    @property
    def is_calibrated(self) -> bool:
        return all(arm.is_calibrated for _, arm in self._arms())

    def calibrate(self) -> None:
        for _, arm in self._arms():
            arm.calibrate()

    def configure(self) -> None:
        for _, arm in self._arms():
            arm.configure()

    def setup_motors(self) -> None:
        for _, arm in self._arms():
            arm.setup_motors()

    def get_observation(self) -> dict[str, Any]:
        """Read both arms' observations (prefixed per side) followed by one frame per camera."""
        observation: dict[str, Any] = {}

        for prefix, arm in self._arms():
            for key, value in arm.get_observation().items():
                observation[f"{prefix}_{key}"] = value

        for cam_key, cam in self.cameras.items():
            t0 = time.perf_counter()
            observation[cam_key] = cam.async_read()
            elapsed_ms = (time.perf_counter() - t0) * 1e3
            logger.debug(f"{self} read {cam_key}: {elapsed_ms:.1f}ms")

        return observation

    def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
        """Split a prefixed action dict per arm, forward it, and return the prefixed actions sent."""
        sent: dict[str, Any] = {}
        for side, arm in self._arms():
            prefix = f"{side}_"
            arm_action = {
                key.removeprefix(prefix): value
                for key, value in action.items()
                if key.startswith(prefix)
            }
            for key, value in arm.send_action(arm_action).items():
                sent[f"{prefix}{key}"] = value
        return sent

    def disconnect(self):
        """Disconnect both arms and all cameras."""
        for _, arm in self._arms():
            arm.disconnect()
        for cam in self.cameras.values():
            cam.disconnect()
lerobot/src/lerobot/robots/bi_so100_follower/bi_so100_follower.py/0
{ "file_path": "lerobot/src/lerobot/robots/bi_so100_follower/bi_so100_follower.py", "repo_id": "lerobot", "token_count": 2489 }
208
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO(aliberts, Steven, Pepijn): use gRPC calls instead of zmq?

import base64
import json
import logging
from functools import cached_property
from typing import Any

import cv2
import numpy as np

from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError

from ..robot import Robot
from .config_lekiwi import LeKiwiClientConfig


class LeKiwiClient(Robot):
    """Client-side proxy for a remote LeKiwi robot.

    Talks to the LeKiwi host over two ZMQ sockets: a PUSH socket for commands and a PULL socket for
    observations (JSON messages with base64-encoded JPEG frames). Both sockets use CONFLATE so only
    the latest message is kept.
    """

    config_class = LeKiwiClientConfig
    name = "lekiwi_client"

    def __init__(self, config: LeKiwiClientConfig):
        # Imported lazily so merely importing this module doesn't require pyzmq.
        import zmq

        self._zmq = zmq

        super().__init__(config)
        self.config = config
        self.id = config.id
        self.robot_type = config.type

        self.remote_ip = config.remote_ip
        self.port_zmq_cmd = config.port_zmq_cmd
        self.port_zmq_observations = config.port_zmq_observations

        self.teleop_keys = config.teleop_keys

        self.polling_timeout_ms = config.polling_timeout_ms
        self.connect_timeout_s = config.connect_timeout_s

        # Sockets are created in `connect()`.
        self.zmq_context = None
        self.zmq_cmd_socket = None
        self.zmq_observation_socket = None

        # Caches served when the host sends nothing within the polling timeout.
        self.last_frames = {}
        self.last_remote_state = {}

        # Define three speed levels and a current index
        self.speed_levels = [
            {"xy": 0.1, "theta": 30},  # slow
            {"xy": 0.2, "theta": 60},  # medium
            {"xy": 0.3, "theta": 90},  # fast
        ]
        self.speed_index = 0  # Start at slow

        self._is_connected = False
        self.logs = {}

    @cached_property
    def _state_ft(self) -> dict[str, type]:
        # Six arm joint positions plus the three body-frame base velocities, all floats.
        return dict.fromkeys(
            (
                "arm_shoulder_pan.pos",
                "arm_shoulder_lift.pos",
                "arm_elbow_flex.pos",
                "arm_wrist_flex.pos",
                "arm_wrist_roll.pos",
                "arm_gripper.pos",
                "x.vel",
                "y.vel",
                "theta.vel",
            ),
            float,
        )

    @cached_property
    def _state_order(self) -> tuple[str, ...]:
        # Canonical key order used when packing the state into a flat vector.
        return tuple(self._state_ft.keys())

    @cached_property
    def _cameras_ft(self) -> dict[str, tuple[int, int, int]]:
        return {name: (cfg.height, cfg.width, 3) for name, cfg in self.config.cameras.items()}

    @cached_property
    def observation_features(self) -> dict[str, type | tuple]:
        return {**self._state_ft, **self._cameras_ft}

    @cached_property
    def action_features(self) -> dict[str, type]:
        return self._state_ft

    @property
    def is_connected(self) -> bool:
        return self._is_connected

    @property
    def is_calibrated(self) -> bool:
        # Calibration happens on the host; nothing to report client-side.
        pass

    def connect(self) -> None:
        """Establishes ZMQ sockets with the remote mobile robot"""
        if self._is_connected:
            raise DeviceAlreadyConnectedError(
                "LeKiwi Daemon is already connected. Do not run `robot.connect()` twice."
            )

        zmq = self._zmq
        self.zmq_context = zmq.Context()
        self.zmq_cmd_socket = self.zmq_context.socket(zmq.PUSH)
        zmq_cmd_locator = f"tcp://{self.remote_ip}:{self.port_zmq_cmd}"
        self.zmq_cmd_socket.connect(zmq_cmd_locator)
        # CONFLATE: keep only the most recent message in the queue.
        self.zmq_cmd_socket.setsockopt(zmq.CONFLATE, 1)

        self.zmq_observation_socket = self.zmq_context.socket(zmq.PULL)
        zmq_observations_locator = f"tcp://{self.remote_ip}:{self.port_zmq_observations}"
        self.zmq_observation_socket.connect(zmq_observations_locator)
        self.zmq_observation_socket.setsockopt(zmq.CONFLATE, 1)

        # Wait for a first observation to prove the host is alive before declaring success.
        poller = zmq.Poller()
        poller.register(self.zmq_observation_socket, zmq.POLLIN)
        socks = dict(poller.poll(self.connect_timeout_s * 1000))
        if self.zmq_observation_socket not in socks or socks[self.zmq_observation_socket] != zmq.POLLIN:
            raise DeviceNotConnectedError("Timeout waiting for LeKiwi Host to connect expired.")

        self._is_connected = True

    def calibrate(self) -> None:
        pass

    def _poll_and_get_latest_message(self) -> str | None:
        """Polls the ZMQ socket for a limited time and returns the latest message string."""
        zmq = self._zmq
        poller = zmq.Poller()
        poller.register(self.zmq_observation_socket, zmq.POLLIN)

        try:
            socks = dict(poller.poll(self.polling_timeout_ms))
        except zmq.ZMQError as e:
            logging.error(f"ZMQ polling error: {e}")
            return None

        if self.zmq_observation_socket not in socks:
            logging.info("No new data available within timeout.")
            return None

        # Drain the queue non-blockingly so we end up with the most recent message.
        last_msg = None
        while True:
            try:
                msg = self.zmq_observation_socket.recv_string(zmq.NOBLOCK)
                last_msg = msg
            except zmq.Again:
                break

        if last_msg is None:
            logging.warning("Poller indicated data, but failed to retrieve message.")

        return last_msg

    def _parse_observation_json(self, obs_string: str) -> dict[str, Any] | None:
        """Parses the JSON observation string."""
        try:
            return json.loads(obs_string)
        except json.JSONDecodeError as e:
            logging.error(f"Error decoding JSON observation: {e}")
            return None

    def _decode_image_from_b64(self, image_b64: str) -> np.ndarray | None:
        """Decodes a base64 encoded image string to an OpenCV image."""
        if not image_b64:
            return None
        try:
            jpg_data = base64.b64decode(image_b64)
            np_arr = np.frombuffer(jpg_data, dtype=np.uint8)
            frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            if frame is None:
                logging.warning("cv2.imdecode returned None for an image.")
            return frame
        except (TypeError, ValueError) as e:
            logging.error(f"Error decoding base64 image data: {e}")
            return None

    def _remote_state_from_obs(
        self, observation: dict[str, Any]
    ) -> tuple[dict[str, np.ndarray], dict[str, Any]]:
        """Extracts frames, and state from the parsed observation."""
        # Missing state keys default to 0.0 so the flat vector always has a fixed layout.
        flat_state = {key: observation.get(key, 0.0) for key in self._state_order}

        state_vec = np.array([flat_state[key] for key in self._state_order], dtype=np.float32)

        obs_dict: dict[str, Any] = {**flat_state, "observation.state": state_vec}

        # Decode images
        current_frames: dict[str, np.ndarray] = {}
        for cam_name, image_b64 in observation.items():
            if cam_name not in self._cameras_ft:
                continue
            frame = self._decode_image_from_b64(image_b64)
            if frame is not None:
                current_frames[cam_name] = frame

        return current_frames, obs_dict

    def _get_data(self) -> tuple[dict[str, np.ndarray], dict[str, Any]]:
        """
        Polls the video socket for the latest observation data.

        Attempts to retrieve and decode the latest message within a short timeout.
        If successful, updates and returns the new frames and remote state.
        If no new data arrives or decoding fails, returns the last known values.
        """
        # 1. Get the latest message string from the socket
        latest_message_str = self._poll_and_get_latest_message()

        # 2. If no message, return cached data
        if latest_message_str is None:
            return self.last_frames, self.last_remote_state

        # 3. Parse the JSON message
        observation = self._parse_observation_json(latest_message_str)

        # 4. If JSON parsing failed, return cached data
        if observation is None:
            return self.last_frames, self.last_remote_state

        # 5. Process the valid observation data
        try:
            new_frames, new_state = self._remote_state_from_obs(observation)
        except Exception as e:
            logging.error(f"Error processing observation data, serving last observation: {e}")
            return self.last_frames, self.last_remote_state

        self.last_frames = new_frames
        self.last_remote_state = new_state

        return new_frames, new_state

    def get_observation(self) -> dict[str, Any]:
        """
        Capture observations from the remote robot: current follower arm positions,
        present wheel speeds (converted to body-frame velocities: x, y, theta), and a camera frame.
        Receives over ZMQ, translate to body-frame vel
        """
        if not self._is_connected:
            raise DeviceNotConnectedError("LeKiwiClient is not connected. You need to run `robot.connect()`.")

        frames, obs_dict = self._get_data()

        # Loop over each configured camera
        for cam_name, frame in frames.items():
            if frame is None:
                logging.warning("Frame is None")
                # NOTE(review): fallback shape is (640, 480, 3) = (H, W, C); confirm this matches the
                # configured camera resolution orientation (width/height may be swapped).
                frame = np.zeros((640, 480, 3), dtype=np.uint8)
            obs_dict[cam_name] = frame

        return obs_dict

    def _from_keyboard_to_base_action(self, pressed_keys: np.ndarray):
        # Speed control
        if self.teleop_keys["speed_up"] in pressed_keys:
            self.speed_index = min(self.speed_index + 1, 2)
        if self.teleop_keys["speed_down"] in pressed_keys:
            self.speed_index = max(self.speed_index - 1, 0)
        speed_setting = self.speed_levels[self.speed_index]
        xy_speed = speed_setting["xy"]  # e.g. 0.1, 0.25, or 0.4
        theta_speed = speed_setting["theta"]  # e.g. 30, 60, or 90

        x_cmd = 0.0  # m/s forward/backward
        y_cmd = 0.0  # m/s lateral
        theta_cmd = 0.0  # deg/s rotation

        if self.teleop_keys["forward"] in pressed_keys:
            x_cmd += xy_speed
        if self.teleop_keys["backward"] in pressed_keys:
            x_cmd -= xy_speed
        if self.teleop_keys["left"] in pressed_keys:
            y_cmd += xy_speed
        if self.teleop_keys["right"] in pressed_keys:
            y_cmd -= xy_speed
        if self.teleop_keys["rotate_left"] in pressed_keys:
            theta_cmd += theta_speed
        if self.teleop_keys["rotate_right"] in pressed_keys:
            theta_cmd -= theta_speed

        return {
            "x.vel": x_cmd,
            "y.vel": y_cmd,
            "theta.vel": theta_cmd,
        }

    def configure(self):
        pass

    def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
        """Command lekiwi to move to a target joint configuration. Translates to motor space + sends over ZMQ

        Args:
            action (dict[str, Any]): map from state key (see `_state_order`) to the goal value for
                that motor/velocity, in motor space.

        Raises:
            DeviceNotConnectedError: if robot is not connected.

        Returns:
            dict[str, Any]: the per-key action that was sent, plus an "action" entry holding the
            packed float32 vector.
        """
        if not self._is_connected:
            raise DeviceNotConnectedError(
                "ManipulatorRobot is not connected. You need to run `robot.connect()`."
            )

        self.zmq_cmd_socket.send_string(json.dumps(action))  # action is in motor space

        # TODO(Steven): Remove the np conversion when it is possible to record a non-numpy array value
        actions = np.array([action.get(k, 0.0) for k in self._state_order], dtype=np.float32)

        action_sent = {key: actions[i] for i, key in enumerate(self._state_order)}
        action_sent["action"] = actions

        return action_sent

    def disconnect(self):
        """Cleans ZMQ comms"""

        if not self._is_connected:
            raise DeviceNotConnectedError(
                "LeKiwi is not connected. You need to run `robot.connect()` before disconnecting."
            )
        self.zmq_observation_socket.close()
        self.zmq_cmd_socket.close()
        self.zmq_context.term()
        self._is_connected = False
lerobot/src/lerobot/robots/lekiwi/lekiwi_client.py/0
{ "file_path": "lerobot/src/lerobot/robots/lekiwi/lekiwi_client.py", "repo_id": "lerobot", "token_count": 5528 }
209
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from pprint import pformat

from lerobot.robots import RobotConfig

from .robot import Robot


def make_robot_from_config(config: RobotConfig) -> Robot:
    """Instantiate the concrete `Robot` subclass matching `config.type`.

    Imports are done lazily inside each branch so that importing this module does not pull in every
    robot's (possibly heavy) dependencies.

    Raises:
        ValueError: if `config.type` does not match any known robot type.
    """
    if config.type == "koch_follower":
        from .koch_follower import KochFollower

        return KochFollower(config)
    elif config.type == "so100_follower":
        from .so100_follower import SO100Follower

        return SO100Follower(config)
    elif config.type == "so100_follower_end_effector":
        from .so100_follower import SO100FollowerEndEffector

        return SO100FollowerEndEffector(config)
    elif config.type == "so101_follower":
        from .so101_follower import SO101Follower

        return SO101Follower(config)
    elif config.type == "lekiwi":
        from .lekiwi import LeKiwi

        return LeKiwi(config)
    elif config.type == "stretch3":
        from .stretch3 import Stretch3Robot

        return Stretch3Robot(config)
    elif config.type == "viperx":
        from .viperx import ViperX

        return ViperX(config)
    elif config.type == "hope_jr_hand":
        from .hope_jr import HopeJrHand

        return HopeJrHand(config)
    elif config.type == "hope_jr_arm":
        from .hope_jr import HopeJrArm

        return HopeJrArm(config)
    elif config.type == "bi_so100_follower":
        from .bi_so100_follower import BiSO100Follower

        return BiSO100Follower(config)
    elif config.type == "mock_robot":
        from tests.mocks.mock_robot import MockRobot

        return MockRobot(config)
    else:
        raise ValueError(config.type)


def ensure_safe_goal_position(
    goal_present_pos: dict[str, tuple[float, float]], max_relative_target: float | dict[str, float]
) -> dict[str, float]:
    """Caps relative action target magnitude for safety.

    Args:
        goal_present_pos: Map from motor key to a `(goal_pos, present_pos)` pair.
        max_relative_target: Maximum allowed magnitude of `goal_pos - present_pos`, either a single
            float applied to every motor or a per-motor dict with exactly the same keys as
            `goal_present_pos`.

    Returns:
        Map from motor key to the (possibly clamped) safe goal position. A warning is logged for
        every motor whose goal had to be clamped.

    Raises:
        ValueError: if a dict `max_relative_target` does not have the same keys as `goal_present_pos`.
        TypeError: if `max_relative_target` is neither a float nor a dict.
    """
    if isinstance(max_relative_target, float):
        diff_cap = dict.fromkeys(goal_present_pos, max_relative_target)
    elif isinstance(max_relative_target, dict):
        if set(goal_present_pos) != set(max_relative_target):
            raise ValueError("max_relative_target keys must match those of goal_present_pos.")
        diff_cap = max_relative_target
    else:
        raise TypeError(max_relative_target)

    warnings_dict = {}
    safe_goal_positions = {}
    for key, (goal_pos, present_pos) in goal_present_pos.items():
        diff = goal_pos - present_pos
        max_diff = diff_cap[key]
        # Clamp the requested displacement to [-max_diff, max_diff].
        safe_diff = min(diff, max_diff)
        safe_diff = max(safe_diff, -max_diff)
        safe_goal_pos = present_pos + safe_diff
        safe_goal_positions[key] = safe_goal_pos
        if abs(safe_goal_pos - goal_pos) > 1e-4:
            warnings_dict[key] = {
                "original goal_pos": goal_pos,
                "safe goal_pos": safe_goal_pos,
            }

    if warnings_dict:
        logging.warning(
            "Relative goal position magnitude had to be clamped to be safe.\n"
            f"{pformat(warnings_dict, indent=4)}"
        )

    return safe_goal_positions
lerobot/src/lerobot/robots/utils.py/0
{ "file_path": "lerobot/src/lerobot/robots/utils.py", "repo_id": "lerobot", "token_count": 1426 }
210
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import logging.handlers
import os
import time
from dataclasses import dataclass
from pathlib import Path

import torch

from lerobot.configs.types import PolicyFeature
from lerobot.constants import OBS_IMAGES, OBS_STATE
from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features

# NOTE: Configs need to be loaded for the client to be able to instantiate the policy config
from lerobot.policies import ACTConfig, DiffusionConfig, PI0Config, SmolVLAConfig, VQBeTConfig  # noqa: F401
from lerobot.robots.robot import Robot
from lerobot.utils.utils import init_logging

# Type aliases used throughout the policy client/server code.
Action = torch.Tensor
ActionChunk = torch.Tensor

# observation as received from the robot
RawObservation = dict[str, torch.Tensor]
# observation as those recorded in LeRobot dataset (keys are different)
LeRobotObservation = dict[str, torch.Tensor]
# observation, ready for policy inference (image keys resized)
Observation = dict[str, torch.Tensor]


def visualize_action_queue_size(action_queue_size: list[int]) -> None:
    """Plot the action queue size per environment step (blocking `plt.show()`)."""
    # matplotlib is imported lazily so headless deployments don't need it.
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.set_title("Action Queue Size Over Time")
    ax.set_xlabel("Environment steps")
    ax.set_ylabel("Action Queue Size")
    ax.set_ylim(0, max(action_queue_size) * 1.1)
    ax.grid(True, alpha=0.3)
    ax.plot(range(len(action_queue_size)), action_queue_size)
    plt.show()


def validate_robot_cameras_for_policy(
    lerobot_observation_features: dict[str, dict], policy_image_features: dict[str, PolicyFeature]
) -> None:
    """Assert that the robot's image keys exactly match the keys the policy expects."""
    image_keys = list(filter(is_image_key, lerobot_observation_features))

    assert set(image_keys) == set(policy_image_features.keys()), (
        f"Policy image features must match robot cameras! Received {list(policy_image_features.keys())} != {image_keys}"
    )


def map_robot_keys_to_lerobot_features(robot: Robot) -> dict[str, dict]:
    """Translate a robot's hardware observation features into LeRobot dataset features."""
    return hw_to_dataset_features(robot.observation_features, "observation", use_video=False)


def is_image_key(k: str) -> bool:
    """True when the key names an image observation (prefixed with OBS_IMAGES)."""
    return k.startswith(OBS_IMAGES)


def resize_robot_observation_image(image: torch.Tensor, resize_dims: tuple[int, int, int]) -> torch.Tensor:
    """Resize one (H, W, C) robot image to the policy's (C, H, W) resolution."""
    assert image.ndim == 3, f"Image must be (C, H, W)! Received {image.shape}"
    # (H, W, C) -> (C, H, W) for resizing from robot obsevation resolution to policy image resolution
    image = image.permute(2, 0, 1)
    # resize_dims is (C, H, W); only the spatial dims are used for interpolation.
    dims = (resize_dims[1], resize_dims[2])
    # Add batch dimension for interpolate: (C, H, W) -> (1, C, H, W)
    image_batched = image.unsqueeze(0)
    # Interpolate and remove batch dimension: (1, C, H, W) -> (C, H, W)
    resized = torch.nn.functional.interpolate(image_batched, size=dims, mode="bilinear", align_corners=False)
    return resized.squeeze(0)


def raw_observation_to_observation(
    raw_observation: RawObservation,
    lerobot_features: dict[str, dict],
    policy_image_features: dict[str, PolicyFeature],
    device: str,
) -> Observation:
    """Turn a raw robot observation into a policy-ready one: remap keys, resize images,
    scale images to [0, 1] with a batch dim, and move every tensor to `device`."""
    # NOTE(review): this first assignment is dead — `observation` is immediately rebound below.
    observation = {}
    observation = prepare_raw_observation(raw_observation, lerobot_features, policy_image_features)

    for k, v in observation.items():
        if isinstance(v, torch.Tensor):
            # VLAs present natural-language instructions in observations
            if "image" in k:
                # Policy expects images in shape (B, C, H, W)
                observation[k] = prepare_image(v).unsqueeze(0).to(device)
            else:
                observation[k] = v.to(device)
        else:
            # Non-tensor entries (e.g. the "task" string) pass through unchanged.
            observation[k] = v

    return observation


def prepare_image(image: torch.Tensor) -> torch.Tensor:
    """Minimal preprocessing to turn int8 images to float32 in [0, 1], and create a memory-contiguous tensor"""
    image = image.type(torch.float32) / 255
    image = image.contiguous()

    return image


def extract_state_from_raw_observation(
    lerobot_obs: RawObservation,
) -> torch.Tensor:
    """Extract the state from a raw observation."""
    state = torch.tensor(lerobot_obs[OBS_STATE])
    # Policies expect a batch dimension: (state_dim,) -> (1, state_dim).
    if state.ndim == 1:
        state = state.unsqueeze(0)
    return state


def extract_images_from_raw_observation(
    lerobot_obs: RawObservation,
    camera_key: str,
) -> torch.Tensor:
    """Extract the images from a raw observation."""
    return torch.tensor(lerobot_obs[camera_key])


def make_lerobot_observation(
    robot_obs: RawObservation,
    lerobot_features: dict[str, dict],
) -> LeRobotObservation:
    """Make a lerobot observation from a raw observation."""
    return build_dataset_frame(lerobot_features, robot_obs, prefix="observation")


def prepare_raw_observation(
    robot_obs: RawObservation,
    lerobot_features: dict[str, dict],
    policy_image_features: dict[str, PolicyFeature],
) -> Observation:
    """Matches keys from the raw robot_obs dict to the keys expected by a given policy (passed as
    policy_image_features)."""
    # 1. {motor.pos1:value1, motor.pos2:value2, ..., laptop:np.ndarray} ->
    # -> {observation.state:[value1,value2,...], observation.images.laptop:np.ndarray}
    lerobot_obs = make_lerobot_observation(robot_obs, lerobot_features)

    # 2. Greps all observation.images.<> keys
    image_keys = list(filter(is_image_key, lerobot_obs))

    # state's shape is expected as (B, state_dim)
    state_dict = {OBS_STATE: extract_state_from_raw_observation(lerobot_obs)}

    # NOTE(review): this dict is discarded — it is immediately rebuilt below with resized images.
    # The first comprehension looks like dead (wasted) work; confirm and remove.
    image_dict = {
        image_k: extract_images_from_raw_observation(lerobot_obs, image_k) for image_k in image_keys
    }

    # Turns the image features to (C, H, W) with H, W matching the policy image features.
    # This reduces the resolution of the images
    image_dict = {
        key: resize_robot_observation_image(torch.tensor(lerobot_obs[key]), policy_image_features[key].shape)
        for key in image_keys
    }

    # Pass the natural-language task through when the robot provides one (used by VLA policies).
    if "task" in robot_obs:
        state_dict["task"] = robot_obs["task"]

    return {**state_dict, **image_dict}


def get_logger(name: str, log_to_file: bool = True) -> logging.Logger:
    """
    Get a logger using the standardized logging setup from utils.py.

    Args:
        name: Logger name (e.g., 'policy_server', 'robot_client')
        log_to_file: Whether to also log to a file

    Returns:
        Configured logger instance
    """
    # Create logs directory if logging to file
    if log_to_file:
        os.makedirs("logs", exist_ok=True)
        log_file = Path(f"logs/{name}_{int(time.time())}.log")
    else:
        log_file = None

    # Initialize the standardized logging
    init_logging(log_file=log_file, display_pid=False)

    # Return a named logger
    return logging.getLogger(name)


@dataclass
class TimedData:
    """A data object with timestamp and timestep information.

    Args:
        timestamp: Unix timestamp relative to data's creation.
        data: The actual data to wrap a timestamp around.
        timestep: The timestep of the data.
""" timestamp: float timestep: int def get_timestamp(self): return self.timestamp def get_timestep(self): return self.timestep @dataclass class TimedAction(TimedData): action: Action def get_action(self): return self.action @dataclass class TimedObservation(TimedData): observation: RawObservation must_go: bool = False def get_observation(self): return self.observation @dataclass class FPSTracker: """Utility class to track FPS metrics over time.""" target_fps: float first_timestamp: float = None total_obs_count: int = 0 def calculate_fps_metrics(self, current_timestamp: float) -> dict[str, float]: """Calculate average FPS vs target""" self.total_obs_count += 1 # Initialize first observation time if self.first_timestamp is None: self.first_timestamp = current_timestamp # Calculate overall average FPS (since start) total_duration = current_timestamp - self.first_timestamp avg_fps = (self.total_obs_count - 1) / total_duration if total_duration > 1e-6 else 0.0 return {"avg_fps": avg_fps, "target_fps": self.target_fps} def reset(self): """Reset the FPS tracker state""" self.first_timestamp = None self.total_obs_count = 0 @dataclass class RemotePolicyConfig: policy_type: str pretrained_name_or_path: str lerobot_features: dict[str, PolicyFeature] actions_per_chunk: int device: str = "cpu" def _compare_observation_states(obs1_state: torch.Tensor, obs2_state: torch.Tensor, atol: float) -> bool: """Check if two observation states are similar, under a tolerance threshold""" return bool(torch.linalg.norm(obs1_state - obs2_state) < atol) def observations_similar( obs1: TimedObservation, obs2: TimedObservation, lerobot_features: dict[str, dict], atol: float = 1 ) -> bool: """Check if two observations are similar, under a tolerance threshold. Measures distance between observations as the difference in joint-space between the two observations. NOTE(fracapuano): This is a very simple check, and it is enough for the current use case. 
An immediate next step is to use (fast) perceptual difference metrics comparing some camera views, to surpass this joint-space similarity check. """ obs1_state = extract_state_from_raw_observation( make_lerobot_observation(obs1.get_observation(), lerobot_features) ) obs2_state = extract_state_from_raw_observation( make_lerobot_observation(obs2.get_observation(), lerobot_features) ) return _compare_observation_states(obs1_state, obs2_state, atol=atol)
lerobot/src/lerobot/scripts/server/helpers.py/0
{ "file_path": "lerobot/src/lerobot/scripts/server/helpers.py", "repo_id": "lerobot", "token_count": 3749 }
211
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging


class InputController:
    """Base class for input controllers that generate motion deltas."""

    def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0):
        """
        Initialize the controller.

        Args:
            x_step_size: Base movement step size in meters
            y_step_size: Base movement step size in meters
            z_step_size: Base movement step size in meters
        """
        self.x_step_size = x_step_size
        self.y_step_size = y_step_size
        self.z_step_size = z_step_size
        self.running = True
        self.episode_end_status = None  # None, "success", or "failure"
        self.intervention_flag = False
        self.open_gripper_command = False
        self.close_gripper_command = False

    def start(self):
        """Start the controller and initialize resources."""
        pass

    def stop(self):
        """Stop the controller and release resources."""
        pass

    def get_deltas(self):
        """Get the current movement deltas (dx, dy, dz) in meters."""
        return 0.0, 0.0, 0.0

    def should_quit(self):
        """Return True if the user has requested to quit."""
        return not self.running

    def update(self):
        """Update controller state - call this once per frame."""
        pass

    def __enter__(self):
        """Support for use in 'with' statements."""
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Ensure resources are released when exiting 'with' block."""
        self.stop()

    def get_episode_end_status(self):
        """
        Get the current episode end status.

        Returns:
            None if episode should continue, "success" or "failure" otherwise
        """
        status = self.episode_end_status
        self.episode_end_status = None  # Reset after reading
        return status

    def should_intervene(self):
        """Return True if intervention flag was set."""
        return self.intervention_flag

    def gripper_command(self):
        """Return the current gripper command: "open", "close", or "stay".

        When both (or neither) of the open/close flags are set, the commands
        cancel out and the gripper should stay where it is.
        """
        if self.open_gripper_command == self.close_gripper_command:
            return "stay"
        elif self.open_gripper_command:
            return "open"
        elif self.close_gripper_command:
            return "close"


class KeyboardController(InputController):
    """Generate motion deltas from keyboard input."""

    def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0):
        super().__init__(x_step_size, y_step_size, z_step_size)
        # Pressed-state of every key we care about; updated by the pynput listener thread.
        self.key_states = {
            "forward_x": False,
            "backward_x": False,
            "forward_y": False,
            "backward_y": False,
            "forward_z": False,
            "backward_z": False,
            "quit": False,
            "success": False,
            "failure": False,
        }
        self.listener = None

    def start(self):
        """Start the keyboard listener."""
        from pynput import keyboard

        def on_press(key):
            try:
                if key == keyboard.Key.up:
                    self.key_states["forward_x"] = True
                elif key == keyboard.Key.down:
                    self.key_states["backward_x"] = True
                elif key == keyboard.Key.left:
                    self.key_states["forward_y"] = True
                elif key == keyboard.Key.right:
                    self.key_states["backward_y"] = True
                elif key == keyboard.Key.shift:
                    self.key_states["backward_z"] = True
                elif key == keyboard.Key.shift_r:
                    self.key_states["forward_z"] = True
                elif key == keyboard.Key.esc:
                    self.key_states["quit"] = True
                    self.running = False
                    # Returning False stops the pynput listener.
                    return False
                elif key == keyboard.Key.enter:
                    self.key_states["success"] = True
                    self.episode_end_status = "success"
                elif key == keyboard.Key.backspace:
                    self.key_states["failure"] = True
                    self.episode_end_status = "failure"
            except AttributeError:
                # Non-special keys (characters) have no matching attribute; ignore them.
                pass

        def on_release(key):
            try:
                if key == keyboard.Key.up:
                    self.key_states["forward_x"] = False
                elif key == keyboard.Key.down:
                    self.key_states["backward_x"] = False
                elif key == keyboard.Key.left:
                    self.key_states["forward_y"] = False
                elif key == keyboard.Key.right:
                    self.key_states["backward_y"] = False
                elif key == keyboard.Key.shift:
                    self.key_states["backward_z"] = False
                elif key == keyboard.Key.shift_r:
                    self.key_states["forward_z"] = False
                elif key == keyboard.Key.enter:
                    self.key_states["success"] = False
                elif key == keyboard.Key.backspace:
                    self.key_states["failure"] = False
            except AttributeError:
                pass

        self.listener = keyboard.Listener(on_press=on_press, on_release=on_release)
        self.listener.start()

        print("Keyboard controls:")
        print("  Arrow keys: Move in X-Y plane")
        print("  Shift and Shift_R: Move in Z axis")
        print("  Enter: End episode with SUCCESS")
        print("  Backspace: End episode with FAILURE")
        print("  ESC: Exit")

    def stop(self):
        """Stop the keyboard listener."""
        if self.listener and self.listener.is_alive():
            self.listener.stop()

    def get_deltas(self):
        """Get the current movement deltas from keyboard state."""
        delta_x = delta_y = delta_z = 0.0

        if self.key_states["forward_x"]:
            delta_x += self.x_step_size
        if self.key_states["backward_x"]:
            delta_x -= self.x_step_size
        if self.key_states["forward_y"]:
            delta_y += self.y_step_size
        if self.key_states["backward_y"]:
            delta_y -= self.y_step_size
        if self.key_states["forward_z"]:
            delta_z += self.z_step_size
        if self.key_states["backward_z"]:
            delta_z -= self.z_step_size

        return delta_x, delta_y, delta_z

    def should_quit(self):
        """Return True if ESC was pressed."""
        return self.key_states["quit"]

    def should_save(self):
        """Return True if Enter was pressed (save episode)."""
        return self.key_states["success"] or self.key_states["failure"]


class GamepadController(InputController):
    """Generate motion deltas from gamepad input."""

    def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0, deadzone=0.1):
        super().__init__(x_step_size, y_step_size, z_step_size)
        self.deadzone = deadzone
        self.joystick = None
        self.intervention_flag = False

    def start(self):
        """Initialize pygame and the gamepad."""
        import pygame

        pygame.init()
        pygame.joystick.init()

        if pygame.joystick.get_count() == 0:
            logging.error("No gamepad detected. Please connect a gamepad and try again.")
            self.running = False
            return

        self.joystick = pygame.joystick.Joystick(0)
        self.joystick.init()
        logging.info(f"Initialized gamepad: {self.joystick.get_name()}")

        print("Gamepad controls:")
        print("  Left analog stick: Move in X-Y plane")
        print("  Right analog stick (vertical): Move in Z axis")
        print("  B/Circle button: Exit")
        print("  Y/Triangle button: End episode with SUCCESS")
        print("  A/Cross button: End episode with FAILURE")
        print("  X/Square button: Rerecord episode")

    def stop(self):
        """Clean up pygame resources."""
        import pygame

        if pygame.joystick.get_init():
            if self.joystick:
                self.joystick.quit()
            pygame.joystick.quit()
        pygame.quit()

    def update(self):
        """Process pygame events to get fresh gamepad readings."""
        import pygame

        for event in pygame.event.get():
            if event.type == pygame.JOYBUTTONDOWN:
                # Y button (3) for success
                if event.button == 3:
                    self.episode_end_status = "success"
                # A button (1) for failure
                elif event.button == 1:
                    self.episode_end_status = "failure"
                # X button (0) for rerecord
                elif event.button == 0:
                    self.episode_end_status = "rerecord_episode"
                # RB button (6) for closing gripper
                elif event.button == 6:
                    self.close_gripper_command = True
                # LT button (7) for opening gripper
                elif event.button == 7:
                    self.open_gripper_command = True

            # Reset episode status on button release
            elif event.type == pygame.JOYBUTTONUP:
                # Include button 1 (failure) so its status is cleared on release,
                # matching the handling of success (3) and rerecord (0).
                if event.button in [0, 1, 3]:
                    self.episode_end_status = None
                elif event.button == 6:
                    self.close_gripper_command = False
                elif event.button == 7:
                    self.open_gripper_command = False

        # Check for RB button (typically button 5) for intervention flag.
        # Guard against a missing joystick (start() may have failed to detect one).
        self.intervention_flag = bool(self.joystick is not None and self.joystick.get_button(5))

    def get_deltas(self):
        """Get the current movement deltas from gamepad state."""
        import pygame

        try:
            # Read joystick axes
            # Left stick X and Y (typically axes 0 and 1)
            x_input = self.joystick.get_axis(0)  # Left/Right
            y_input = self.joystick.get_axis(1)  # Up/Down (often inverted)

            # Right stick Y (typically axis 3 or 4)
            z_input = self.joystick.get_axis(3)  # Up/Down for Z

            # Apply deadzone to avoid drift
            x_input = 0 if abs(x_input) < self.deadzone else x_input
            y_input = 0 if abs(y_input) < self.deadzone else y_input
            z_input = 0 if abs(z_input) < self.deadzone else z_input

            # Calculate deltas (note: may need to invert axes depending on controller)
            delta_x = -x_input * self.x_step_size  # Forward/backward
            delta_y = y_input * self.y_step_size  # Left/right
            delta_z = -z_input * self.z_step_size  # Up/down

            return delta_x, delta_y, delta_z

        except pygame.error:
            logging.error("Error reading gamepad. Is it still connected?")
            return 0.0, 0.0, 0.0


class GamepadControllerHID(InputController):
    """Generate motion deltas from gamepad input using HIDAPI."""

    def __init__(
        self,
        x_step_size=1.0,
        y_step_size=1.0,
        z_step_size=1.0,
        deadzone=0.1,
    ):
        """
        Initialize the HID gamepad controller.

        Args:
            x_step_size: Base X movement step size in meters
            y_step_size: Base Y movement step size in meters
            z_step_size: Base Z movement step size in meters
            deadzone: Joystick deadzone to prevent drift
        """
        super().__init__(x_step_size, y_step_size, z_step_size)
        self.deadzone = deadzone
        self.device = None
        self.device_info = None

        # Movement values (normalized from -1.0 to 1.0)
        self.left_x = 0.0
        self.left_y = 0.0
        self.right_x = 0.0
        self.right_y = 0.0

        # Button states
        self.buttons = {}
        self.quit_requested = False
        self.save_requested = False

    def find_device(self):
        """Look for the gamepad device by vendor and product ID."""
        import hid

        devices = hid.enumerate()
        for device in devices:
            device_name = device["product_string"]
            if any(controller in device_name for controller in ["Logitech", "Xbox", "PS4", "PS5"]):
                return device

        logging.error(
            "No gamepad found, check the connection and the product string in HID to add your gamepad"
        )
        return None

    def start(self):
        """Connect to the gamepad using HIDAPI."""
        import hid

        self.device_info = self.find_device()
        if not self.device_info:
            self.running = False
            return

        try:
            logging.info(f"Connecting to gamepad at path: {self.device_info['path']}")
            self.device = hid.device()
            self.device.open_path(self.device_info["path"])
            self.device.set_nonblocking(1)

            manufacturer = self.device.get_manufacturer_string()
            product = self.device.get_product_string()
            logging.info(f"Connected to {manufacturer} {product}")

            logging.info("Gamepad controls (HID mode):")
            logging.info("  Left analog stick: Move in X-Y plane")
            logging.info("  Right analog stick: Move in Z axis (vertical)")
            logging.info("  Button 1/B/Circle: Exit")
            logging.info("  Button 2/A/Cross: End episode with SUCCESS")
            logging.info("  Button 3/X/Square: End episode with FAILURE")

        except OSError as e:
            logging.error(f"Error opening gamepad: {e}")
            logging.error("You might need to run this with sudo/admin privileges on some systems")
            self.running = False

    def stop(self):
        """Close the HID device connection."""
        if self.device:
            self.device.close()
            self.device = None

    def update(self):
        """
        Read and process the latest gamepad data.

        Due to an issue with the HIDAPI, we need to read the device several
        times in order to get a stable reading
        """
        for _ in range(10):
            self._update()

    def _update(self):
        """Read and process the latest gamepad data."""
        if not self.device or not self.running:
            return

        try:
            # Read data from the gamepad (non-blocking; may return an empty list)
            data = self.device.read(64)
            # Interpret gamepad data - this will vary by controller model
            # These offsets are for the Logitech RumblePad 2
            if data and len(data) >= 8:
                # Normalize joystick values from 0-255 to -1.0-1.0
                self.left_y = (data[1] - 128) / 128.0
                self.left_x = (data[2] - 128) / 128.0
                self.right_x = (data[3] - 128) / 128.0
                self.right_y = (data[4] - 128) / 128.0

                # Apply deadzone
                self.left_y = 0 if abs(self.left_y) < self.deadzone else self.left_y
                self.left_x = 0 if abs(self.left_x) < self.deadzone else self.left_x
                self.right_x = 0 if abs(self.right_x) < self.deadzone else self.right_x
                self.right_y = 0 if abs(self.right_y) < self.deadzone else self.right_y

                # Parse button states (byte 5 in the Logitech RumblePad 2)
                buttons = data[5]

                # Check if RB is pressed then the intervention flag should be set
                self.intervention_flag = data[6] in [2, 6, 10, 14]

                # Check if RT is pressed
                self.open_gripper_command = data[6] in [8, 10, 12]

                # Check if LT is pressed
                self.close_gripper_command = data[6] in [4, 6, 12]

                # Check if Y/Triangle button (bit 7) is pressed for saving
                # Check if X/Square button (bit 5) is pressed for failure
                # Check if A/Cross button (bit 4) is pressed for rerecording
                if buttons & 1 << 7:
                    self.episode_end_status = "success"
                elif buttons & 1 << 5:
                    self.episode_end_status = "failure"
                elif buttons & 1 << 4:
                    self.episode_end_status = "rerecord_episode"
                else:
                    self.episode_end_status = None

        except OSError as e:
            logging.error(f"Error reading from gamepad: {e}")

    def get_deltas(self):
        """Get the current movement deltas from gamepad state."""
        # Calculate deltas - invert as needed based on controller orientation
        delta_x = -self.left_x * self.x_step_size  # Forward/backward
        delta_y = -self.left_y * self.y_step_size  # Left/right
        delta_z = -self.right_y * self.z_step_size  # Up/down

        return delta_x, delta_y, delta_z

    def should_quit(self):
        """Return True if quit was requested or the device is no longer running.

        The original implementation only consulted `quit_requested`, which is
        never set, so a failed device connection (running=False) could never
        terminate the caller's loop — contradicting the base-class contract.
        """
        return self.quit_requested or not self.running

    def should_save(self):
        """Return True if save button was pressed."""
        return self.save_requested
lerobot/src/lerobot/teleoperators/gamepad/gamepad_utils.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/gamepad/gamepad_utils.py", "repo_id": "lerobot", "token_count": 8218 }
212
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
# NOTE(review): machine-generated by grpcio-tools from services.proto —
# regenerate instead of hand-editing; any manual change here will be lost.
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import warnings

from lerobot.transport import services_pb2 as lerobot_dot_transport_dot_services__pb2

GRPC_GENERATED_VERSION = '1.73.1'
GRPC_VERSION = grpc.__version__
_version_not_supported = False

try:
    from grpc._utilities import first_version_is_lower
    _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
except ImportError:
    _version_not_supported = True

if _version_not_supported:
    raise RuntimeError(
        f'The grpc package installed is at version {GRPC_VERSION},'
        + f' but the generated code in lerobot/transport/services_pb2_grpc.py depends on'
        + f' grpcio>={GRPC_GENERATED_VERSION}.'
        + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
        + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
    )


class LearnerServiceStub:
    """LearnerService: the Actor calls this to push transitions.
    The Learner implements this service.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.StreamParameters = channel.unary_stream(
                '/transport.LearnerService/StreamParameters',
                request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Parameters.FromString,
                _registered_method=True)
        self.SendTransitions = channel.stream_unary(
                '/transport.LearnerService/SendTransitions',
                request_serializer=lerobot_dot_transport_dot_services__pb2.Transition.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                _registered_method=True)
        self.SendInteractions = channel.stream_unary(
                '/transport.LearnerService/SendInteractions',
                request_serializer=lerobot_dot_transport_dot_services__pb2.InteractionMessage.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                _registered_method=True)
        self.Ready = channel.unary_unary(
                '/transport.LearnerService/Ready',
                request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                _registered_method=True)


class LearnerServiceServicer:
    """LearnerService: the Actor calls this to push transitions.
    The Learner implements this service.
    """

    def StreamParameters(self, request, context):
        """Actor -> Learner to store transitions
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendTransitions(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendInteractions(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Ready(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_LearnerServiceServicer_to_server(servicer, server):
    # Registers the servicer's RPC handlers under the 'transport.LearnerService' service name.
    rpc_method_handlers = {
            'StreamParameters': grpc.unary_stream_rpc_method_handler(
                    servicer.StreamParameters,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Parameters.SerializeToString,
            ),
            'SendTransitions': grpc.stream_unary_rpc_method_handler(
                    servicer.SendTransitions,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.Transition.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            ),
            'SendInteractions': grpc.stream_unary_rpc_method_handler(
                    servicer.SendInteractions,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.InteractionMessage.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            ),
            'Ready': grpc.unary_unary_rpc_method_handler(
                    servicer.Ready,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'transport.LearnerService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
    server.add_registered_method_handlers('transport.LearnerService', rpc_method_handlers)


 # This class is part of an EXPERIMENTAL API.
class LearnerService:
    """LearnerService: the Actor calls this to push transitions.
    The Learner implements this service.
    """

    @staticmethod
    def StreamParameters(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(
            request,
            target,
            '/transport.LearnerService/StreamParameters',
            lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Parameters.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)

    @staticmethod
    def SendTransitions(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.stream_unary(
            request_iterator,
            target,
            '/transport.LearnerService/SendTransitions',
            lerobot_dot_transport_dot_services__pb2.Transition.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Empty.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)

    @staticmethod
    def SendInteractions(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.stream_unary(
            request_iterator,
            target,
            '/transport.LearnerService/SendInteractions',
            lerobot_dot_transport_dot_services__pb2.InteractionMessage.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Empty.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)

    @staticmethod
    def Ready(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(
            request,
            target,
            '/transport.LearnerService/Ready',
            lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Empty.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)


class AsyncInferenceStub:
    """AsyncInference: from Robot perspective
    Robot send observations to & executes action received from a remote Policy server
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.SendObservations = channel.stream_unary(
                '/transport.AsyncInference/SendObservations',
                request_serializer=lerobot_dot_transport_dot_services__pb2.Observation.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                _registered_method=True)
        self.GetActions = channel.unary_unary(
                '/transport.AsyncInference/GetActions',
                request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Actions.FromString,
                _registered_method=True)
        self.SendPolicyInstructions = channel.unary_unary(
                '/transport.AsyncInference/SendPolicyInstructions',
                request_serializer=lerobot_dot_transport_dot_services__pb2.PolicySetup.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                _registered_method=True)
        self.Ready = channel.unary_unary(
                '/transport.AsyncInference/Ready',
                request_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
                response_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                _registered_method=True)


class AsyncInferenceServicer:
    """AsyncInference: from Robot perspective
    Robot send observations to & executes action received from a remote Policy server
    """

    def SendObservations(self, request_iterator, context):
        """Robot -> Policy to share observations with a remote inference server
        Policy -> Robot to share actions predicted for given observations
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetActions(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SendPolicyInstructions(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Ready(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_AsyncInferenceServicer_to_server(servicer, server):
    # Registers the servicer's RPC handlers under the 'transport.AsyncInference' service name.
    rpc_method_handlers = {
            'SendObservations': grpc.stream_unary_rpc_method_handler(
                    servicer.SendObservations,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.Observation.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            ),
            'GetActions': grpc.unary_unary_rpc_method_handler(
                    servicer.GetActions,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Actions.SerializeToString,
            ),
            'SendPolicyInstructions': grpc.unary_unary_rpc_method_handler(
                    servicer.SendPolicyInstructions,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.PolicySetup.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            ),
            'Ready': grpc.unary_unary_rpc_method_handler(
                    servicer.Ready,
                    request_deserializer=lerobot_dot_transport_dot_services__pb2.Empty.FromString,
                    response_serializer=lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'transport.AsyncInference', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
    server.add_registered_method_handlers('transport.AsyncInference', rpc_method_handlers)


 # This class is part of an EXPERIMENTAL API.
class AsyncInference:
    """AsyncInference: from Robot perspective
    Robot send observations to & executes action received from a remote Policy server
    """

    # NOTE(review): this looks like grpc-tools generated code (experimental
    # "connectionless" API wrappers) — if so, regenerate from the .proto
    # instead of editing by hand. Each static method creates an ad-hoc call
    # to the named RPC on `target` without requiring a pre-built stub.

    @staticmethod
    def SendObservations(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Client-streaming RPC: many Observation messages in, one Empty out.
        return grpc.experimental.stream_unary(
            request_iterator,
            target,
            '/transport.AsyncInference/SendObservations',
            lerobot_dot_transport_dot_services__pb2.Observation.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Empty.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)

    @staticmethod
    def GetActions(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Unary RPC: Empty in, Actions out.
        return grpc.experimental.unary_unary(
            request,
            target,
            '/transport.AsyncInference/GetActions',
            lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Actions.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)

    @staticmethod
    def SendPolicyInstructions(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Unary RPC: PolicySetup in, Empty out.
        return grpc.experimental.unary_unary(
            request,
            target,
            '/transport.AsyncInference/SendPolicyInstructions',
            lerobot_dot_transport_dot_services__pb2.PolicySetup.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Empty.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)

    @staticmethod
    def Ready(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Unary RPC: Empty in, Empty out — liveness/readiness probe.
        return grpc.experimental.unary_unary(
            request,
            target,
            '/transport.AsyncInference/Ready',
            lerobot_dot_transport_dot_services__pb2.Empty.SerializeToString,
            lerobot_dot_transport_dot_services__pb2.Empty.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True)
lerobot/src/lerobot/transport/services_pb2_grpc.py/0
{ "file_path": "lerobot/src/lerobot/transport/services_pb2_grpc.py", "repo_id": "lerobot", "token_count": 8161 }
213
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import os.path as osp import platform import select import subprocess import sys import time from copy import copy, deepcopy from datetime import datetime, timezone from pathlib import Path from statistics import mean import numpy as np import torch def none_or_int(value): if value == "None": return None return int(value) def inside_slurm(): """Check whether the python process was launched through slurm""" # TODO(rcadene): return False for interactive mode `--pty bash` return "SLURM_JOB_ID" in os.environ def auto_select_torch_device() -> torch.device: """Tries to select automatically a torch device.""" if torch.cuda.is_available(): logging.info("Cuda backend detected, using cuda.") return torch.device("cuda") elif torch.backends.mps.is_available(): logging.info("Metal backend detected, using mps.") return torch.device("mps") else: logging.warning("No accelerated backend detected. Using default cpu, this will be slow.") return torch.device("cpu") # TODO(Steven): Remove log. 
log shouldn't be an argument, this should be handled by the logger level def get_safe_torch_device(try_device: str, log: bool = False) -> torch.device: """Given a string, return a torch.device with checks on whether the device is available.""" try_device = str(try_device) match try_device: case "cuda": assert torch.cuda.is_available() device = torch.device("cuda") case "mps": assert torch.backends.mps.is_available() device = torch.device("mps") case "cpu": device = torch.device("cpu") if log: logging.warning("Using CPU, this will be slow.") case _: device = torch.device(try_device) if log: logging.warning(f"Using custom {try_device} device.") return device def get_safe_dtype(dtype: torch.dtype, device: str | torch.device): """ mps is currently not compatible with float64 """ if isinstance(device, torch.device): device = device.type if device == "mps" and dtype == torch.float64: return torch.float32 else: return dtype def is_torch_device_available(try_device: str) -> bool: try_device = str(try_device) # Ensure try_device is a string if try_device == "cuda": return torch.cuda.is_available() elif try_device == "mps": return torch.backends.mps.is_available() elif try_device == "cpu": return True else: raise ValueError(f"Unknown device {try_device}. Supported devices are: cuda, mps or cpu.") def is_amp_available(device: str): if device in ["cuda", "cpu"]: return True elif device == "mps": return False else: raise ValueError(f"Unknown device '{device}.") def init_logging( log_file: Path | None = None, display_pid: bool = False, console_level: str = "INFO", file_level: str = "DEBUG", ): def custom_format(record: logging.LogRecord) -> str: dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S") fnameline = f"{record.pathname}:{record.lineno}" # NOTE: Display PID is useful for multi-process logging. 
if display_pid: pid_str = f"[PID: {os.getpid()}]" message = f"{record.levelname} {pid_str} {dt} {fnameline[-15:]:>15} {record.getMessage()}" else: message = f"{record.levelname} {dt} {fnameline[-15:]:>15} {record.getMessage()}" return message formatter = logging.Formatter() formatter.format = custom_format logger = logging.getLogger() logger.setLevel(logging.NOTSET) # Set the logger to the lowest level to capture all messages # Remove unused default handlers for handler in logger.handlers[:]: logger.removeHandler(handler) # Write logs to console console_handler = logging.StreamHandler() console_handler.setFormatter(formatter) console_handler.setLevel(console_level.upper()) logger.addHandler(console_handler) # Additionally write logs to file if log_file is not None: file_handler = logging.FileHandler(log_file) file_handler.setFormatter(formatter) file_handler.setLevel(file_level.upper()) logger.addHandler(file_handler) def format_big_number(num, precision=0): suffixes = ["", "K", "M", "B", "T", "Q"] divisor = 1000.0 for suffix in suffixes: if abs(num) < divisor: return f"{num:.{precision}f}{suffix}" num /= divisor return num def _relative_path_between(path1: Path, path2: Path) -> Path: """Returns path1 relative to path2.""" path1 = path1.absolute() path2 = path2.absolute() try: return path1.relative_to(path2) except ValueError: # most likely because path1 is not a subpath of path2 common_parts = Path(osp.commonpath([path1, path2])).parts return Path( "/".join([".."] * (len(path2.parts) - len(common_parts)) + list(path1.parts[len(common_parts) :])) ) def print_cuda_memory_usage(): """Use this function to locate and debug memory leak.""" import gc gc.collect() # Also clear the cache if you want to fully release the memory torch.cuda.empty_cache() print(f"Current GPU Memory Allocated: {torch.cuda.memory_allocated(0) / 1024**2:.2f} MB") print(f"Maximum GPU Memory Allocated: {torch.cuda.max_memory_allocated(0) / 1024**2:.2f} MB") print(f"Current GPU Memory Reserved: 
{torch.cuda.memory_reserved(0) / 1024**2:.2f} MB") print(f"Maximum GPU Memory Reserved: {torch.cuda.max_memory_reserved(0) / 1024**2:.2f} MB") def capture_timestamp_utc(): return datetime.now(timezone.utc) def say(text: str, blocking: bool = False): system = platform.system() if system == "Darwin": cmd = ["say", text] elif system == "Linux": cmd = ["spd-say", text] if blocking: cmd.append("--wait") elif system == "Windows": cmd = [ "PowerShell", "-Command", "Add-Type -AssemblyName System.Speech; " f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')", ] else: raise RuntimeError("Unsupported operating system for text-to-speech.") if blocking: subprocess.run(cmd, check=True) else: subprocess.Popen(cmd, creationflags=subprocess.CREATE_NO_WINDOW if system == "Windows" else 0) def log_say(text: str, play_sounds: bool = True, blocking: bool = False): logging.info(text) if play_sounds: say(text, blocking) def get_channel_first_image_shape(image_shape: tuple) -> tuple: shape = copy(image_shape) if shape[2] < shape[0] and shape[2] < shape[1]: # (h, w, c) -> (c, h, w) shape = (shape[2], shape[0], shape[1]) elif not (shape[0] < shape[1] and shape[0] < shape[2]): raise ValueError(image_shape) return shape def has_method(cls: object, method_name: str) -> bool: return hasattr(cls, method_name) and callable(getattr(cls, method_name)) def is_valid_numpy_dtype_string(dtype_str: str) -> bool: """ Return True if a given string can be converted to a numpy dtype. 
""" try: # Attempt to convert the string to a numpy dtype np.dtype(dtype_str) return True except TypeError: # If a TypeError is raised, the string is not a valid dtype return False def enter_pressed() -> bool: if platform.system() == "Windows": import msvcrt if msvcrt.kbhit(): key = msvcrt.getch() return key in (b"\r", b"\n") # enter key return False else: return select.select([sys.stdin], [], [], 0)[0] and sys.stdin.readline().strip() == "" def move_cursor_up(lines): """Move the cursor up by a specified number of lines.""" print(f"\033[{lines}A", end="") class TimerManager: """ Lightweight utility to measure elapsed time. Examples -------- ```python # Example 1: Using context manager timer = TimerManager("Policy", log=False) for _ in range(3): with timer: time.sleep(0.01) print(timer.last, timer.fps_avg, timer.percentile(90)) # Prints: 0.01 100.0 0.01 ``` ```python # Example 2: Using start/stop methods timer = TimerManager("Policy", log=False) timer.start() time.sleep(0.01) timer.stop() print(timer.last, timer.fps_avg, timer.percentile(90)) # Prints: 0.01 100.0 0.01 ``` """ def __init__( self, label: str = "Elapsed-time", log: bool = True, logger: logging.Logger | None = None, ): self.label = label self.log = log self.logger = logger self._start: float | None = None self._history: list[float] = [] def __enter__(self): return self.start() def __exit__(self, exc_type, exc_val, exc_tb): self.stop() def start(self): self._start = time.perf_counter() return self def stop(self) -> float: if self._start is None: raise RuntimeError("Timer was never started.") elapsed = time.perf_counter() - self._start self._history.append(elapsed) self._start = None if self.log: if self.logger is not None: self.logger.info(f"{self.label}: {elapsed:.6f} s") else: logging.info(f"{self.label}: {elapsed:.6f} s") return elapsed def reset(self): self._history.clear() @property def last(self) -> float: return self._history[-1] if self._history else 0.0 @property def avg(self) -> float: return 
mean(self._history) if self._history else 0.0 @property def total(self) -> float: return sum(self._history) @property def count(self) -> int: return len(self._history) @property def history(self) -> list[float]: return deepcopy(self._history) @property def fps_history(self) -> list[float]: return [1.0 / t for t in self._history] @property def fps_last(self) -> float: return 0.0 if self.last == 0 else 1.0 / self.last @property def fps_avg(self) -> float: return 0.0 if self.avg == 0 else 1.0 / self.avg def percentile(self, p: float) -> float: """ Return the p-th percentile of recorded times. """ if not self._history: return 0.0 return float(np.percentile(self._history, p)) def fps_percentile(self, p: float) -> float: """ FPS corresponding to the p-th percentile time. """ val = self.percentile(p) return 0.0 if val == 0 else 1.0 / val
lerobot/src/lerobot/utils/utils.py/0
{ "file_path": "lerobot/src/lerobot/utils/utils.py", "repo_id": "lerobot", "token_count": 4718 }
214
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for AsyncImageWriter and the image conversion/writing helpers."""

import queue
import time
from multiprocessing import queues
from unittest.mock import MagicMock, patch

import numpy as np
import pytest
from PIL import Image

from lerobot.datasets.image_writer import (
    AsyncImageWriter,
    image_array_to_pil_image,
    safe_stop_image_writer,
    write_image,
)
from tests.fixtures.constants import DUMMY_HWC

DUMMY_IMAGE = "test_image.png"


def test_init_threading():
    writer = AsyncImageWriter(num_processes=0, num_threads=2)
    try:
        assert writer.num_processes == 0
        assert writer.num_threads == 2
        assert isinstance(writer.queue, queue.Queue)
        assert len(writer.threads) == 2
        assert len(writer.processes) == 0
        assert all(t.is_alive() for t in writer.threads)
    finally:
        writer.stop()


def test_init_multiprocessing():
    writer = AsyncImageWriter(num_processes=2, num_threads=2)
    try:
        assert writer.num_processes == 2
        assert writer.num_threads == 2
        assert isinstance(writer.queue, queues.JoinableQueue)
        assert len(writer.threads) == 0
        assert len(writer.processes) == 2
        assert all(p.is_alive() for p in writer.processes)
    finally:
        writer.stop()


def test_zero_threads():
    # At least one worker (thread or process) is required.
    with pytest.raises(ValueError):
        AsyncImageWriter(num_processes=0, num_threads=0)


def test_image_array_to_pil_image_float_array_wrong_range_0_255():
    # Float arrays must be in [0, 1]; values up to 255 must be rejected.
    image = np.random.rand(*DUMMY_HWC) * 255
    with pytest.raises(ValueError):
        image_array_to_pil_image(image)


def test_image_array_to_pil_image_float_array_wrong_range_neg_1_1():
    # Float arrays must be in [0, 1]; negative values must be rejected.
    image = np.random.rand(*DUMMY_HWC) * 2 - 1
    with pytest.raises(ValueError):
        image_array_to_pil_image(image)


def test_image_array_to_pil_image_rgb(img_array_factory):
    img_array = img_array_factory(100, 100)
    result_image = image_array_to_pil_image(img_array)
    assert isinstance(result_image, Image.Image)
    assert result_image.size == (100, 100)
    assert result_image.mode == "RGB"


def test_image_array_to_pil_image_pytorch_format(img_array_factory):
    # Channel-first (c, h, w) input must be handled too.
    img_array = img_array_factory(100, 100).transpose(2, 0, 1)
    result_image = image_array_to_pil_image(img_array)
    assert isinstance(result_image, Image.Image)
    assert result_image.size == (100, 100)
    assert result_image.mode == "RGB"


def test_image_array_to_pil_image_single_channel(img_array_factory):
    img_array = img_array_factory(channels=1)
    with pytest.raises(NotImplementedError):
        image_array_to_pil_image(img_array)


def test_image_array_to_pil_image_4_channels(img_array_factory):
    img_array = img_array_factory(channels=4)
    with pytest.raises(NotImplementedError):
        image_array_to_pil_image(img_array)


def test_image_array_to_pil_image_float_array(img_array_factory):
    img_array = img_array_factory(dtype=np.float32)
    result_image = image_array_to_pil_image(img_array)
    assert isinstance(result_image, Image.Image)
    assert result_image.size == (100, 100)
    assert result_image.mode == "RGB"
    assert np.array(result_image).dtype == np.uint8


def test_image_array_to_pil_image_uint8_array(img_array_factory):
    # Fix: this test previously duplicated the float32 test; it must exercise uint8 input.
    img_array = img_array_factory(dtype=np.uint8)
    result_image = image_array_to_pil_image(img_array)
    assert isinstance(result_image, Image.Image)
    assert result_image.size == (100, 100)
    assert result_image.mode == "RGB"
    assert np.array(result_image).dtype == np.uint8


def test_write_image_numpy(tmp_path, img_array_factory):
    image_array = img_array_factory()
    fpath = tmp_path / DUMMY_IMAGE
    write_image(image_array, fpath)
    assert fpath.exists()
    saved_image = np.array(Image.open(fpath))
    assert np.array_equal(image_array, saved_image)


def test_write_image_image(tmp_path, img_factory):
    image_pil = img_factory()
    fpath = tmp_path / DUMMY_IMAGE
    write_image(image_pil, fpath)
    assert fpath.exists()
    saved_image = Image.open(fpath)
    assert list(saved_image.getdata()) == list(image_pil.getdata())
    assert np.array_equal(image_pil, saved_image)


def test_write_image_exception(tmp_path):
    # write_image swallows errors and prints them rather than raising.
    image_array = "invalid data"
    fpath = tmp_path / DUMMY_IMAGE
    with patch("builtins.print") as mock_print:
        write_image(image_array, fpath)
        mock_print.assert_called()
    assert not fpath.exists()


def test_save_image_numpy(tmp_path, img_array_factory):
    writer = AsyncImageWriter()
    try:
        image_array = img_array_factory()
        fpath = tmp_path / DUMMY_IMAGE
        fpath.parent.mkdir(parents=True, exist_ok=True)
        writer.save_image(image_array, fpath)
        writer.wait_until_done()
        assert fpath.exists()
        saved_image = np.array(Image.open(fpath))
        assert np.array_equal(image_array, saved_image)
    finally:
        writer.stop()


def test_save_image_numpy_multiprocessing(tmp_path, img_array_factory):
    writer = AsyncImageWriter(num_processes=2, num_threads=2)
    try:
        image_array = img_array_factory()
        fpath = tmp_path / DUMMY_IMAGE
        writer.save_image(image_array, fpath)
        writer.wait_until_done()
        assert fpath.exists()
        saved_image = np.array(Image.open(fpath))
        assert np.array_equal(image_array, saved_image)
    finally:
        writer.stop()


def test_save_image_torch(tmp_path, img_tensor_factory):
    writer = AsyncImageWriter()
    try:
        image_tensor = img_tensor_factory()
        fpath = tmp_path / DUMMY_IMAGE
        fpath.parent.mkdir(parents=True, exist_ok=True)
        writer.save_image(image_tensor, fpath)
        writer.wait_until_done()
        assert fpath.exists()
        saved_image = np.array(Image.open(fpath))
        # Tensors are (c, h, w) float in [0, 1]; the writer stores (h, w, c) uint8.
        expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
        assert np.array_equal(expected_image, saved_image)
    finally:
        writer.stop()


def test_save_image_torch_multiprocessing(tmp_path, img_tensor_factory):
    writer = AsyncImageWriter(num_processes=2, num_threads=2)
    try:
        image_tensor = img_tensor_factory()
        fpath = tmp_path / DUMMY_IMAGE
        writer.save_image(image_tensor, fpath)
        writer.wait_until_done()
        assert fpath.exists()
        saved_image = np.array(Image.open(fpath))
        expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
        assert np.array_equal(expected_image, saved_image)
    finally:
        writer.stop()


def test_save_image_pil(tmp_path, img_factory):
    writer = AsyncImageWriter()
    try:
        image_pil = img_factory()
        fpath = tmp_path / DUMMY_IMAGE
        fpath.parent.mkdir(parents=True, exist_ok=True)
        writer.save_image(image_pil, fpath)
        writer.wait_until_done()
        assert fpath.exists()
        saved_image = Image.open(fpath)
        assert list(saved_image.getdata()) == list(image_pil.getdata())
    finally:
        writer.stop()


def test_save_image_pil_multiprocessing(tmp_path, img_factory):
    writer = AsyncImageWriter(num_processes=2, num_threads=2)
    try:
        image_pil = img_factory()
        fpath = tmp_path / DUMMY_IMAGE
        writer.save_image(image_pil, fpath)
        writer.wait_until_done()
        assert fpath.exists()
        saved_image = Image.open(fpath)
        assert list(saved_image.getdata()) == list(image_pil.getdata())
    finally:
        writer.stop()


def test_save_image_invalid_data(tmp_path):
    writer = AsyncImageWriter()
    try:
        image_array = "invalid data"
        fpath = tmp_path / DUMMY_IMAGE
        fpath.parent.mkdir(parents=True, exist_ok=True)
        with patch("builtins.print") as mock_print:
            writer.save_image(image_array, fpath)
            writer.wait_until_done()
            mock_print.assert_called()
        assert not fpath.exists()
    finally:
        writer.stop()


def test_save_image_after_stop(tmp_path, img_array_factory):
    # Images queued after stop() must be silently dropped.
    writer = AsyncImageWriter()
    writer.stop()
    image_array = img_array_factory()
    fpath = tmp_path / DUMMY_IMAGE
    writer.save_image(image_array, fpath)
    time.sleep(1)
    assert not fpath.exists()


def test_stop():
    writer = AsyncImageWriter(num_processes=0, num_threads=2)
    writer.stop()
    assert not any(t.is_alive() for t in writer.threads)


def test_stop_multiprocessing():
    writer = AsyncImageWriter(num_processes=2, num_threads=2)
    writer.stop()
    assert not any(p.is_alive() for p in writer.processes)


def test_multiple_stops():
    writer = AsyncImageWriter()
    writer.stop()
    writer.stop()  # Should not raise an exception
    assert not any(t.is_alive() for t in writer.threads)


def test_multiple_stops_multiprocessing():
    writer = AsyncImageWriter(num_processes=2, num_threads=2)
    writer.stop()
    writer.stop()  # Should not raise an exception
    # Fix: a multiprocessing writer has workers in `processes`, not `threads`.
    assert not any(p.is_alive() for p in writer.processes)


def test_wait_until_done(tmp_path, img_array_factory):
    writer = AsyncImageWriter(num_processes=0, num_threads=4)
    try:
        num_images = 100
        image_arrays = [img_array_factory(height=500, width=500) for _ in range(num_images)]
        fpaths = [tmp_path / f"frame_{i:06d}.png" for i in range(num_images)]
        for image_array, fpath in zip(image_arrays, fpaths, strict=True):
            fpath.parent.mkdir(parents=True, exist_ok=True)
            writer.save_image(image_array, fpath)
        writer.wait_until_done()
        for i, fpath in enumerate(fpaths):
            assert fpath.exists()
            saved_image = np.array(Image.open(fpath))
            assert np.array_equal(saved_image, image_arrays[i])
    finally:
        writer.stop()


def test_wait_until_done_multiprocessing(tmp_path, img_array_factory):
    writer = AsyncImageWriter(num_processes=2, num_threads=2)
    try:
        num_images = 100
        image_arrays = [img_array_factory() for _ in range(num_images)]
        fpaths = [tmp_path / f"frame_{i:06d}.png" for i in range(num_images)]
        for image_array, fpath in zip(image_arrays, fpaths, strict=True):
            fpath.parent.mkdir(parents=True, exist_ok=True)
            writer.save_image(image_array, fpath)
        writer.wait_until_done()
        for i, fpath in enumerate(fpaths):
            assert fpath.exists()
            saved_image = np.array(Image.open(fpath))
            assert np.array_equal(saved_image, image_arrays[i])
    finally:
        writer.stop()


def test_exception_handling(tmp_path, img_array_factory):
    # A full queue must propagate to the caller of save_image.
    writer = AsyncImageWriter()
    try:
        image_array = img_array_factory()
        with (
            patch.object(writer.queue, "put", side_effect=queue.Full("Queue is full")),
            pytest.raises(queue.Full) as exc_info,
        ):
            writer.save_image(image_array, tmp_path / "test.png")
        assert str(exc_info.value) == "Queue is full"
    finally:
        writer.stop()


def test_with_different_image_formats(tmp_path, img_array_factory):
    writer = AsyncImageWriter()
    try:
        image_array = img_array_factory()
        formats = ["png", "jpeg", "bmp"]
        for fmt in formats:
            fpath = tmp_path / f"test_image.{fmt}"
            write_image(image_array, fpath)
            assert fpath.exists()
    finally:
        writer.stop()


def test_safe_stop_image_writer_decorator():
    # The decorator must stop the dataset's writer even when the wrapped
    # function raises, then re-raise the original exception.
    class MockDataset:
        def __init__(self):
            self.image_writer = MagicMock(spec=AsyncImageWriter)

    @safe_stop_image_writer
    def function_that_raises_exception(dataset=None):
        raise Exception("Test exception")

    dataset = MockDataset()
    with pytest.raises(Exception) as exc_info:
        function_that_raises_exception(dataset=dataset)
    assert str(exc_info.value) == "Test exception"
    dataset.image_writer.stop.assert_called_once()


def test_main_process_time(tmp_path, img_tensor_factory):
    # Queueing an image must be near-instant for the caller; the actual
    # encoding/writing happens on worker threads.
    writer = AsyncImageWriter()
    try:
        image_tensor = img_tensor_factory()
        fpath = tmp_path / DUMMY_IMAGE
        start_time = time.perf_counter()
        writer.save_image(image_tensor, fpath)
        end_time = time.perf_counter()
        time_spent = end_time - start_time
        # Might need to adjust this threshold depending on hardware
        assert time_spent < 0.01, f"Main process time exceeded threshold: {time_spent}s"
        writer.wait_until_done()
        assert fpath.exists()
    finally:
        writer.stop()
lerobot/tests/datasets/test_image_writer.py/0
{ "file_path": "lerobot/tests/datasets/test_image_writer.py", "repo_id": "lerobot", "token_count": 5558 }
215
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import threading
import time

from mock_serial.mock_serial import Stub


class WaitableStub(Stub):
    """A `Stub` whose invocation can be waited on.

    `MockSerial` matches and invokes stubs on a background thread, so a test
    that checks a stub right after sending data can race that thread and fail
    intermittently. `wait_called()` blocks until the first invocation, and
    `wait_calls()` polls until a minimum call count is reached, eliminating
    the race.

    Proposed fix: https://github.com/benthorner/mock_serial/pull/3
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Set on the first invocation; never cleared afterwards.
        self._called_event = threading.Event()

    def call(self):
        self._called_event.set()
        return super().call()

    def wait_called(self, timeout: float = 1.0):
        """Block until the stub has been invoked at least once.

        Returns True if it was invoked within `timeout` seconds, False otherwise.
        """
        return self._called_event.wait(timeout)

    def wait_calls(self, min_calls: int = 1, timeout: float = 1.0):
        """Poll until the stub has been invoked at least `min_calls` times.

        Returns the observed call count on success; raises TimeoutError when
        the count is not reached within `timeout` seconds.
        """
        deadline = time.perf_counter() + timeout
        while time.perf_counter() < deadline:
            if self.calls >= min_calls:
                return self.calls
            time.sleep(0.005)  # short poll interval keeps latency low
        raise TimeoutError(f"Stub not called {min_calls} times within {timeout} seconds.")
lerobot/tests/mocks/mock_serial_patch.py/0
{ "file_path": "lerobot/tests/mocks/mock_serial_patch.py", "repo_id": "lerobot", "token_count": 609 }
216
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the actor side of the async RL actor/learner transport."""

from concurrent import futures
from unittest.mock import patch

import pytest
import torch
from torch.multiprocessing import Event, Queue

from lerobot.utils.transition import Transition
from tests.utils import require_package


def create_learner_service_stub():
    """Start a LearnerService gRPC server on a free port and return a connected stub.

    Returns (stub, servicer, channel, server); callers must clean up with
    `close_service_stub(channel, server)`.
    """
    import grpc

    from lerobot.transport import services_pb2, services_pb2_grpc

    class MockLearnerService(services_pb2_grpc.LearnerServiceServicer):
        # Minimal servicer: only Ready() is exercised; `should_fail` lets a
        # test simulate an unavailable learner.
        def __init__(self):
            self.ready_call_count = 0
            self.should_fail = False

        def Ready(self, request, context):  # noqa: N802
            self.ready_call_count += 1
            if self.should_fail:
                context.set_code(grpc.StatusCode.UNAVAILABLE)
                context.set_details("Service unavailable")
                raise grpc.RpcError("Service unavailable")
            return services_pb2.Empty()

    servicer = MockLearnerService()
    # Create a gRPC server and add our servicer to it.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    services_pb2_grpc.add_LearnerServiceServicer_to_server(servicer, server)
    port = server.add_insecure_port("[::]:0")  # bind to a free port chosen by OS
    server.start()  # start the server (non-blocking call)

    # Create a client channel and stub connected to the server's port.
    channel = grpc.insecure_channel(f"localhost:{port}")
    return services_pb2_grpc.LearnerServiceStub(channel), servicer, channel, server


def close_service_stub(channel, server):
    """Tear down the channel and server created by `create_learner_service_stub`."""
    channel.close()
    server.stop(None)


@require_package("grpc")
def test_establish_learner_connection_success():
    """Test successful connection establishment."""
    from lerobot.scripts.rl.actor import establish_learner_connection

    stub, _servicer, channel, server = create_learner_service_stub()
    shutdown_event = Event()

    # Test successful connection
    result = establish_learner_connection(stub, shutdown_event, attempts=5)
    assert result is True

    close_service_stub(channel, server)


@require_package("grpc")
def test_establish_learner_connection_failure():
    """Test connection failure."""
    from lerobot.scripts.rl.actor import establish_learner_connection

    stub, servicer, channel, server = create_learner_service_stub()
    servicer.should_fail = True
    shutdown_event = Event()

    # Test failed connection
    with patch("time.sleep"):  # Speed up the test
        result = establish_learner_connection(stub, shutdown_event, attempts=2)
    assert result is False

    close_service_stub(channel, server)


@require_package("grpc")
def test_push_transitions_to_transport_queue():
    """Test pushing transitions to transport queue."""
    from lerobot.scripts.rl.actor import push_transitions_to_transport_queue
    from lerobot.transport.utils import bytes_to_transitions

    from tests.transport.test_transport_utils import assert_transitions_equal

    # Create mock transitions
    transitions = []
    for i in range(3):
        transition = Transition(
            state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)},
            action=torch.randn(5),
            reward=torch.tensor(1.0 + i),
            done=torch.tensor(False),
            truncated=torch.tensor(False),
            next_state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)},
            complementary_info={"step": torch.tensor(i)},
        )
        transitions.append(transition)

    transitions_queue = Queue()

    # Test pushing transitions
    push_transitions_to_transport_queue(transitions, transitions_queue)

    # Verify the data can be retrieved and round-trips through serialization
    serialized_data = transitions_queue.get()
    assert isinstance(serialized_data, bytes)
    deserialized_transitions = bytes_to_transitions(serialized_data)
    assert len(deserialized_transitions) == len(transitions)
    for i, deserialized_transition in enumerate(deserialized_transitions):
        assert_transitions_equal(deserialized_transition, transitions[i])


@require_package("grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_transitions_stream():
    """Test transitions stream functionality."""
    from lerobot.scripts.rl.actor import transitions_stream

    shutdown_event = Event()
    transitions_queue = Queue()

    # Add test data to queue
    test_data = [b"transition_data_1", b"transition_data_2", b"transition_data_3"]
    for data in test_data:
        transitions_queue.put(data)

    # Collect streamed data
    streamed_data = []
    stream_generator = transitions_stream(shutdown_event, transitions_queue, 0.1)

    # Process a few items, then signal shutdown to stop the generator
    for i, message in enumerate(stream_generator):
        streamed_data.append(message)
        if i >= len(test_data) - 1:
            shutdown_event.set()
            break

    # Verify we got messages, in FIFO order
    assert len(streamed_data) == len(test_data)
    assert streamed_data[0].data == b"transition_data_1"
    assert streamed_data[1].data == b"transition_data_2"
    assert streamed_data[2].data == b"transition_data_3"


@require_package("grpc")
@pytest.mark.timeout(3)  # force cross-platform watchdog
def test_interactions_stream():
    """Test interactions stream functionality."""
    from lerobot.scripts.rl.actor import interactions_stream
    from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes

    shutdown_event = Event()
    interactions_queue = Queue()

    # Create test interaction data (similar structure to what would be sent)
    test_interactions = [
        {"episode_reward": 10.5, "step": 1, "policy_fps": 30.2},
        {"episode_reward": 15.2, "step": 2, "policy_fps": 28.7},
        {"episode_reward": 8.7, "step": 3, "policy_fps": 29.1},
    ]

    # Serialize and enqueue the interaction data as it would be in practice.
    # Fix: this was a side-effect list comprehension producing a list of Nones.
    for interaction in test_interactions:
        interactions_queue.put(python_object_to_bytes(interaction))

    # Collect streamed data
    streamed_data = []
    stream_generator = interactions_stream(shutdown_event, interactions_queue, 0.1)

    # Process the items, then signal shutdown to stop the generator
    for i, message in enumerate(stream_generator):
        streamed_data.append(message)
        if i >= len(test_interactions) - 1:
            shutdown_event.set()
            break

    # Verify we got messages
    assert len(streamed_data) == len(test_interactions)

    # Verify the messages can be deserialized back to original data
    for i, message in enumerate(streamed_data):
        deserialized_interaction = bytes_to_python_object(message.data)
        assert deserialized_interaction == test_interactions[i]
lerobot/tests/rl/test_actor.py/0
{ "file_path": "lerobot/tests/rl/test_actor.py", "repo_id": "lerobot", "token_count": 2616 }
217
.PHONY: style quality # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := src tests # dev dependencies install: uv venv openr1 --python 3.11 . openr1/bin/activate && uv pip install --upgrade pip && \ uv pip install vllm==0.8.5.post1 && \ uv pip install setuptools && \ uv pip install flash-attn --no-build-isolation && \ GIT_LFS_SKIP_SMUDGE=1 uv pip install -e ".[dev]" style: ruff format --line-length 119 --target-version py310 $(check_dirs) setup.py isort $(check_dirs) setup.py quality: ruff check --line-length 119 --target-version py310 $(check_dirs) setup.py isort --check-only $(check_dirs) setup.py flake8 --max-line-length 119 $(check_dirs) setup.py test: pytest -sv --ignore=tests/slow/ tests/ slow_test: pytest -sv -vv tests/slow/ # Evaluation evaluate: $(eval PARALLEL_ARGS := $(if $(PARALLEL),$(shell \ if [ "$(PARALLEL)" = "data" ]; then \ echo "data_parallel_size=$(NUM_GPUS)"; \ elif [ "$(PARALLEL)" = "tensor" ]; then \ echo "tensor_parallel_size=$(NUM_GPUS)"; \ fi \ ),)) $(if $(filter tensor,$(PARALLEL)),export VLLM_WORKER_MULTIPROC_METHOD=spawn &&,) \ MODEL_ARGS="pretrained=$(MODEL),dtype=bfloat16,$(PARALLEL_ARGS),max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" && \ if [ "$(TASK)" = "lcb" ]; then \ lighteval vllm $$MODEL_ARGS "extended|lcb:codegeneration|0|0" \ --use-chat-template \ --output-dir data/evals/$(MODEL); \ else \ lighteval vllm $$MODEL_ARGS "lighteval|$(TASK)|0|0" \ --use-chat-template \ --output-dir data/evals/$(MODEL); \ fi
open-r1/Makefile/0
{ "file_path": "open-r1/Makefile", "repo_id": "open-r1", "token_count": 696 }
218
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Adapted from huggingface/transformers: https://github.com/huggingface/transformers/blob/21a2d900eceeded7be9edc445b56877b95eda4ca/setup.py import re import shutil from pathlib import Path from setuptools import find_packages, setup # Remove stale open_r1.egg-info directory to avoid https://github.com/pypa/pip/issues/5466 stale_egg_info = Path(__file__).parent / "open_r1.egg-info" if stale_egg_info.exists(): print( ( "Warning: {} exists.\n\n" "If you recently updated open_r1, this is expected,\n" "but it may prevent open_r1 from installing in editable mode.\n\n" "This directory is automatically generated by Python's packaging tools.\n" "I will remove it now.\n\n" "See https://github.com/pypa/pip/issues/5466 for details.\n" ).format(stale_egg_info) ) shutil.rmtree(stale_egg_info) # IMPORTANT: all dependencies should be listed here with their version requirements, if any. # * If a dependency is fast-moving (e.g. 
trl), pin to the exact version _deps = [ "accelerate==1.4.0", "bitsandbytes>=0.43.0", "datasets>=3.2.0", "deepspeed==0.16.8", "distilabel[vllm,ray,openai]>=1.5.2", "e2b-code-interpreter>=1.0.5", "einops>=0.8.0", "flake8>=6.0.0", "hf_transfer>=0.1.4", "huggingface-hub[cli,hf_xet]>=0.30.2,<1.0", "isort>=5.12.0", "jieba", # Needed for Chinese language support "langdetect", # Needed for LightEval's extended tasks "latex2sympy2_extended>=1.0.6", "liger-kernel>=0.5.10", "lighteval @ git+https://github.com/huggingface/lighteval.git@d3da6b9bbf38104c8b5e1acc86f83541f9a502d1", # Critical bug fix for tokenizer revisions: https://github.com/huggingface/lighteval/pull/721 "math-verify==0.5.2", # Used for math verification in grpo "morphcloud==0.1.67", "packaging>=23.0", "parameterized>=0.9.0", "peft>=0.14.0", "pytest", "python-dotenv", "ruff>=0.9.0", "safetensors>=0.3.3", "sentencepiece>=0.1.99", "torch==2.6.0", "transformers==4.52.3", "trl[vllm]==0.18.0", "wandb>=0.19.1", "async-lru>=2.0.5", "aiofiles>=24.1.0", "pandas>=2.2.3", ] # this is a lookup table with items like: # # tokenizers: "tokenizers==0.9.4" # packaging: "packaging" # # some of the values are versioned whereas others aren't. 
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ \[\]]+)(?:\[[^\]]+\])?(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)} def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] extras = {} extras["tests"] = deps_list("pytest", "parameterized", "math-verify", "jieba") extras["torch"] = deps_list("torch") extras["quality"] = deps_list("ruff", "isort", "flake8") extras["code"] = deps_list("e2b-code-interpreter", "python-dotenv", "morphcloud", "jieba", "pandas", "aiofiles") extras["eval"] = deps_list("lighteval", "math-verify") extras["dev"] = extras["quality"] + extras["tests"] + extras["eval"] + extras["code"] # core dependencies shared across the whole project - keep this to a bare minimum :) install_requires = [ deps["accelerate"], deps["bitsandbytes"], deps["einops"], deps["datasets"], deps["deepspeed"], deps["hf_transfer"], deps["huggingface-hub"], deps["langdetect"], deps["latex2sympy2_extended"], deps["math-verify"], deps["liger-kernel"], deps["packaging"], # utilities from PyPA to e.g., compare versions deps["safetensors"], deps["sentencepiece"], deps["transformers"], deps["trl"], deps["wandb"], deps["async-lru"], ] setup( name="open-r1", version="0.1.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future)", author_email="lewis@huggingface.co", description="Open R1", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="llm inference-time compute reasoning", license="Apache", url="https://github.com/huggingface/open-r1", package_dir={"": "src"}, packages=find_packages("src"), zip_safe=False, extras_require=extras, python_requires=">=3.10.9", install_requires=install_requires, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS 
Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], )
open-r1/setup.py/0
{ "file_path": "open-r1/setup.py", "repo_id": "open-r1", "token_count": 2235 }
219
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from distilabel.llms import OpenAILLM from distilabel.pipeline import Pipeline from distilabel.steps import StepResources from distilabel.steps.tasks import TextGeneration def build_distilabel_pipeline( model: str, base_url: str = "http://localhost:8000/v1", prompt_column: Optional[str] = None, prompt_template: str = "{{ instruction }}", temperature: Optional[float] = None, top_p: Optional[float] = None, max_new_tokens: int = 8192, num_generations: int = 1, input_batch_size: int = 64, client_replicas: int = 1, timeout: int = 900, retries: int = 0, ) -> Pipeline: generation_kwargs = {"max_new_tokens": max_new_tokens} if temperature is not None: generation_kwargs["temperature"] = temperature if top_p is not None: generation_kwargs["top_p"] = top_p with Pipeline().ray() as pipeline: TextGeneration( llm=OpenAILLM( base_url=base_url, api_key="something", model=model, timeout=timeout, max_retries=retries, generation_kwargs=generation_kwargs, ), template=prompt_template, input_mappings=({"instruction": prompt_column} if prompt_column is not None else {}), input_batch_size=input_batch_size, num_generations=num_generations, group_generations=True, resources=StepResources(replicas=client_replicas), ) return pipeline if __name__ == "__main__": import argparse from datasets import load_dataset parser = argparse.ArgumentParser(description="Run distilabel 
pipeline for generating responses with DeepSeek R1") parser.add_argument( "--hf-dataset", type=str, required=True, help="HuggingFace dataset to load", ) parser.add_argument( "--hf-dataset-config", type=str, required=False, help="Dataset config to use", ) parser.add_argument( "--hf-dataset-split", type=str, default="train", help="Dataset split to use", ) parser.add_argument( "--prompt-column", type=str, default="prompt", ) parser.add_argument( "--prompt-template", type=str, default="{{ instruction }}", help="Template string for formatting prompts.", ) parser.add_argument( "--model", type=str, required=True, help="Model name to use for generation", ) parser.add_argument( "--vllm-server-url", type=str, default="http://localhost:8000/v1", help="URL of the vLLM server", ) parser.add_argument( "--temperature", type=float, help="Temperature for generation", ) parser.add_argument( "--top-p", type=float, help="Top-p value for generation", ) parser.add_argument( "--max-new-tokens", type=int, default=8192, help="Maximum number of new tokens to generate", ) parser.add_argument( "--num-generations", type=int, default=1, help="Number of generations per problem", ) parser.add_argument( "--input-batch-size", type=int, default=64, help="Batch size for input processing", ) parser.add_argument( "--client-replicas", type=int, default=1, help="Number of client replicas for parallel processing", ) parser.add_argument( "--timeout", type=int, default=600, help="Request timeout in seconds (default: 600)", ) parser.add_argument( "--retries", type=int, default=0, help="Number of retries for failed requests (default: 0)", ) parser.add_argument( "--hf-output-dataset", type=str, required=False, help="HuggingFace repo to push results to", ) parser.add_argument( "--private", action="store_true", help="Whether to make the output dataset private when pushing to HF Hub", ) args = parser.parse_args() print("\nRunning with arguments:") for arg, value in vars(args).items(): print(f" {arg}: {value}") 
print() print(f"Loading '{args.hf_dataset}' (config: {args.hf_dataset_config}, split: {args.hf_dataset_split}) dataset...") dataset = load_dataset(args.hf_dataset, args.hf_dataset_config, split=args.hf_dataset_split) print("Dataset loaded!") pipeline = build_distilabel_pipeline( model=args.model, base_url=args.vllm_server_url, prompt_template=args.prompt_template, prompt_column=args.prompt_column, temperature=args.temperature, top_p=args.top_p, max_new_tokens=args.max_new_tokens, num_generations=args.num_generations, input_batch_size=args.input_batch_size, client_replicas=args.client_replicas, timeout=args.timeout, retries=args.retries, ) print("Running generation pipeline...") distiset = pipeline.run( dataset=dataset, dataset_batch_size=args.input_batch_size * 1000, use_cache=False, ) print("Generation pipeline finished!") if args.hf_output_dataset: print(f"Pushing resulting dataset to '{args.hf_output_dataset}'...") distiset.push_to_hub(args.hf_output_dataset, private=args.private) print("Dataset pushed!")
open-r1/src/open_r1/generate.py/0
{ "file_path": "open-r1/src/open_r1/generate.py", "repo_id": "open-r1", "token_count": 2696 }
220
import subprocess from typing import TYPE_CHECKING, Dict, Union from .hub import get_gpu_count_for_vllm, get_param_count_from_repo_id if TYPE_CHECKING: from trl import GRPOConfig, SFTConfig, ModelConfig import base64 import os # We need a special environment setup to launch vLLM from within Slurm training jobs. # - Reference code: https://github.com/huggingface/brrr/blob/c55ba3505686d690de24c7ace6487a5c1426c0fd/brrr/lighteval/one_job_runner.py#L105 # - Slack thread: https://huggingface.slack.com/archives/C043JTYE1MJ/p1726566494958269 user_home_directory = os.path.expanduser("~") VLLM_SLURM_PREFIX = [ "env", "-i", "bash", "-c", f"for f in /etc/profile.d/*.sh; do source $f; done; export HOME={user_home_directory}; sbatch ", ] def register_lighteval_task( configs: Dict[str, str], eval_suite: str, task_name: str, task_list: str, num_fewshot: int = 0, ): """Registers a LightEval task configuration. - Core tasks can be added from this table: https://github.com/huggingface/lighteval/blob/main/src/lighteval/tasks/tasks_table.jsonl - Custom tasks that require their own metrics / scripts, should be stored in scripts/evaluation/extended_lighteval_tasks Args: configs (Dict[str, str]): The dictionary to store the task configuration. eval_suite (str, optional): The evaluation suite. task_name (str): The name of the task. task_list (str): The comma-separated list of tasks in the format "extended|{task_name}|{num_fewshot}|0" or "lighteval|{task_name}|{num_fewshot}|0". num_fewshot (int, optional): The number of few-shot examples. Defaults to 0. is_custom_task (bool, optional): Whether the task is a custom task. Defaults to False. 
""" # Format task list in lighteval format task_list = ",".join(f"{eval_suite}|{task}|{num_fewshot}|0" for task in task_list.split(",")) configs[task_name] = task_list LIGHTEVAL_TASKS = {} register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "math_500", "math_500", 0) register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "aime24", "aime24", 0) register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "aime25", "aime25", 0) register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "gpqa", "gpqa:diamond", 0) register_lighteval_task(LIGHTEVAL_TASKS, "extended", "lcb", "lcb:codegeneration", 0) register_lighteval_task(LIGHTEVAL_TASKS, "extended", "lcb_v4", "lcb:codegeneration_v4", 0) def get_lighteval_tasks(): return list(LIGHTEVAL_TASKS.keys()) SUPPORTED_BENCHMARKS = get_lighteval_tasks() def run_lighteval_job( benchmark: str, training_args: Union["SFTConfig", "GRPOConfig"], model_args: "ModelConfig", ) -> None: task_list = LIGHTEVAL_TASKS[benchmark] model_name = training_args.hub_model_id model_revision = training_args.hub_model_revision # For large models >= 30b params or those running the MATH benchmark, we need to shard them across the GPUs to avoid OOM num_gpus = get_gpu_count_for_vllm(model_name, model_revision) if get_param_count_from_repo_id(model_name) >= 30_000_000_000: tensor_parallel = True else: num_gpus = 2 # Hack while cluster is full tensor_parallel = False cmd = VLLM_SLURM_PREFIX.copy() cmd_args = [ f"--gres=gpu:{num_gpus}", f"--job-name=or1_{benchmark}_{model_name.split('/')[-1]}_{model_revision}", "slurm/evaluate.slurm", benchmark, f'"{task_list}"', model_name, model_revision, f"{tensor_parallel}", f"{model_args.trust_remote_code}", ] if training_args.system_prompt is not None: # encode to base64 to avoid issues with special characters # we decode in the sbatch script prompt_encoded = base64.b64encode(training_args.system_prompt.encode()).decode() cmd_args.append(prompt_encoded) cmd[-1] += " " + " ".join(cmd_args) subprocess.run(cmd, check=True) def 
run_benchmark_jobs(training_args: Union["SFTConfig", "GRPOConfig"], model_args: "ModelConfig") -> None: benchmarks = training_args.benchmarks if len(benchmarks) == 1 and benchmarks[0] == "all": benchmarks = get_lighteval_tasks() # Evaluate on all supported benchmarks. Later we may want to include a `chat` option # that just evaluates on `ifeval` and `mt_bench` etc. for benchmark in benchmarks: print(f"Launching benchmark `{benchmark}`") if benchmark in get_lighteval_tasks(): run_lighteval_job(benchmark, training_args, model_args) else: raise ValueError(f"Unknown benchmark {benchmark}")
open-r1/src/open_r1/utils/evaluation.py/0
{ "file_path": "open-r1/src/open_r1/utils/evaluation.py", "repo_id": "open-r1", "token_count": 1879 }
221
# Builds GPU docker image of PyTorch # Uses multi-staged approach to reduce size # Stage 1 # Use base conda image to reduce time FROM continuumio/miniconda3:latest AS compile-image # Specify py version ENV PYTHON_VERSION=3.11 # Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile RUN apt-get update && \ apt-get install -y curl git wget software-properties-common git-lfs && \ apt-get clean && \ rm -rf /var/lib/apt/lists* # Install audio-related libraries RUN apt-get update && \ apt install -y ffmpeg RUN apt install -y libsndfile1-dev RUN git lfs install # Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile RUN conda create --name peft python=${PYTHON_VERSION} ipython jupyter pip RUN python3 -m pip install --no-cache-dir --upgrade pip # Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile # We don't install pytorch here yet since CUDA isn't available # instead we use the direct torch wheel ENV PATH /opt/conda/envs/peft/bin:$PATH # Activate our bash shell RUN chsh -s /bin/bash SHELL ["/bin/bash", "-c"] # Stage 2 FROM nvidia/cuda:12.6.3-devel-ubuntu22.04 AS build-image COPY --from=compile-image /opt/conda /opt/conda ENV PATH /opt/conda/bin:$PATH RUN chsh -s /bin/bash SHELL ["/bin/bash", "-c"] # Install apt libs RUN apt-get update && \ apt-get install -y curl git wget cmake && \ apt-get clean && \ rm -rf /var/lib/apt/lists* # Activate the conda env and install transformers + accelerate from latest pypi # Also clone BNB and build it from source. RUN source activate peft && \ python3 -m pip install -U --no-cache-dir \ librosa \ "soundfile>=0.12.1" \ scipy \ transformers \ accelerate \ peft \ optimum \ auto-gptq && \ git clone https://github.com/bitsandbytes-foundation/bitsandbytes && cd bitsandbytes && \ cmake -B . -DCOMPUTE_BACKEND=cuda -S . && \ cmake --build . && \ pip install -e . 
&& \ pip freeze | grep bitsandbytes RUN echo "source activate peft" >> ~/.profile # Activate the virtualenv CMD ["/bin/bash"]
peft/docker/peft-gpu-bnb-latest/Dockerfile/0
{ "file_path": "peft/docker/peft-gpu-bnb-latest/Dockerfile", "repo_id": "peft", "token_count": 817 }
222
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LoRA LoRA is low-rank decomposition method to reduce the number of trainable parameters which speeds up finetuning large models and uses less memory. In PEFT, using LoRA is as easy as setting up a [`LoraConfig`] and wrapping it with [`get_peft_model`] to create a trainable [`PeftModel`]. This guide explores in more detail other options and features for using LoRA. ## Initialization The initialization of LoRA weights is controlled by the parameter `init_lora_weights` in [`LoraConfig`]. By default, PEFT initializes LoRA weights with Kaiming-uniform for weight A and zeros for weight B resulting in an identity transform (same as the reference [implementation](https://github.com/microsoft/LoRA)). It is also possible to pass `init_lora_weights="gaussian"`. As the name suggests, this initializes weight A with a Gaussian distribution and zeros for weight B (this is how [Diffusers](https://huggingface.co/docs/diffusers/index) initializes LoRA weights). ```py from peft import LoraConfig config = LoraConfig(init_lora_weights="gaussian", ...) ``` There is also an option to set `init_lora_weights=False` which is useful for debugging and testing. This should be the only time you use this option. 
When choosing this option, the LoRA weights are initialized such that they do *not* result in an identity transform. ```py from peft import LoraConfig config = LoraConfig(init_lora_weights=False, ...) ``` ### PiSSA [PiSSA](https://huggingface.co/papers/2404.02948) initializes the LoRA adapter using the principal singular values and singular vectors. This straightforward modification allows PiSSA to converge more rapidly than LoRA and ultimately attain superior performance. Moreover, PiSSA reduces the quantization error compared to QLoRA, leading to further enhancements. Configure the initialization method to "pissa", which may take several minutes to execute SVD on the pre-trained model: ```python from peft import LoraConfig config = LoraConfig(init_lora_weights="pissa", ...) ``` Alternatively, execute fast SVD, which takes only a few seconds. The number of iterations determines the trade-off between the error and computation time: ```python lora_config = LoraConfig(init_lora_weights="pissa_niter_[number of iters]", ...) ``` For detailed instruction on using PiSSA, please follow [these instructions](https://github.com/huggingface/peft/tree/main/examples/pissa_finetuning). ### CorDA [CorDA](https://huggingface.co/papers/2406.05223) builds task-aware LoRA adapters from weight decomposition oriented by the context of downstream task to learn (instruction-previewed mode, IPM) or world knowledge to maintain (knowledge-preserved mode, KPM). The KPM not only achieves better performance than LoRA on fine-tuning tasks, but also mitigates the catastrophic forgetting of pre-trained world knowledge. When preserving pre-trained knowledge is not a concern, the IPM is favored because it can further accelerate convergence and enhance the fine-tuning performance. You need to configure the initialization method to "corda", and specify the mode of IPM or KPM and the dataset to collect covariance matrices. 
```py @torch.no_grad() def run_model(): # Assume `model` and `dataset` is in context... model.eval() for batch in dataset: model(**batch) corda_config = CordaConfig( corda_method="kpm", ) lora_config = LoraConfig( init_lora_weights="corda", corda_config=corda_config, ) preprocess_corda(model, lora_config, run_model=run_model) peft_model = get_peft_model(model, lora_config) ``` For detailed instruction on using CorDA, please follow [these instructions](https://github.com/huggingface/peft/tree/main/examples/corda_finetuning). ### OLoRA [OLoRA](https://huggingface.co/papers/2406.01775) utilizes QR decomposition to initialize the LoRA adapters. OLoRA translates the base weights of the model by a factor of their QR decompositions, i.e., it mutates the weights before performing any training on them. This approach significantly improves stability, accelerates convergence speed, and ultimately achieves superior performance. You just need to pass a single additional option to use OLoRA: ```python from peft import LoraConfig config = LoraConfig(init_lora_weights="olora", ...) ``` For more advanced usage, please refer to our [documentation](https://github.com/huggingface/peft/tree/main/examples/olora_finetuning). ### EVA [EVA](https://huggingface.co/papers/2410.07170) performs SVD on the input activations of each layer and uses the right-singular vectors to initialize LoRA weights. It is therefore a data-driven initialization scheme. Furthermore EVA adaptively allocates ranks across layers based on their "explained variance ratio" - a metric derived from the SVD analysis. You can use EVA by setting `init_lora_weights="eva"` and defining [`EvaConfig`] in [`LoraConfig`]: ```python from peft import LoraConfig, EvaConfig peft_config = LoraConfig( init_lora_weights = "eva", eva_config = EvaConfig(rho = 2.0), ... ) ``` The parameter `rho` (≥ 1.0) determines how much redistribution is allowed. 
When `rho=1.0` and `r=16`, LoRA adapters are limited to exactly 16 ranks, preventing any redistribution from occurring. A recommended value for EVA with redistribution is 2.0, meaning the maximum rank allowed for a layer is 2r. It is recommended to perform EVA initialization on an accelerator(e.g. CUDA GPU, Intel XPU) as it is much faster. To optimize the amount of available memory for EVA, you can use the `low_cpu_mem_usage` flag in [`get_peft_model`]: ```python peft_model = get_peft_model(model, peft_config, low_cpu_mem_usage=True) ``` Then, call [`initialize_lora_eva_weights`] to initialize the EVA weights (in most cases the dataloader used for eva initialization can be the same as the one used for finetuning): ```python initialize_lora_eva_weights(peft_model, dataloader) ``` EVA works out of the box with bitsandbytes. Simply initialize the model with `quantization_config` and call [`initialize_lora_eva_weights`] as usual. <Tip> For further instructions on using EVA, please refer to our [documentation](https://github.com/huggingface/peft/tree/main/examples/eva_finetuning). </Tip> ### LoftQ #### Standard approach When quantizing the base model for QLoRA training, consider using the [LoftQ initialization](https://huggingface.co/papers/2310.08659), which has been shown to improve performance when training quantized models. The idea is that the LoRA weights are initialized such that the quantization error is minimized. To use LoftQ, follow [these instructions](https://github.com/huggingface/peft/tree/main/examples/loftq_finetuning). In general, for LoftQ to work best, it is recommended to target as many layers with LoRA as possible, since those not targeted cannot have LoftQ applied. This means that passing `LoraConfig(..., target_modules="all-linear")` will most likely give the best results. Also, you should use `nf4` as quant type in your quantization config when using 4bit quantization, i.e. `BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")`. 
#### A more convenient way An easier but more limited way to apply LoftQ initialization is to use the convenience function `replace_lora_weights_loftq`. This takes the quantized PEFT model as input and replaces the LoRA weights in-place with their LoftQ-initialized counterparts. ```python from peft import replace_lora_weights_loftq from transformers import BitsAndBytesConfig bnb_config = BitsAndBytesConfig(load_in_4bit=True, ...) base_model = AutoModelForCausalLM.from_pretrained(..., quantization_config=bnb_config) # note: don't pass init_lora_weights="loftq" or loftq_config! lora_config = LoraConfig(task_type="CAUSAL_LM") peft_model = get_peft_model(base_model, lora_config) replace_lora_weights_loftq(peft_model) ``` `replace_lora_weights_loftq` also allows you to pass a `callback` argument to give you more control over which layers should be modified or not, which empirically can improve the results quite a lot. To see a more elaborate example of this, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/loftq_finetuning/LoftQ_weight_replacement.ipynb). `replace_lora_weights_loftq` implements only one iteration step of LoftQ. This means that only the LoRA weights are updated, instead of iteratively updating LoRA weights and quantized base model weights. This may lead to lower performance but has the advantage that we can use the original quantized weights derived from the base model, instead of having to keep an extra copy of modified quantized weights. Whether this tradeoff is worthwhile depends on the use case. At the moment, `replace_lora_weights_loftq` has these additional limitations: - Model files must be stored as a `safetensors` file. - Only bitsandbytes 4bit quantization is supported. <Tip> Learn more about how PEFT works with quantization in the [Quantization](quantization) guide. 
</Tip> ### Rank-stabilized LoRA Another way to initialize [`LoraConfig`] is with the [rank-stabilized LoRA (rsLoRA)](https://huggingface.co/papers/2312.03732) method. The LoRA architecture scales each adapter during every forward pass by a fixed scalar which is set at initialization and depends on the rank `r`. The scalar is given by `lora_alpha/r` in the original implementation, but rsLoRA uses `lora_alpha/math.sqrt(r)` which stabilizes the adapters and increases the performance potential from using a higher `r`. ```py from peft import LoraConfig config = LoraConfig(use_rslora=True, ...) ``` ### Weight-Decomposed Low-Rank Adaptation (DoRA) This technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, especially at low ranks. For more information on DoRA, see https://huggingface.co/papers/2402.09353. ```py from peft import LoraConfig config = LoraConfig(use_dora=True, ...) ``` If parts of the model or the DoRA adapter are offloaded to CPU you can get a significant speedup at the cost of some temporary (ephemeral) VRAM overhead by using `ephemeral_gpu_offload=True` in `config.runtime_config`. ```py from peft import LoraConfig, LoraRuntimeConfig config = LoraConfig(use_dora=True, runtime_config=LoraRuntimeConfig(ephemeral_gpu_offload=True), ...) ``` A `PeftModel` with a DoRA adapter can also be loaded with `ephemeral_gpu_offload=True` flag using the `from_pretrained` method as well as the `load_adapter` method. ```py from peft import PeftModel model = PeftModel.from_pretrained(base_model, peft_model_id, ephemeral_gpu_offload=True) ``` DoRA is optimized (computes faster and takes less memory) for models in the evaluation mode, or when dropout is set to 0. We reuse the base result at those times to get the speedup. 
Running [dora finetuning](https://github.com/huggingface/peft/blob/main/examples/dora_finetuning/dora_finetuning.py) with `CUDA_VISIBLE_DEVICES=0 ZE_AFFINITY_MASK=0 time python examples/dora_finetuning/dora_finetuning.py --quantize --lora_dropout 0 --batch_size 16 --eval_step 2 --use_dora` on a 4090 with gradient accumulation set to 2 and max step to 20 resulted with the following observations: | | Without Optimization | With Optimization | | :--: | :--: | :--: | | train_runtime | 359.7298 | **279.2676** | | train_samples_per_second | 1.779 | **2.292** | | train_steps_per_second | 0.056 | **0.072** | #### Caveats - DoRA only supports embedding, linear, and Conv2d layers at the moment. - DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for inference, see [`LoraModel.merge_and_unload`]. - DoRA should work with weights quantized with bitsandbytes ("QDoRA"). However, issues have been reported when using QDoRA with DeepSpeed Zero2. ### QLoRA-style training The default LoRA settings in PEFT add trainable weights to the query and value layers of each attention block. But [QLoRA](https://hf.co/papers/2305.14314), which adds trainable weights to all the linear layers of a transformer model, can provide performance equal to a fully finetuned model. To apply LoRA to all the linear layers, like in QLoRA, set `target_modules="all-linear"` (easier than specifying individual modules by name which can vary depending on the architecture). ```py config = LoraConfig(target_modules="all-linear", ...) ``` ### Memory efficient Layer Replication with LoRA An approach used to improve the performance of models is to expand a model by duplicating layers in the model to build a larger model from a pretrained model of a given size. For example increasing a 7B model to a 10B model as described in the [SOLAR](https://huggingface.co/papers/2312.15166) paper. 
PEFT LoRA supports this kind of expansion in a memory efficient manner that supports further fine-tuning using LoRA adapters attached to the layers post replication of the layers. The replicated layers do not take additional memory as they share the underlying weights so the only additional memory required is the memory for the adapter weights. To use this feature you would create a config with the `layer_replication` argument. ```py config = LoraConfig(layer_replication=[[0,4], [2,5]], ...) ``` Assuming the original model had 5 layers `[0, 1, 2 ,3, 4]`, this would create a model with 7 layers arranged as `[0, 1, 2, 3, 2, 3, 4]`. This follows the [mergekit](https://github.com/arcee-ai/mergekit) pass through merge convention where sequences of layers specified as start inclusive and end exclusive tuples are stacked to build the final model. Each layer in the final model gets its own distinct set of LoRA adapters. [Fewshot-Metamath-OrcaVicuna-Mistral-10B](https://huggingface.co/abacusai/Fewshot-Metamath-OrcaVicuna-Mistral-10B) is an example of a model trained using this method on Mistral-7B expanded to 10B. The [adapter_config.json](https://huggingface.co/abacusai/Fewshot-Metamath-OrcaVicuna-Mistral-10B/blob/main/adapter_config.json) shows a sample LoRA adapter config applying this method for fine-tuning. ### Fine grained control over ranks and alpha (scaling) By default, all layers targeted with LoRA will have the same rank `r` and the same `lora_alpha` (which determines the LoRA scaling), depending on what was specified in the [`LoraConfig`]. In some cases, however, you may want to indicate different values for different layers. This is possible by passing the `rank_pattern` and `alpha_pattern` arguments to [`LoraConfig`]. These arguments should be dictionaries with the key being the layer name and the value being the rank/alpha value. The keys can be [regular expressions](https://docs.python.org/3/library/re.html) (regex). 
All LoRA layers that are not explicitly mentioned in `rank_pattern` and `alpha_pattern` will take the default `r` and `lora_alpha` values. To give an example, let's assume that we have a model with the following structure: ```python >>> print(model) Outer( (foo): Linear(...) (module): Middle( (foo): Linear(...) (foobar): Linear(...) (module): Inner( (foo): Linear(...) (barfoo): Linear(...) ) ) ) ``` - `rank_pattern={"foo": 42}` will match all 3 `foo` layers. Neither `foobar` nor `barfoo` are matched. - `rank_pattern={"^foo": 42}` will only match the `foo` layer of the model, but neither `module.foo` nor `module.module.foo`. This is because the `^` means "start of string" when using regular expressions, and only `foo` starts with `"foo"`, the other layer names have prefixes. - `rank_pattern={"^module.foo": 42}` matches only `module.foo`, but not `module.module.foo`, for the same reason. - `rank_pattern={"module.foo": 42}` matches both `module.foo` and `module.module.foo`, but not `foo`. - `rank_pattern={"^foo": 42, "^module.module.foo": 55}` matches `foo` and `module.module.foo`, respectively, but not `module.foo`. - There is no need to indicate `$` to mark the end of the match, as this is added automatically by PEFT. The same logic applies to `alpha_pattern`. If you're in doubt, don't try to get fancy with regular expressions -- just pass the full name for each module with a different rank/alpha, preceded by the `^` prefix, and you should be good. ### Targeting `nn.Parameter` directly > [!WARNING] > This feature is experimental and subject to change. Generally, you should use `target_modules` to target the module (e.g. `nn.Linear`). However, in some circumstances, this is not possible. E.g., in many mixture of expert (MoE) layers in HF Transformers, instead of using `nn.Linear`, an `nn.Parameter` is used. PEFT normally overwrites the `forward` method for LoRA, but for `nn.Parameter`, there is none. 
Therefore, to apply LoRA to that parameter, it needs to be targeted with `target_parameters`. As an example, for [Llama4](https://huggingface.co/collections/meta-llama/llama-4-67f0c30d9fe03840bc9d0164), you can pass: `target_parameters=['feed_forward.experts.gate_up_proj', 'feed_forward.experts.down_proj']`.
model = get_peft_model(base_model, config) optimizer = create_lorafa_optimizer( model=model, r=128, lora_alpha=32, lr=7e-5, ) scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=100, num_training_steps=1000, ) trainer = Trainer( ..., optimizers=(optimizer, scheduler), ) ``` ### LoRA+ optimized LoRA LoRA training can be optimized using [LoRA+](https://huggingface.co/papers/2402.12354), which uses different learning rates for the adapter matrices A and B, shown to increase finetuning speed by up to 2x and performance by 1-2%. ```py from peft import LoraConfig, get_peft_model from peft.optimizers import create_loraplus_optimizer from transformers import Trainer import bitsandbytes as bnb base_model = ... config = LoraConfig(...) model = get_peft_model(base_model, config) optimizer = create_loraplus_optimizer( model=model, optimizer_cls=bnb.optim.Adam8bit, lr=5e-5, loraplus_lr_ratio=16, ) scheduler = None ... trainer = Trainer( ..., optimizers=(optimizer, scheduler), ) ``` ## Efficiently train tokens alongside LoRA Sometimes it is necessary to not only change some layer's weights but to add new tokens as well. With larger models this can be a memory-costly endeavour. PEFT LoRA adapters support the `trainable_token_indices` parameter which allows tuning of other tokens alongside fine-tuning of specific layers with LoRA. This method only trains the tokens you specify and leaves all other tokens untouched. This saves memory and doesn't throw away learned context of existing token embeddings in contrast to when training the whole embedding matrix. Under the hood this method uses the layer of [`TrainableTokensModel`]. ```py # for layer 'embed_tokens' config = LoraConfig(trainable_token_indices=[idx_1, idx_2, ...], ...) # specific embedding layer config = LoraConfig(trainable_token_indices={'emb_tokens': [idx_1, idx_2, ...]}, ...) 
``` In the snippet below we show how to add new tokens to the model and how to train it alongside the other layers in the model. ```py from transformers import AutoTokenizer, AutoModelForCausalLM from peft import get_peft_model, LoraConfig base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") # we define our new tokens and add them to the tokenizer as special tokens special_tokens = ['<|start_think|>', '<|stop_think|>'] tokenizer.add_special_tokens({'additional_special_tokens': special_tokens}) # make room for new tokens in the embedding matrix if it isn't big enough already base_model.resize_token_embeddings(max(len(tokenizer), base_model.model.embed_tokens.num_embeddings)) # typical LoRA config with `trainable_token_indices` targeting embedding layer `embed_tokens` # and specifically our new tokens we just added lora_config = LoraConfig( target_modules='all-linear', trainable_token_indices={'embed_tokens': tokenizer.convert_tokens_to_ids(special_tokens)}, ) peft_model = get_peft_model(base_model, lora_config) # proceed to train the model like normal [...] ``` The token weights are part of your adapter state dict and saved alongside the LoRA weights. If we would have used full fine-tuning with `modules_to_save=['embed_tokens']` we would have stored the full embedding matrix in the checkpoint, leading to a much bigger file. To give a bit of an indication how much VRAM can be saved, a rudimentary comparison of the above example was made between training the embedding matrix fully (`modules_to_save=["embed_tokens"]`), using a LoRA for the embedding matrix (`target_modules=[..., "embed_tokens"]`, rank 32) and trainable tokens (`trainable_token_indices=[...]`, 6 tokens). Trainable tokens used about as much VRAM (15,562MB vs. 15,581MB) as LoRA while being specific to the tokens and saved ~1GB of VRAM over fully training the embedding matrix. 
## Merge LoRA weights into the base model While LoRA is significantly smaller and faster to train, you may encounter latency issues during inference due to separately loading the base model and the LoRA adapter. To eliminate latency, use the [`~LoraModel.merge_and_unload`] function to merge the adapter weights with the base model. This allows you to use the newly merged model as a standalone model. The [`~LoraModel.merge_and_unload`] function doesn't keep the adapter weights in memory. Below is a diagram that explains the intuition of LoRA adapter merging: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_diagram.png"/> </div> We show in the snippets below how to run that using PEFT. ```py from transformers import AutoModelForCausalLM from peft import PeftModel base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") peft_model_id = "alignment-handbook/zephyr-7b-sft-lora" model = PeftModel.from_pretrained(base_model, peft_model_id) model.merge_and_unload() ``` If you need to keep a copy of the weights so you can unmerge the adapter later or delete and load different ones, you should use the [`~LoraModel.merge_adapter`] function instead. Now you have the option to use [`~LoraModel.unmerge_adapter`] to return the base model. ```py from transformers import AutoModelForCausalLM from peft import PeftModel base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") peft_model_id = "alignment-handbook/zephyr-7b-sft-lora" model = PeftModel.from_pretrained(base_model, peft_model_id) model.merge_adapter() # unmerge the LoRA layers from the base model model.unmerge_adapter() ``` The [`~LoraModel.add_weighted_adapter`] function is useful for merging multiple LoRAs into a new adapter based on a user provided weighting scheme in the `weights` parameter. Below is an end-to-end example. 
First load the base model: ```python from transformers import AutoModelForCausalLM from peft import PeftModel import torch base_model = AutoModelForCausalLM.from_pretrained( "mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16, device_map="auto" ) ``` Then we load the first adapter: ```python peft_model_id = "alignment-handbook/zephyr-7b-sft-lora" model = PeftModel.from_pretrained(base_model, peft_model_id, adapter_name="sft") ``` Then load a different adapter and merge it with the first one: ```python weighted_adapter_name = "sft-dpo" model.load_adapter("alignment-handbook/zephyr-7b-dpo-lora", adapter_name="dpo") model.add_weighted_adapter( adapters=["sft", "dpo"], weights=[0.7, 0.3], adapter_name=weighted_adapter_name, combination_type="linear" ) model.set_adapter(weighted_adapter_name) ``` <Tip> There are several supported methods for `combination_type`. Refer to the [documentation](../package_reference/lora#peft.LoraModel.add_weighted_adapter) for more details. Note that "svd" as the `combination_type` is not supported when using `torch.float16` or `torch.bfloat16` as the datatype. </Tip> Now, perform inference: ```python device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") prompt = "Hey, are you conscious? Can you talk to me?" inputs = tokenizer(prompt, return_tensors="pt") inputs = {k: v.to(device) for k, v in inputs.items()} with torch.no_grad(): generate_ids = model.generate(**inputs, max_length=30) outputs = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] print(outputs) ``` ## Load adapters Adapters can be loaded onto a pretrained model with [`~PeftModel.load_adapter`], which is useful for trying out different adapters whose weights aren't merged. Set the active adapter weights with the [`~LoraModel.set_adapter`] function. 
```py from transformers import AutoModelForCausalLM from peft import PeftModel base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") peft_model_id = "alignment-handbook/zephyr-7b-sft-lora" model = PeftModel.from_pretrained(base_model, peft_model_id) # load different adapter model.load_adapter("alignment-handbook/zephyr-7b-dpo-lora", adapter_name="dpo") # set adapter as active model.set_adapter("dpo") ``` To return the base model, you could use [`~LoraModel.unload`] to unload all of the LoRA modules or [`~LoraModel.delete_adapter`] to delete the adapter entirely. ```py # unload adapter model.unload() # delete adapter model.delete_adapter("dpo") ``` ## Inference with different LoRA adapters in the same batch Normally, each inference batch has to use the same adapter(s) in PEFT. This can sometimes be annoying, because we may have batches that contain samples intended to be used with different LoRA adapters. For example, we could have a base model that works well in English and two more LoRA adapters, one for French and one for German. Usually, we would have to split our batches such that each batch only contains samples of one of the languages, we cannot combine different languages in the same batch. Thankfully, it is possible to mix different LoRA adapters in the same batch using the `adapter_name` argument. Below, we show an example of how this works in practice. First, let's load the base model, English, and the two adapters, French and German, like this: ```python from transformers import AutoTokenizer, AutoModelForCausalLM from peft import PeftModel model_id = ... 
tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) # load the LoRA adapter for French peft_model = PeftModel.from_pretrained(model, <path>, adapter_name="adapter_fr") # next, load the LoRA adapter for German peft_model.load_adapter(<path>, adapter_name="adapter_de") ``` Now, we want to generate text on a sample that contains all three languages: The first three samples are in English, the next three are in French, and the last three are in German. We can use the `adapter_names` argument to specify which adapter to use for each sample. Since our base model is used for English, we use the special string `"__base__"` for these samples. For the next three samples, we indicate the adapter name of the French LoRA fine-tune, in this case `"adapter_fr"`. For the last three samples, we indicate the adapter name of the German LoRA fine-tune, in this case `"adapter_de"`. This way, we can use the base model and the two adapters in a single batch. ```python inputs = tokenizer( [ "Hello, my dog is cute", "Hello, my cat is awesome", "Hello, my fish is great", "Salut, mon chien est mignon", "Salut, mon chat est génial", "Salut, mon poisson est super", "Hallo, mein Hund ist süß", "Hallo, meine Katze ist toll", "Hallo, mein Fisch ist großartig", ], return_tensors="pt", padding=True, ) adapter_names = [ "__base__", "__base__", "__base__", "adapter_fr", "adapter_fr", "adapter_fr", "adapter_de", "adapter_de", "adapter_de", ] output = peft_model.generate(**inputs, adapter_names=adapter_names, max_new_tokens=20) ``` Note that the order does not matter here, i.e. the samples in the batch don't need to be grouped by adapter as in the example above. We just need to ensure that the `adapter_names` argument is aligned correctly with the samples. 
Additionally, the same approach also works with the `modules_to_save` feature, which allows for saving and reusing specific neural network layers, such as custom heads for classification tasks, across different LoRA adapters. ### Caveats Using this feature has some drawbacks, namely: - It only works for inference, not for training. - Disabling adapters using the `with model.disable_adapter()` context takes precedence over `adapter_names`. - You cannot pass `adapter_names` when some adapter weights were merged with base weight using the `merge_adapter` method. Please unmerge all adapters first by calling `model.unmerge_adapter()`. - For obvious reasons, this cannot be used after calling `merge_and_unload()`, since all the LoRA adapters will be merged into the base weights in this case. - This feature does not currently work with DoRA, so set `use_dora=False` in your `LoraConfig` if you want to use it. - The `modules_to_save` feature is currently only supported for the layers of types `Linear`, `Embedding`, `Conv2d` and `Conv1d`. - There is an expected overhead for inference with `adapter_names`, especially if the amount of different adapters in the batch is high. This is because the batch size is effectively reduced to the number of samples per adapter. If runtime performance is your top priority, try the following: - Increase the batch size. - Try to avoid having a large number of different adapters in the same batch, prefer homogeneous batches. This can be achieved by buffering samples with the same adapter and only perform inference with a small handful of different adapters. - Take a look at alternative implementations such as [LoRAX](https://github.com/predibase/lorax), [punica](https://github.com/punica-ai/punica), or [S-LoRA](https://github.com/S-LoRA/S-LoRA), which are specialized to work with a large number of different adapters.
peft/docs/source/developer_guides/lora.md/0
{ "file_path": "peft/docs/source/developer_guides/lora.md", "repo_id": "peft", "token_count": 9665 }
223
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
peft/docs/source/package_reference/peft_model.md/0
{ "file_path": "peft/docs/source/package_reference/peft_model.md", "repo_id": "peft", "token_count": 564 }
224
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
When you're ready, come back and see how easy it is to drop PEFT into your training!
```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) print(target_max_length) ``` Create a preprocessing function that tokenizes the tweet text and labels, pad the inputs and labels in each batch, create an attention mask, and truncate sequences to the `max_length`. Then convert the `input_ids`, `attention_mask`, and `labels` to PyTorch tensors. ```py import torch max_length = 64 def preprocess_function(examples, text_column="Tweet text", label_column="text_label"): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets) classes = [k.replace("_", " ") for k in ds["train"].features["Label"].names] for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(label_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs ``` Apply the preprocessing function to the entire dataset with the [`~datasets.Dataset.map`] function, and remove the unprocessed columns because the model won't need them. 
```py processed_ds = ds.map( preprocess_function, batched=True, num_proc=1, remove_columns=ds["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) ``` Finally, create a training and evaluation [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). You can set `pin_memory=True` to speed up the data transfer to the GPU during training if the samples in your dataset are on a CPU. ```py from torch.utils.data import DataLoader from transformers import default_data_collator train_ds = processed_ds["train"] eval_ds = processed_ds["test"] batch_size = 16 train_dataloader = DataLoader(train_ds, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) eval_dataloader = DataLoader(eval_ds, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) ``` ## Model Now let's load a pretrained model to use as the base model for the soft prompt method. This guide uses the [bigscience/bloomz-560m](https://huggingface.co/bigscience/bloomz-560m) model, but you can use any causal language model you want. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") ``` ### PEFT configuration and model For any PEFT method, you'll need to create a configuration which contains all the parameters that specify how the PEFT method should be applied. Once the configuration is setup, pass it to the [`~peft.get_peft_model`] function along with the base model to create a trainable [`PeftModel`]. <Tip> Call the [`~PeftModel.print_trainable_parameters`] method to compare the number of trainable parameters of [`PeftModel`] versus the number of parameters in the base model! </Tip> <hfoptions id="configurations"> <hfoption id="p-tuning"> [P-tuning](../conceptual_guides/prompting#p-tuning) adds a trainable embedding tensor where the prompt tokens can be added anywhere in the input sequence. 
Once the configuration is set up, pass it to the [`~peft.get_peft_model`] function along with the base model to create a trainable [`PeftModel`].
```py from peft import PromptTuningConfig, PromptTuningInit, get_peft_model prompt_tuning_init_text = "Classify if the tweet is a complaint or no complaint.\n" peft_config = PromptTuningConfig( task_type="CAUSAL_LM", prompt_tuning_init=PromptTuningInit.TEXT, num_virtual_tokens=len(tokenizer(prompt_tuning_init_text)["input_ids"]), prompt_tuning_init_text=prompt_tuning_init_text, tokenizer_name_or_path="bigscience/bloomz-560m", ) model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 8,192 || all params: 559,222,784 || trainable%: 0.0014648902430985358" ``` </hfoption> </hfoptions> ### Training Set up an optimizer and learning rate scheduler. ```py from transformers import get_linear_schedule_with_warmup lr = 3e-2 num_epochs = 50 optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) ``` Move the model to the GPU and create a training loop that reports the loss and perplexity for each epoch. 
<figcaption class="text-center">For example, the adapter weights for an opt-350m model stored on the Hub are only ~6MB compared to the full model size which can be ~700MB.</figcaption>
```py
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(peft_model_id).to("cuda")
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")

i = 15
inputs = tokenizer(f'{text_column} : {ds["test"][i]["Tweet text"]} Label : ', return_tensors="pt")
print(ds["test"][i]["Tweet text"])
"@NYTsupport i have complained a dozen times &amp; yet my papers are still thrown FAR from my door. Why is this so hard to resolve?"
```

Call the [`~transformers.GenerationMixin.generate`] method to generate the predicted classification label.

```py
with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
"['Tweet text : @NYTsupport i have complained a dozen times &amp; yet my papers are still thrown FAR from my door. Why is this so hard to resolve? Label : complaint']"
```
peft/docs/source/task_guides/prompt_based_methods.md/0
{ "file_path": "peft/docs/source/task_guides/prompt_based_methods.md", "repo_id": "peft", "token_count": 4708 }
225
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union

import numpy as np
import PIL.Image
import torch
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline
from diffusers.utils import BaseOutput, logging
from torch.nn import functional as F

from utils.light_controlnet import ControlNetModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class LightControlNetPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
        nsfw_content_detected (`List[bool]`)
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, or `None` if safety checking could not be performed.
    """

    images: Union[list[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[list[bool]]


class LightControlNetPipeline(StableDiffusionControlNetPipeline):
    """Stable Diffusion ControlNet pipeline variant whose ControlNet
    (`utils.light_controlnet.ControlNetModel`) produces a single `guided_hint`
    tensor that is passed to the UNet, instead of the per-block residuals used by
    the stock `StableDiffusionControlNetPipeline`.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def check_inputs(
        self,
        prompt,
        image,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        controlnet_conditioning_scale=1.0,
    ):
        """Validate the arguments of `__call__`.

        Raises:
            ValueError / TypeError: when prompts and embeddings are missing or both supplied,
                when shapes mismatch, or when `image` / `controlnet_conditioning_scale` do not
                match the (single vs. multi) ControlNet configuration.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # Exactly one of `prompt` / `prompt_embeds` must be supplied.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        # `prompt` needs more sophisticated handling when there are multiple
        # conditionings.
        if isinstance(self.controlnet, MultiControlNetModel):
            if isinstance(prompt, list):
                logger.warning(
                    f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
                    " prompts. The conditionings will be fixed across the prompts."
                )

        # Check `image`. A torch.compile'd controlnet hides its real class behind
        # `_orig_mod`, hence the `is_compiled` unwrapping below.
        is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
            self.controlnet, torch._dynamo.eval_frame.OptimizedModule
        )
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            self.check_image(image, prompt, prompt_embeds)
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if not isinstance(image, list):
                raise TypeError("For multiple controlnets: `image` must be type `list`")
            # When `image` is a nested list:
            # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
            elif any(isinstance(i, list) for i in image):
                raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            elif len(image) != len(self.controlnet.nets):
                raise ValueError(
                    "For multiple controlnets: `image` must have the same length as the number of controlnets."
                )

            for image_ in image:
                self.check_image(image_, prompt, prompt_embeds)
        else:
            assert False

        # Check `controlnet_conditioning_scale`
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if isinstance(controlnet_conditioning_scale, list):
                if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                    raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            assert False

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, list[str]] = None,
        image: Union[
            torch.FloatTensor,
            PIL.Image.Image,
            np.ndarray,
            list[torch.FloatTensor],
            list[PIL.Image.Image],
            list[np.ndarray],
        ] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, list[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, list[float]] = 1.0,
        guess_mode: bool = False,
    ):
        r"""
        Run text-to-image generation guided by a lightweight ControlNet.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                Prompt(s) to guide the image generation; required unless `prompt_embeds` is given.
            image (tensor, `PIL.Image.Image`, `np.ndarray`, or a list of those):
                The ControlNet conditioning input; a list with one entry per ControlNet when
                multiple ControlNets are configured. The output image defaults to `image`'s size.
            height (`int`, *optional*):
                Output height in pixels; inferred from the prepared `image` when omitted.
            width (`int`, *optional*):
                Output width in pixels; inferred from the prepared `image` when omitted.
            num_inference_steps (`int`, *optional*, defaults to 50):
                Number of denoising steps; more steps usually trade speed for quality.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Classifier-free guidance weight `w` from https://huggingface.co/papers/2207.12598;
                guidance is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `list[str]`, *optional*):
                Prompt(s) to steer generation away from; ignored when guidance is disabled.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                Number of images generated per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                DDIM eta (https://huggingface.co/papers/2010.02502); ignored by other schedulers.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                Generator(s) for deterministic sampling.
            latents (`torch.FloatTensor`, *optional*):
                Pre-sampled initial noise latents; sampled with `generator` when omitted.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Precomputed text embeddings, used instead of `prompt`.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Precomputed negative text embeddings, used instead of `negative_prompt`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                Output format; `"latent"` skips VAE decoding and the safety checker.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`LightControlNetPipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                Called as `callback(step, timestep, latents)` during denoising.
            callback_steps (`int`, *optional*, defaults to 1):
                Frequency (in steps) at which `callback` is invoked.
            cross_attention_kwargs (`dict`, *optional*):
                Forwarded to the pipeline's attention processors.
            controlnet_conditioning_scale (`float` or `list[float]`, *optional*, defaults to 1.0):
                Scale(s) applied to the ControlNet output(s); a list for multiple ControlNets.
            guess_mode (`bool`, *optional*, defaults to `False`):
                Let the ControlNet recognize the input image content even without prompts;
                `guidance_scale` between 3.0 and 5.0 is recommended in this mode.

        Returns:
            [`LightControlNetPipelineOutput`] if `return_dict` is True, otherwise a tuple of
            (generated images, per-image nsfw flags or `None`).
        """
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            image,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            controlnet_conditioning_scale,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # Unwrap a torch.compile'd controlnet to inspect its real class.
        controlnet = self.controlnet._orig_mod if hasattr(self.controlnet, "_orig_mod") else self.controlnet

        if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
            controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
        )
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
        )

        # 4. Prepare image
        if isinstance(controlnet, ControlNetModel):
            image = self.prepare_image(
                image=image,
                width=width,
                height=height,
                batch_size=batch_size * num_images_per_prompt,
                num_images_per_prompt=num_images_per_prompt,
                device=device,
                dtype=controlnet.dtype,
                do_classifier_free_guidance=do_classifier_free_guidance,
                guess_mode=guess_mode,
            )
            height, width = image.shape[-2:]
        elif isinstance(controlnet, MultiControlNetModel):
            images = []

            for image_ in image:
                image_ = self.prepare_image(
                    image=image_,
                    width=width,
                    height=height,
                    batch_size=batch_size * num_images_per_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    device=device,
                    dtype=controlnet.dtype,
                    do_classifier_free_guidance=do_classifier_free_guidance,
                    guess_mode=guess_mode,
                )

                images.append(image_)

            image = images
            height, width = image[0].shape[-2:]
        else:
            assert False

        # 5. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 6. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # controlnet(s) inference
                if guess_mode and do_classifier_free_guidance:
                    # Infer ControlNet only for the conditional batch.
                    control_model_input = latents
                    control_model_input = self.scheduler.scale_model_input(control_model_input, t)
                else:
                    control_model_input = latent_model_input

                # Get the guided hint for the UNet (320 dim)
                guided_hint = self.controlnet(
                    controlnet_cond=image,
                )

                # Predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    guided_hint=guided_hint,
                    encoder_hidden_states=prompt_embeds,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # If we do sequential model offloading, let's offload unet and controlnet
        # manually for max memory savings
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.unet.to("cpu")
            self.controlnet.to("cpu")
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            elif torch.xpu.is_available():
                torch.xpu.empty_cache()

        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return LightControlNetPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
peft/examples/boft_controlnet/utils/pipeline_controlnet.py/0
{ "file_path": "peft/examples/boft_controlnet/utils/pipeline_controlnet.py", "repo_id": "peft", "token_count": 10001 }
226
# Accelerate launch config: single-machine, single-process DeepSpeed ZeRO stage-3 run.
# NOTE(review): the filename mentions "cpu_offload", but both offload devices below
# are set to `none` — confirm whether optimizer/param offloading was intended.
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  # No CPU/NVMe offloading of optimizer state or parameters.
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: true
  # Consolidate sharded weights into a fp16 checkpoint on save.
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
mixed_precision: 'no'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
use_cpu: false
peft/examples/causal_language_modeling/accelerate_ds_zero3_cpu_offload_config.yaml/0
{ "file_path": "peft/examples/causal_language_modeling/accelerate_ds_zero3_cpu_offload_config.yaml", "repo_id": "peft", "token_count": 198 }
227
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import AutoTokenizer


class TokenizerMetaMath:
    """Tokenize MetaMath-style (query, response) examples into Alpaca-format prompts.

    Designed to be mapped over a batched dataset: `__call__` receives a dict with
    "query" and "response" columns and returns token ids plus the prompt/input
    lengths needed later for label masking in `DataCollator`.
    """

    # Template used when the query has no additional input section.
    PROMPT_NO_INPUT = (
        "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{query}\n\n### Response: "
    )
    # Template used when the query's first line is the instruction and the rest is extra context.
    PROMPT = (
        "Below is an instruction that describes a task, paired with an input that provides further context. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{query}\n\n### Input:\n{input}\n\n### Response: "
    )

    def format_prompt(self, query):
        """Split `query` on its first newline and fill the matching template."""
        query = query.split("\n", 1)
        if len(query) == 1 or query[1].strip("\n") == "":
            # Single-line query (or empty remainder): no "Input" section.
            return self.PROMPT_NO_INPUT.format(query=query[0])
        else:
            return self.PROMPT.format(query=query[0], input=query[1])

    def __init__(self, tokenizer_path):
        """Load the tokenizer identified by `tokenizer_path` (hub id or local path)."""
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)

    def __call__(self, examples):
        """Tokenize a batch of examples with "query" and "response" columns."""
        prompts = [self.format_prompt(text) for text in examples["query"]]
        completions = examples["response"]
        return self._tokenize_fn(prompts, completions)

    def _tokenize_fn(self, prompts, completions):
        """Tokenize prompt+completion pairs, wrapping each sequence in bos/eos.

        Returns a dict with:
            input_ids: bos + prompt + completion + eos token ids per example
            prompt_length: number of tokens covering bos + prompt (used to mask labels)
            input_length: total sequence length per example
        """
        prompt_tokens = self.tokenizer(prompts, add_special_tokens=False)["input_ids"]
        input_tokens = self.tokenizer([x + y for x, y in zip(prompts, completions)], add_special_tokens=False)[
            "input_ids"
        ]
        input_tokens = [[self.tokenizer.bos_token_id] + x + [self.tokenizer.eos_token_id] for x in input_tokens]
        prompt_length = [len(x) + 1 for x in prompt_tokens]  # +1 for the bos token
        input_length = [len(x) for x in input_tokens]
        return {"input_ids": input_tokens, "prompt_length": prompt_length, "input_length": input_length}


class DataCollator:
    """Pad variable-length examples into a batch and build attention/label masks.

    Labels are set to -100 (the ignore index) over the prompt tokens and over
    padding, so the loss is computed on the completion only.
    """

    def __init__(self, eos_token_id, max_length=None):
        # eos doubles as the padding token; max_length optionally truncates the batch.
        self.eos_token_id = eos_token_id
        self.max_length = max_length

    def __call__(self, batch):
        # Transpose list-of-dicts into dict-of-lists.
        batch = {k: [item[k] for item in batch] for k in batch[0]}
        # NOTE(review): torch.stack requires tensor elements — this assumes the dataset
        # was formatted to torch (e.g. `set_format("torch")`) upstream; confirm in caller.
        input_lengths = torch.stack(batch["input_length"])
        prompt_lengths = torch.stack(batch["prompt_length"])

        input_ids = torch.nn.utils.rnn.pad_sequence(
            batch["input_ids"], batch_first=True, padding_value=self.eos_token_id
        )

        # Broadcast column indices against per-example lengths to build the masks.
        col_indices = torch.arange(input_ids.size(1)).unsqueeze(0)
        attention_mask = col_indices < input_lengths.unsqueeze(1)

        # Ignore loss on prompt tokens and on padding.
        label_mask = torch.logical_or(col_indices < prompt_lengths.unsqueeze(1), ~attention_mask)
        labels = input_ids.masked_fill(label_mask, -100)

        if self.max_length is not None:
            input_ids = input_ids[:, : self.max_length]
            attention_mask = attention_mask[:, : self.max_length]
            labels = labels[:, : self.max_length]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}
peft/examples/eva_finetuning/utils.py/0
{ "file_path": "peft/examples/eva_finetuning/utils.py", "repo_id": "peft", "token_count": 1382 }
228
# Fine-tuning for image classification using LoRA and 🤗 PEFT

## Vision Transformer model from transformers

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_peft_lora.ipynb)

We provide a notebook (`image_classification_peft_lora.ipynb`) where we learn how to use [LoRA](https://huggingface.co/papers/2106.09685) from 🤗 PEFT to fine-tune an image classification model by ONLY using **0.7%** of the original trainable parameters of the model.

LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://huggingface.co/papers/2106.09685).

## PoolFormer model from timm

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb)

The notebook `image_classification_timm_peft_lora.ipynb` showcases fine-tuning an image classification model using the [timm](https://huggingface.co/docs/timm/index) library. Again, LoRA is used to reduce the number of trainable parameters to a fraction of the total.
peft/examples/image_classification/README.md/0
{ "file_path": "peft/examples/image_classification/README.md", "repo_id": "peft", "token_count": 459 }
229
import argparse
import os
from collections import Counter
from dataclasses import dataclass
from typing import Optional

import safetensors
import torch
from diffusers import UNet2DConditionModel
from transformers import CLIPTextModel

from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict


# Default kohya_ss LoRA replacement modules
# https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L661
UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
LORA_PREFIX_UNET = "lora_unet"
LORA_PREFIX_TEXT_ENCODER = "lora_te"


@dataclass
class LoRAInfo:
    """Bookkeeping for a single LoRA layer while converting kohya_ss weights to PEFT."""

    kohya_key: str
    peft_key: str
    alpha: Optional[float] = None
    rank: Optional[int] = None
    lora_A: Optional[torch.Tensor] = None
    lora_B: Optional[torch.Tensor] = None

    def peft_state_dict(self) -> dict[str, torch.Tensor]:
        """Return this layer's weights keyed the way `set_peft_model_state_dict` expects.

        Raises:
            ValueError: if either low-rank factor is missing.
        """
        if self.lora_A is None or self.lora_B is None:
            raise ValueError("At least one of lora_A or lora_B is None, they must both be provided")
        # Bug fix: the original referenced the undefined name `peft_key` (missing `self.`),
        # mapped BOTH entries to `self.lora_A`, and omitted the `base_model.model.` prefix
        # that `combine_peft_state_dict` uses for the same keys.
        return {
            f"base_model.model.{self.peft_key}.lora_A.weight": self.lora_A,
            f"base_model.model.{self.peft_key}.lora_B.weight": self.lora_B,
        }


def construct_peft_loraconfig(info: dict[str, LoRAInfo]) -> LoraConfig:
    """Constructs LoraConfig from data extracted from kohya checkpoint.

    The most common rank and alpha across layers become the config defaults; layers
    that deviate are recorded in `rank_pattern` / `alpha_pattern`.

    Args:
        info (dict[str, LoRAInfo]): Information extracted from kohya checkpoint

    Returns:
        LoraConfig: config for constructing LoRA
    """
    # Unpack all ranks and alphas (alpha falls back to the rank when absent)
    ranks = {key: layer.rank for key, layer in info.items()}
    alphas = {key: layer.alpha or layer.rank for key, layer in info.items()}

    # Determine which modules needs to be transformed
    target_modules = list(info.keys())

    # Determine most common rank and alpha.
    # Bug fix: `most_common(1)[0]` is a `(value, count)` tuple — the original stored the
    # tuple itself, which poisoned `r`, `lora_alpha` and both pattern dicts below.
    r = Counter(ranks.values()).most_common(1)[0][0]
    lora_alpha = Counter(alphas.values()).most_common(1)[0][0]

    # Determine which modules have different rank and alpha than the defaults
    rank_pattern = {key: rank for key, rank in ranks.items() if rank != r}
    alpha_pattern = {key: alpha for key, alpha in alphas.items() if alpha != lora_alpha}

    config = LoraConfig(
        r=r,
        lora_alpha=lora_alpha,
        target_modules=target_modules,
        lora_dropout=0.0,
        bias="none",
        init_lora_weights=False,
        rank_pattern=rank_pattern,
        alpha_pattern=alpha_pattern,
    )

    return config


def combine_peft_state_dict(info: dict[str, LoRAInfo]) -> dict[str, torch.Tensor]:
    """Merge all layers' A/B factors into one PEFT-style state dict."""
    result = {}
    for key_name, key_info in info.items():
        result[f"base_model.model.{key_name}.lora_A.weight"] = key_info.lora_A
        result[f"base_model.model.{key_name}.lora_B.weight"] = key_info.lora_B
    return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--sd_checkpoint", default=None, type=str, required=True, help="SD checkpoint to use")
    parser.add_argument(
        "--kohya_lora_path", default=None, type=str, required=True, help="Path to kohya_ss trained LoRA"
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    args = parser.parse_args()

    # Load all models that we need to add adapter to
    text_encoder = CLIPTextModel.from_pretrained(args.sd_checkpoint, subfolder="text_encoder")
    unet = UNet2DConditionModel.from_pretrained(args.sd_checkpoint, subfolder="unet")

    # Construct possible mapping from kohya keys to peft keys
    # (kohya flattens module paths with "_" instead of ".")
    models_keys = {}
    for model, model_key, model_name in [
        (text_encoder, LORA_PREFIX_TEXT_ENCODER, "text_encoder"),
        (unet, LORA_PREFIX_UNET, "unet"),
    ]:
        models_keys.update(
            {
                f"{model_key}.{peft_key}".replace(".", "_"): peft_key
                for peft_key in (x[0] for x in model.named_modules())
            }
        )

    # Store conversion info (model_type -> peft_key -> LoRAInfo)
    lora_info: dict[str, dict[str, LoRAInfo]] = {
        "text_encoder": {},
        "unet": {},
    }

    # Open kohya_ss checkpoint
    with safetensors.safe_open(args.kohya_lora_path, framework="pt", device="cpu") as f:
        # Extract information about LoRA structure
        metadata = f.metadata()

        # Iterate through available info and unpack all the values
        for key in f.keys():
            # Keys look like "<kohya_module_name>.<weight_type>.weight"
            kohya_key, kohya_type = key.split(".")[:2]

            # Find which model this key belongs to
            if kohya_key.startswith(LORA_PREFIX_TEXT_ENCODER):
                model_type = "text_encoder"
            elif kohya_key.startswith(LORA_PREFIX_UNET):
                model_type = "unet"
            else:
                raise ValueError(f"Cannot determine model for key: {key}")

            # Find corresponding peft key
            if kohya_key not in models_keys:
                raise ValueError(f"Cannot find corresponding key for diffusers/transformers model: {kohya_key}")
            peft_key = models_keys[kohya_key]

            if peft_key not in lora_info[model_type]:
                lora_info[model_type][peft_key] = LoRAInfo(kohya_key=kohya_key, peft_key=peft_key)

            if kohya_type == "alpha":
                lora_info[model_type][peft_key].alpha = f.get_tensor(key).item()
            elif kohya_type == "lora_down":
                tensor = f.get_tensor(key)
                lora_info[model_type][peft_key].lora_A = tensor
                lora_info[model_type][peft_key].rank = tensor.shape[0]
            elif kohya_type == "lora_up":
                tensor = f.get_tensor(key)
                # Bug fix: reuse the tensor fetched above instead of reading it twice.
                lora_info[model_type][peft_key].lora_B = tensor
                lora_info[model_type][peft_key].rank = tensor.shape[1]
            else:
                raise ValueError(f"Unknown weight name in key: {key} - {kohya_type}")

    # Process each model: build the LoRA config, wrap the model, and load the weights.
    for model, model_name in [(text_encoder, "text_encoder"), (unet, "unet")]:
        config = construct_peft_loraconfig(lora_info[model_name])
        model = get_peft_model(model, config)

        set_peft_model_state_dict(model, combine_peft_state_dict(lora_info[model_name]))

        if args.half:
            model.to(torch.float16)

        # Save model to disk
        model.save_pretrained(os.path.join(args.dump_path, model_name))
{ "file_path": "peft/examples/lora_dreambooth/convert_kohya_ss_sd_lora_to_peft.py", "repo_id": "peft", "token_count": 2938 }
230
# OLoRA: Orthonormal Low Rank Adaptation of Large Language Models ## Introduction [OLoRA](https://huggingface.co/papers/2406.01775) is a novel approach that leverages orthonormal low rank adaptation through QR decomposition. Unlike the default LoRA implementation, OLoRA decomposes original weights into their $\mathbf{Q}$ and $\mathbf{R}$ parts, and then uses the first `rank` rows of $\mathbf{R}$ and the first `rank` columns of $\mathbf{Q}$ to initialize $\mathbf{A}$ and $\mathbf{B}$, respectively. This results in significantly faster convergence, more stable training, and superior performance. ## Quick start ```python import torch from peft import LoraConfig, get_peft_model from transformers import AutoTokenizer, AutoModelForCausalLM from trl import SFTConfig, SFTTrainer from datasets import load_dataset model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16, device_map="auto") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") dataset = load_dataset("imdb", split="train[:1%]") lora_config = LoraConfig( init_lora_weights="olora" ) peft_model = get_peft_model(model, lora_config) training_args = SFTConfig(dataset_text_field="text", max_seq_length=128) trainer = SFTTrainer( model=peft_model, train_dataset=dataset, processing_class=tokenizer, ) trainer.train() peft_model.save_pretrained("olora-opt-350m") ``` There is no additional change needed to your standard LoRA procedure, except for specifying `init_lora_weights = "olora"` option in your lora configuration. Additionally you can refer to olora finetuning script. Run the script simply by running: ```bash python3 examples/olora_finetuning/olora_finetuning.py --base_model facebook/opt-350m ``` OLoRA also supports quantization. To use 4-bit quantization try: ```bash python3 examples/olora_finetuning/olora_finetuning.py --base_model facebook/opt-350m --quantize ``` or you can just pass a quantized model without the quantize flag. 
If you want to run DDP by [accelerate](https://huggingface.co/docs/accelerate/en/index), please run `accelerate config` to set your ddp config, and run: ```bash accelerate launch examples/olora_finetuning/olora_finetuning.py --base_model facebook/opt-350m ``` please add `--device_map cpu` if you want to run finetune on CPU. If you want to train a quantized model like AWQ and GPTQ which do not support olora init method, please pass `--init_lora_weights gaussian`. For example: ```bash python3 examples/olora_finetuning/olora_finetuning.py --base_model hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4 --init_lora_weights gaussian ``` ## Use the model You can load and use the model as any other 🤗 PEFT model ```python from peft import PeftModel model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") olora_model = PeftModel.from_pretrained(model, "olora-opt-350m") ``` ## OLoRA and LoRA OLoRA differs from LoRA in that it mutates the original weights. To utilize multiple adapters simultaneously, you can leverage the `path_initial_model_for_weight_conversion` option. Below is a simple template illustrating how to convert OLoRA to conventional LoRA: ```python base_model = AutoModel.from_pretrained("facebook/opt-350m") olora_config = LoraConfig( ... init_lora_weights = "olora" # Initialize the model with OLoRA ) olora_model = get_peft_model(base_model, olora_config) init_path = <path-to-untrained-olora-model> olora_model.save_pretrained(init_path) # Save the model *before* performing any training # Train the model train(olora_model) # Your training loop #Save the model after training olora_model.save_pretrained(output_dir, path_initial_model_for_weight_conversion=init_path) ``` After completing training, you can save and convert your OLoRA model to a conventional LoRA model by setting `path_initial_model_for_weight_conversion` to `init_path`, that is the path of your untrained OLoRA model. 
This conversion enables you to use multiple adapters with your LoRA model. Note that this conversion is not supported if `rslora` is used in combination with `rank_pattern` or `alpha_pattern`. ## Citation ``` @misc{büyükakyüz2024olora, title={OLoRA: Orthonormal Low-Rank Adaptation of Large Language Models}, author={Kerim Büyükakyüz}, year={2024}, eprint={2406.01775}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
peft/examples/olora_finetuning/README.md/0
{ "file_path": "peft/examples/olora_finetuning/README.md", "repo_id": "peft", "token_count": 1445 }
231
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization # Default kohya_ss LoRA replacement modules # https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L661 UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"] UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"] TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"] PREFIX_UNET = "lora_unet" PREFIX_TEXT_ENCODER = "lora_te" @dataclass class LoRAInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lora_A: Optional[torch.Tensor] = None lora_B: Optional[torch.Tensor] = None def peft_state_dict(self) -> dict[str, torch.Tensor]: if self.lora_A is None or self.lora_B is None: raise ValueError("At least one of lora_A or lora_B is None, they must both be provided") return { f"base_model.model.{self.peft_key}.lora_A.weight": self.lora_A, f"base_model.model.{self.peft_key}.lora_B.weight": self.lora_B, } @dataclass class LoHaInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None hada_w1_a: Optional[torch.Tensor] = None hada_w1_b: Optional[torch.Tensor] = None hada_w2_a: Optional[torch.Tensor] = None hada_w2_b: Optional[torch.Tensor] = None hada_t1: Optional[torch.Tensor] = None hada_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> dict[str, torch.Tensor]: if self.hada_w1_a is None or self.hada_w1_b is None or self.hada_w2_a is None or self.hada_w2_b is None: raise ValueError( "At least one of hada_w1_a, hada_w1_b, 
hada_w2_a, hada_w2_b is missing, they all must be provided" ) state_dict = { f"base_model.model.{self.peft_key}.hada_w1_a": self.hada_w1_a, f"base_model.model.{self.peft_key}.hada_w1_b": self.hada_w1_b, f"base_model.model.{self.peft_key}.hada_w2_a": self.hada_w2_a, f"base_model.model.{self.peft_key}.hada_w2_b": self.hada_w2_b, } if not ( (self.hada_t1 is None and self.hada_t2 is None) or (self.hada_t1 is not None and self.hada_t2 is not None) ): raise ValueError("hada_t1 and hada_t2 must be either both present or not present at the same time") if self.hada_t1 is not None and self.hada_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.hada_t1"] = self.hada_t1 state_dict[f"base_model.model.{self.peft_key}.hada_t2"] = self.hada_t2 return state_dict @dataclass class LoKrInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lokr_w1: Optional[torch.Tensor] = None lokr_w1_a: Optional[torch.Tensor] = None lokr_w1_b: Optional[torch.Tensor] = None lokr_w2: Optional[torch.Tensor] = None lokr_w2_a: Optional[torch.Tensor] = None lokr_w2_b: Optional[torch.Tensor] = None lokr_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> dict[str, torch.Tensor]: if (self.lokr_w1 is None) and ((self.lokr_w1_a is None) or (self.lokr_w1_b is None)): raise ValueError("Either lokr_w1 or both lokr_w1_a and lokr_w1_b should be provided") if (self.lokr_w2 is None) and ((self.lokr_w2_a is None) or (self.lokr_w2_b is None)): raise ValueError("Either lokr_w2 or both lokr_w2_a and lokr_w2_b should be provided") state_dict = {} if self.lokr_w1 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1"] = self.lokr_w1 elif self.lokr_w1_a is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1_a"] = self.lokr_w1_a state_dict[f"base_model.model.{self.peft_key}.lokr_w1_b"] = self.lokr_w1_b if self.lokr_w2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w2"] = self.lokr_w2 elif self.lokr_w2_a is not None: 
state_dict[f"base_model.model.{self.peft_key}.lokr_w2_a"] = self.lokr_w2_a state_dict[f"base_model.model.{self.peft_key}.lokr_w2_b"] = self.lokr_w2_b if self.lokr_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_t2"] = self.lokr_t2 return state_dict def construct_peft_loraconfig(info: dict[str, LoRAInfo], **kwargs) -> LoraConfig: """Constructs LoraConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from adapter checkpoint Returns: LoraConfig: config for constructing LoRA """ # Unpack all ranks and alphas ranks = {key: val.rank for key, val in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) lora_alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != lora_alpha, alphas.items()), key=lambda x: x[0])) config = LoraConfig( r=r, lora_alpha=lora_alpha, target_modules=target_modules, lora_dropout=0.0, bias="none", init_lora_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, ) return config def construct_peft_lohaconfig(info: dict[str, LoHaInfo], **kwargs) -> LoHaConfig: """Constructs LoHaConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoHaInfo]): Information extracted from adapter checkpoint Returns: LoHaConfig: config for constructing LoHA """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = 
int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any((val.hada_t1 is not None) or (val.hada_t2 is not None) for val in info.values()) config = LoHaConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, ) return config def construct_peft_lokrconfig(info: dict[str, LoKrInfo], decompose_factor: int = -1, **kwargs) -> LoKrConfig: """Constructs LoKrConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoKrInfo]): Information extracted from adapter checkpoint Returns: LoKrConfig: config for constructing LoKr """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any((val.lokr_t2 is not None) for val in info.values()) # decompose_both should be enabled if any w1 matrix in any layer is decomposed into 2 decompose_both = any((val.lokr_w1_a is not None and 
val.lokr_w1_b is not None) for val in info.values()) # Determining decompose factor is a bit tricky (but it is most often -1) # Check that decompose_factor is equal to provided for val in info.values(): # Determine shape of first matrix if val.lokr_w1 is not None: w1_shape = tuple(val.lokr_w1.shape) else: w1_shape = (val.lokr_w1_a.shape[0], val.lokr_w1_b.shape[1]) # Determine shape of second matrix if val.lokr_w2 is not None: w2_shape = tuple(val.lokr_w2.shape[:2]) elif val.lokr_t2 is not None: w2_shape = (val.lokr_w2_a.shape[1], val.lokr_w2_b.shape[1]) else: # We may iterate over Conv2d layer, for which second item in shape is multiplied by ksize^2 w2_shape = (val.lokr_w2_a.shape[0], val.lokr_w2_b.shape[1]) # We need to check, whether decompose_factor is really -1 or not shape = (w1_shape[0], w2_shape[0]) if factorization(shape[0] * shape[1], factor=-1) != shape: raise ValueError("Cannot infer decompose_factor, probably it is not equal to -1") config = LoKrConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, decompose_both=decompose_both, decompose_factor=decompose_factor, ) return config def combine_peft_state_dict(info: dict[str, Union[LoRAInfo, LoHaInfo]]) -> dict[str, torch.Tensor]: result = {} for key_info in info.values(): result.update(key_info.peft_state_dict()) return result def detect_adapter_type(keys: list[str]) -> PeftType: # Detect type of adapter by keys # Inspired by this: # https://github.com/bmaltais/kohya_ss/blob/ed4e3b0239a40506de9a17e550e6cf2d0b867a4f/tools/lycoris_utils.py#L312 for key in keys: if "alpha" in key: continue elif any(x in key for x in ["lora_down", "lora_up"]): # LoRA return PeftType.LORA elif any(x in key for x in ["hada_w1", "hada_w2", "hada_t1", "hada_t2"]): # LoHa may have the following keys: # hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b, hada_t1, hada_t2 return 
PeftType.LOHA elif any(x in key for x in ["lokr_w1", "lokr_w2", "lokr_t1", "lokr_t2"]): # LoKr may have the following keys: # lokr_w1, lokr_w2, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t1, lokr_t2 return PeftType.LOKR elif "diff" in key: raise ValueError("Currently full diff adapters are not implemented") else: raise ValueError("Unknown adapter type, probably not implemented") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--sd_checkpoint", default=None, type=str, required=True, help="SD checkpoint to use") parser.add_argument( "--adapter_path", default=None, type=str, required=True, help="Path to downloaded adapter to convert", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output peft adapter.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--loha_conv2d_weights_fix", action="store_true", help="""LoHa checkpoints trained with lycoris-lora<=1.9.0 contain a bug described in this PR https://github.com/KohakuBlueleaf/LyCORIS/pull/115. This option fixes this bug during weight conversion (replaces hada_t2 with hada_t1 for Conv2d 3x3 layers). The output results may differ from webui, but in general, they should be better in terms of quality. This option should be set to True in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR wasn't merged. 
This option should be set to False in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR is merged or full compatibility with webui outputs is required.""", ) args = parser.parse_args() # Load all models that we need to add adapter to text_encoder = CLIPTextModel.from_pretrained(args.sd_checkpoint, subfolder="text_encoder") unet = UNet2DConditionModel.from_pretrained(args.sd_checkpoint, subfolder="unet") # Construct possible mapping from kohya keys to peft keys models_keys = {} for model, model_key, model_name in [ (text_encoder, PREFIX_TEXT_ENCODER, "text_encoder"), (unet, PREFIX_UNET, "unet"), ]: models_keys.update( { f"{model_key}.{peft_key}".replace(".", "_"): peft_key for peft_key in (x[0] for x in model.named_modules()) } ) # Store conversion info (model_type -> peft_key -> LoRAInfo | LoHaInfo | LoKrInfo) adapter_info: dict[str, dict[str, Union[LoRAInfo, LoHaInfo, LoKrInfo]]] = { "text_encoder": {}, "unet": {}, } # Store decompose_factor for LoKr decompose_factor = -1 # Open adapter checkpoint with safetensors.safe_open(args.adapter_path, framework="pt", device="cpu") as f: # Extract information about adapter structure metadata = f.metadata() # It may be difficult to determine rank for LoKr adapters # If checkpoint was trained with large rank it may not be utilized during weights creation at all # So we need to get it from checkpoint metadata (along with decompose_factor) rank, conv_rank = None, None if metadata is not None: rank = metadata.get("ss_network_dim", None) rank = int(rank) if rank else None if "ss_network_args" in metadata: network_args = json.loads(metadata["ss_network_args"]) conv_rank = network_args.get("conv_dim", None) conv_rank = int(conv_rank) if conv_rank else rank decompose_factor = network_args.get("factor", -1) decompose_factor = int(decompose_factor) # Detect adapter type based on keys adapter_type = detect_adapter_type(f.keys()) adapter_info_cls = { PeftType.LORA: LoRAInfo, 
PeftType.LOHA: LoHaInfo, PeftType.LOKR: LoKrInfo, }[adapter_type] # Iterate through available info and unpack all the values for key in f.keys(): kohya_key, kohya_type = key.split(".")[:2] # Find which model this key belongs to if kohya_key.startswith(PREFIX_TEXT_ENCODER): model_type, model = "text_encoder", text_encoder elif kohya_key.startswith(PREFIX_UNET): model_type, model = "unet", unet else: raise ValueError(f"Cannot determine model for key: {key}") # Find corresponding peft key if kohya_key not in models_keys: raise ValueError(f"Cannot find corresponding key for diffusers/transformers model: {kohya_key}") peft_key = models_keys[kohya_key] # Retrieve corresponding layer of model layer = attrgetter(peft_key)(model) # Create a corresponding adapter info if peft_key not in adapter_info[model_type]: adapter_info[model_type][peft_key] = adapter_info_cls(kohya_key=kohya_key, peft_key=peft_key) tensor = f.get_tensor(key) if kohya_type == "alpha": adapter_info[model_type][peft_key].alpha = tensor.item() elif kohya_type == "lora_down": adapter_info[model_type][peft_key].lora_A = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lora_up": adapter_info[model_type][peft_key].lora_B = tensor adapter_info[model_type][peft_key].rank = tensor.shape[1] elif kohya_type == "hada_w1_a": adapter_info[model_type][peft_key].hada_w1_a = tensor elif kohya_type == "hada_w1_b": adapter_info[model_type][peft_key].hada_w1_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "hada_w2_a": adapter_info[model_type][peft_key].hada_w2_a = tensor elif kohya_type == "hada_w2_b": adapter_info[model_type][peft_key].hada_w2_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type in {"hada_t1", "hada_t2"}: if args.loha_conv2d_weights_fix: if kohya_type == "hada_t1": # This code block fixes a bug that exists for some LoHa checkpoints # that resulted in accidentally using hada_t1 weight instead of 
hada_t2, see # https://github.com/KohakuBlueleaf/LyCORIS/pull/115 adapter_info[model_type][peft_key].hada_t1 = tensor adapter_info[model_type][peft_key].hada_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] else: if kohya_type == "hada_t1": adapter_info[model_type][peft_key].hada_t1 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "hada_t2": adapter_info[model_type][peft_key].hada_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_t2": adapter_info[model_type][peft_key].lokr_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_w1": adapter_info[model_type][peft_key].lokr_w1 = tensor if isinstance(layer, nn.Linear) or ( isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1) ): adapter_info[model_type][peft_key].rank = rank elif isinstance(layer, nn.Conv2d): adapter_info[model_type][peft_key].rank = conv_rank elif kohya_type == "lokr_w2": adapter_info[model_type][peft_key].lokr_w2 = tensor if isinstance(layer, nn.Linear) or ( isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1) ): adapter_info[model_type][peft_key].rank = rank elif isinstance(layer, nn.Conv2d): adapter_info[model_type][peft_key].rank = conv_rank elif kohya_type == "lokr_w1_a": adapter_info[model_type][peft_key].lokr_w1_a = tensor adapter_info[model_type][peft_key].rank = tensor.shape[1] elif kohya_type == "lokr_w1_b": adapter_info[model_type][peft_key].lokr_w1_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_w2_a": adapter_info[model_type][peft_key].lokr_w2_a = tensor elif kohya_type == "lokr_w2_b": adapter_info[model_type][peft_key].lokr_w2_b = tensor else: raise ValueError(f"Unknown weight name in key: {key} - {kohya_type}") # Get function which will create adapter config based on extracted info construct_config_fn = { PeftType.LORA: construct_peft_loraconfig, PeftType.LOHA: 
construct_peft_lohaconfig, PeftType.LOKR: construct_peft_lokrconfig, }[adapter_type] # Process each model sequentially for model, model_name in [(text_encoder, "text_encoder"), (unet, "unet")]: # Skip model if no data was provided if len(adapter_info[model_name]) == 0: continue config = construct_config_fn(adapter_info[model_name], decompose_factor=decompose_factor) # Output warning for LoHa with use_effective_conv2d if ( isinstance(config, LoHaConfig) and getattr(config, "use_effective_conv2d", False) and args.loha_conv2d_weights_fix is False ): logging.warning( 'lycoris-lora<=1.9.0 LoHa implementation contains a bug, which can be fixed with "--loha_conv2d_weights_fix".\n' "For more info, please refer to https://github.com/huggingface/peft/pull/1021 and https://github.com/KohakuBlueleaf/LyCORIS/pull/115" ) model = get_peft_model(model, config) missing_keys, unexpected_keys = set_peft_model_state_dict( model, combine_peft_state_dict(adapter_info[model_name]) ) if len(unexpected_keys) > 0: raise ValueError(f"Unexpected keys {unexpected_keys} found during conversion") if args.half: model.to(torch.float16) # Save model to disk model.save_pretrained(os.path.join(args.dump_path, model_name))
peft/examples/stable_diffusion/convert_sd_adapter_to_peft.py/0
{ "file_path": "peft/examples/stable_diffusion/convert_sd_adapter_to_peft.py", "repo_id": "peft", "token_count": 10375 }
232
{ "auto_mapping": null, "base_model_name_or_path": null, "bias": "none", "boft_block_num": 0, "boft_block_size": 4, "boft_dropout": 0.0, "boft_n_butterfly_factor": 1, "exclude_modules": null, "fan_in_fan_out": false, "inference_mode": false, "init_weights": true, "layers_pattern": null, "layers_to_transform": null, "modules_to_save": null, "peft_type": "BOFT", "revision": null, "target_modules": null, "task_type": null }
peft/method_comparison/MetaMathQA/experiments/boft/llama-3.2-3B-default/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/boft/llama-3.2-3B-default/adapter_config.json", "repo_id": "peft", "token_count": 202 }
233
{ "optimizer_type": "lora-fa", "optimizer_kwargs": { "r": 32, "lora_alpha": 64, "lr": 1e-4, "weight_decay": 0.1 } }
peft/method_comparison/MetaMathQA/experiments/lora/llama-3.2-3B-rank32-lorafa/training_params.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/lora/llama-3.2-3B-rank32-lorafa/training_params.json", "repo_id": "peft", "token_count": 77 }
234
{ "auto_mapping": null, "base_model_name_or_path": null, "peft_type": "TRAINABLE_TOKENS", "token_indices": [128000, 128001], "task_type": "CAUSAL_LM" }
peft/method_comparison/MetaMathQA/experiments/trainable_tokens/llama-3.2-3B-sos+eos/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/trainable_tokens/llama-3.2-3B-sos+eos/adapter_config.json", "repo_id": "peft", "token_count": 77 }
235
{ "short": [ "Explain quantum computing in one paragraph.", "Write a haiku about machine learning.", "What's the difference between supervised and unsupervised learning?", "Define parameter-efficient fine-tuning in one sentence.", "List three applications of natural language processing." ], "medium": [ "Explain the concept of low-rank adaptation (LoRA) for large language models. Include its benefits and limitations.", "Compare and contrast prompt tuning and prefix tuning approaches for adapting large language models.", "What are the key differences between full fine-tuning and parameter-efficient methods? Explain with examples.", "Describe the process of quantization for neural networks and how it affects model size and inference speed.", "Explain how sparse expert models like Mixture of Experts work and their advantages over dense models." ], "long": [ "Analyze the evolution of parameter-efficient fine-tuning methods from 2020 to present. Include a detailed comparison of at least five different approaches, their theoretical foundations, and practical implications for deploying large language models.", "Provide a comprehensive tutorial on implementing LoRA for a transformer-based language model. Include code examples, hyperparameter selection guidance, and best practices for training and deployment.", "Compare the computational efficiency, parameter count, and performance characteristics of different PEFT methods (LoRA, Prefix Tuning, Prompt Tuning, IA3, AdaLoRA) across various downstream tasks. Include a discussion of when each method is most appropriate.", "Explain the mathematical foundations of various parameter-efficient fine-tuning techniques. Discuss how each technique modifies the original neural network architecture and the optimization challenges involved.", "Discuss the ethical implications of parameter-efficient fine-tuning methods in democratizing access to large language models. 
Include considerations about computational resources, environmental impact, and accessibility for researchers in resource-constrained settings." ] }
peft/method_comparison/text_generation_benchmark/configs/prompts.json/0
{ "file_path": "peft/method_comparison/text_generation_benchmark/configs/prompts.json", "repo_id": "peft", "token_count": 459 }
236
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script trains a model on a small text dataset and measures the memory consumption, as well as a few other
useful metrics.

Example:

Get help:

```bash
python train_memory.py --help
```

Train the google/gemma-2-2b model with a LoRA config json at the indicated location.

```bash
python train_memory.py "google/gemma-2-2b" --max_seq_length 256 --batch_size 1 --rank 32 --dtype bfloat16 --path_config <path-to-adapter-config.json>
```

Fully fine-tune the model (i.e. without LoRA) by setting the rank to 0:

```bash
python train_memory.py "google/gemma-2-2b" --rank 0
```

Get an estimate of the size of the hidden states by passing `--monitor_tensors`. This trains just for a single epoch.
For realistic estimates, choose a representative batch size for this:

```bash
python train_memory.py "google/gemma-2-2b" --max_seq_length 256 --batch_size 32 --rank 32 --dtype bfloat16 --path_config configs/lora_rank-32_embedding-lora/ --monitor_tensors
```
"""

import argparse
import gc
import os
import sys
import tempfile
import time
import warnings
from collections import Counter
from contextlib import nullcontext
from functools import partial

import torch
from datasets import load_dataset
from torch import nn
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)

from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from peft.utils import CONFIG_NAME, SAFETENSORS_WEIGHTS_NAME


# suppress all warnings
warnings.filterwarnings("ignore")

# Prefer the generic accelerator API (torch>=2.x); fall back to "cuda" on older torch.
device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
# Bytes per element for each supported dtype; used to approximate parameter sizes
# when `--monitor_tensors` accounting subtracts parameters from saved tensors.
dtype_to_bytes_linear = {"float32": 4, "float16": 2, "bfloat16": 2, "int8": 1, "int4": 0.5}


def init_accelerator():
    # Seed everything and reset the accelerator's peak-memory counter so later
    # max_memory_allocated() readings measure only this run.
    torch.manual_seed(0)

    if device == "cpu":
        return

    device_module = getattr(torch, device, torch.cuda)
    device_module.reset_peak_memory_stats()
    device_module.manual_seed_all(0)
    # might not be necessary, but just to be sure
    nn.Linear(1, 1).to(device)


def get_data(tokenizer):
    # Load and tokenize the small "english_quotes" dataset, returning only the
    # token columns (input_ids/attention_mask) needed for training.
    def tokenize(samples):
        # For some reason, the max sequence length is not honored by the tokenizer, resulting in IndexErrors. Thus,
        # manually ensure that sequences are not too long.
        tokenized = tokenizer(samples["quote"])
        tokenized["input_ids"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized["input_ids"]]
        tokenized["attention_mask"] = [
            input_ids[: tokenizer.model_max_length] for input_ids in tokenized["attention_mask"]
        ]
        return tokenized

    data = load_dataset("ybelkada/english_quotes_copy")
    data = data.map(tokenize, batched=True)
    # We need to manually remove unused columns. This is because we cannot use remove_unused_columns=True in the
    # Trainer, as this leads to errors with torch.compile. We also cannot just leave them in, as they contain
    # strings. Therefore, manually remove all unused columns.
    data = data.remove_columns(["quote", "author", "tags"])
    return data


def train(model_id, rank, dtype, monitor_tensors, max_seq_length, batch_size, max_steps, path_config):
    # Run a short training loop and print memory / timing / file-size metrics.
    # With monitor_tensors=True, runs a single step and additionally reports an
    # approximate breakdown of saved tensors into parameters vs. activations.
    init_accelerator()
    device_module = getattr(torch, device, torch.cuda)
    accelerator_memory_init = device_module.max_memory_allocated()
    accelerator_memory_log = []

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.model_max_length = max_seq_length
    if not tokenizer.pad_token:
        tokenizer.pad_token = tokenizer.eos_token

    data = get_data(tokenizer)
    # Load the base model in the requested precision / quantization scheme.
    if dtype == "int4":
        quant_config = BitsAndBytesConfig(load_in_4bit=True)
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device, quantization_config=quant_config)
        model = prepare_model_for_kbit_training(model)
    elif dtype == "int8":
        quant_config = BitsAndBytesConfig(load_in_8bit=True)
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device, quantization_config=quant_config)
        model = prepare_model_for_kbit_training(model)
    elif dtype == "bfloat16":
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device, torch_dtype=torch.bfloat16)
    elif dtype == "float16":
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device, torch_dtype=torch.float16)
    elif dtype == "float32":
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device)
    else:
        raise ValueError(f"Invalid dtype: {dtype}")

    # rank > 0 => wrap with LoRA from the given config; rank == 0 => full fine-tuning
    if rank > 0:
        if path_config is None:
            raise RuntimeError("LoRA rank > 0 requires a path to a LoRA config")
        if path_config.endswith(CONFIG_NAME):
            path_config = path_config.removesuffix(CONFIG_NAME)
        config = LoraConfig.from_pretrained(path_config)
        model = get_peft_model(model, config)
        model.print_trainable_parameters()
    else:
        print("Not using LoRA")

    model.config.use_cache = False

    # With monitor_tensors, intercept autograd's saved tensors via pack/unpack
    # hooks so their sizes can be analyzed after the step.
    storage = []

    def pack(x):
        storage.append(x)
        return len(storage) - 1

    def unpack(x):
        return storage[x]

    train_ctx = partial(torch.autograd.graph.saved_tensors_hooks, pack, unpack) if monitor_tensors else nullcontext
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
    losses = []
    sample = 0
    tic_total = time.perf_counter()
    for i in range(0, max_steps):
        storage.clear()
        tic = time.perf_counter()
        try:
            batch = tokenizer.pad(data["train"][sample : sample + batch_size], return_tensors="pt").to(model.device)
            sample += batch_size
            # add targets
            batch["labels"] = batch["input_ids"].clone()
            optimizer.zero_grad()
            with train_ctx():
                outputs = model(**batch)
            loss = outputs.loss
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            accelerator_memory_log.append(device_module.memory_allocated() - accelerator_memory_init)
            # free cached blocks so per-step readings are comparable
            device_module.empty_cache()
            gc.collect()
            toc = time.perf_counter()
            print(f"step {i:3d} loss {loss.item():.6f} time {toc - tic:.2f}s", file=sys.stderr)
        except KeyboardInterrupt:
            print("canceled training")
            break
        if monitor_tensors:
            # one step is enough to capture the saved tensors
            break
    toc_total = time.perf_counter()

    accelerator_memory_final = device_module.max_memory_allocated()
    accelerator_memory_avg = int(sum(accelerator_memory_log) / len(accelerator_memory_log))
    print(f"{model.device.type} memory avg: {accelerator_memory_avg // 2**20}MB")
    print(f"{model.device.type} memory max: {(accelerator_memory_final - accelerator_memory_init) // 2**20}MB")
    print(f"total time: {toc_total - tic_total:.2f}s")

    # Measure the on-disk size of the (adapter) checkpoint.
    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(tmp_dir)
        stat = os.stat(os.path.join(tmp_dir, SAFETENSORS_WEIGHTS_NAME))
        file_size = stat.st_size
        print(f"file size: {file_size / 2**20:.1f}MB")

    if monitor_tensors:
        dtype_counts = Counter(t.dtype for t in storage)
        shape_counts = Counter(t.shape for t in storage)
        param_shape_counts = Counter(p.shape for p in model.parameters())
        param_shape_counts_copy = dict(param_shape_counts).copy()
        # shape counts includes the params, so we need to subtract them; note that they can be transposed
        # this is an approximation
        diff_shape_counts = {}
        for shape, count in shape_counts.items():
            if shape in param_shape_counts_copy:
                diff_count = count - param_shape_counts[shape]
                if diff_count > 0:
                    diff_shape_counts[shape] = diff_count
                param_shape_counts_copy[shape] = max(0, param_shape_counts_copy[shape] - diff_count)
            elif shape[::-1] in param_shape_counts:
                # parameters may appear transposed among the saved tensors
                diff_count = count - param_shape_counts[shape[::-1]]
                if diff_count > 0:
                    diff_shape_counts[shape] = diff_count
                param_shape_counts_copy[shape[::-1]] = max(0, param_shape_counts_copy[shape[::-1]] - diff_count)
            else:
                diff_shape_counts[shape] = count

        total_size = sum(t.numel() * t.element_size() for t in storage)
        total_size_mb = f"{total_size // 2**20}MB"
        diff_size = 0
        for shape, count in diff_shape_counts.items():
            diff_size += count * torch.zeros(shape).numel() * dtype_to_bytes_linear[dtype]
        param_size = total_size - diff_size
        diff_size_mb = f"{diff_size // 2**20}MB"
        param_size_mb = f"{param_size // 2**20}MB"
        print(f"Dtype counts: {dtype_counts.most_common()}")
        print(f"Total size of tensors: {total_size_mb: >12}")
        print(f"Total size of activations: {diff_size_mb: >12}")
        print(f"Total size of parameters: {param_size_mb: >12}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("model_id", type=str, help="Model name on Hugging Face Hub")
    parser.add_argument("--rank", type=int, default=8, help="Rank of LoRA, 0 => no LoRA, default 8")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="Data type, one of float32, float16, bfloat16, int8, int4, default float32",
    )
    parser.add_argument(
        "--monitor_tensors",
        action="store_true",
        help="Monitor tensor sizes during training for a single training step, off by default",
    )
    parser.add_argument("--max_seq_length", type=int, default=128, help="Maximum sequence length, default 128")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size, default 1")
    parser.add_argument("--max_steps", type=int, default=50, help="Maximum number of training steps, default 50")
    parser.add_argument("--path_config", type=str, default=None, help="Path to LoRA config")
    args = parser.parse_args()
    train(
        model_id=args.model_id,
        rank=args.rank,
        dtype=args.dtype,
        monitor_tensors=args.monitor_tensors,
        max_seq_length=args.max_seq_length,
        batch_size=args.batch_size,
        max_steps=args.max_steps,
        path_config=args.path_config,
    )
{ "file_path": "peft/scripts/train_memory.py", "repo_id": "peft", "token_count": 4392 }
237
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Adapted from https://botorch.org/api/_modules/botorch/utils/torch.html # TODO: To be removed once (if) https://github.com/pytorch/pytorch/pull/37385 lands from __future__ import annotations import collections from collections import OrderedDict import torch from torch.nn import Module class BufferDict(Module): r""" Holds buffers in a dictionary. BufferDict can be indexed like a regular Python dictionary, but buffers it contains are properly registered, and will be visible by all Module methods. `torch.nn.BufferDict` is an **ordered** dictionary that respects * the order of insertion, and * in `torch.nn.BufferDict.update`, the order of the merged `OrderedDict` or another `torch.nn.BufferDict` (the argument to `torch.nn.BufferDict.update`). Note that `torch.nn.BufferDict.update` with other unordered mapping types (e.g., Python's plain `dict`) does not preserve the order of the merged mapping. Args: buffers (iterable, optional): a mapping (dictionary) of (string : `torch.Tensor`) or an iterable of key-value pairs of type (string, `torch.Tensor`) ```python class MyModule(nn.Module): def __init__(self): super().__init__() self.buffers = nn.BufferDict({"left": torch.randn(5, 10), "right": torch.randn(5, 10)}) def forward(self, x, choice): x = self.buffers[choice].mm(x) return x ``` """ def __init__(self, buffers=None, persistent: bool = False): r""" Args: buffers (`dict`): A mapping (dictionary) from string to `torch.Tensor`, or an iterable of key-value pairs of type (string, `torch.Tensor`). 
""" super().__init__() self.persistent = persistent if buffers is not None: self.update(buffers) def __getitem__(self, key): return self._buffers[key] def __setitem__(self, key, buffer): self.register_buffer(key, buffer, persistent=self.persistent) def __delitem__(self, key): del self._buffers[key] def __len__(self): return len(self._buffers) def __iter__(self): return iter(self._buffers.keys()) def __contains__(self, key): return key in self._buffers def clear(self): """Remove all items from the BufferDict.""" self._buffers.clear() def pop(self, key): r"""Remove key from the BufferDict and return its buffer. Args: key (`str`): Key to pop from the BufferDict """ v = self[key] del self[key] return v def keys(self): r"""Return an iterable of the BufferDict keys.""" return self._buffers.keys() def items(self): r"""Return an iterable of the BufferDict key/value pairs.""" return self._buffers.items() def values(self): r"""Return an iterable of the BufferDict values.""" return self._buffers.values() def update(self, buffers): r""" Update the `torch.nn.BufferDict` with the key-value pairs from a mapping or an iterable, overwriting existing keys. Note: If `buffers` is an `OrderedDict`, a `torch.nn.BufferDict`, or an iterable of key-value pairs, the order of new elements in it is preserved. Args: buffers (iterable): a mapping (dictionary) from string to `torch.Tensor`, or an iterable of key-value pairs of type (string, `torch.Tensor`). 
""" if not isinstance(buffers, collections.abc.Iterable): raise TypeError( "BuffersDict.update should be called with an " "iterable of key/value pairs, but got " + type(buffers).__name__ ) if isinstance(buffers, (OrderedDict, BufferDict)): for key, buffer in buffers.items(): self[key] = buffer elif isinstance(buffers, collections.abc.Mapping): for key, buffer in sorted(buffers.items()): self[key] = buffer else: for j, p in enumerate(buffers): if not isinstance(p, collections.abc.Iterable): raise TypeError( "BufferDict update sequence element #" + str(j) + " should be Iterable; is" + type(p).__name__ ) if not len(p) == 2: raise ValueError( "BufferDict update sequence element " "#" + str(j) + " has length " + str(len(p)) + "; 2 is required" ) self[p[0]] = p[1] def extra_repr(self): child_lines = [] for k, p in self._buffers.items(): size_str = "x".join(str(size) for size in p.size()) device_type = p.device.type device_str = "" if device_type == "cpu" else f" ({device_type.upper()} {p.get_device()})" parastr = f"Buffer containing: [{torch.typename(p)} of size {size_str}{device_str}]" child_lines.append(" (" + k + "): " + parastr) tmpstr = "\n".join(child_lines) return tmpstr def __call__(self, input): raise RuntimeError("BufferDict should not be called.")
peft/src/peft/tuners/_buffer_dict.py/0
{ "file_path": "peft/src/peft/tuners/_buffer_dict.py", "repo_id": "peft", "token_count": 2436 }
238
// Author: Yao Feng
// Date: 2023/08
// Description: CUDA kernels that assemble a batch of square blocks into
// block-diagonal matrices (forward) and scatter the gradient back into the
// per-block layout (backward).

#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>

#include <vector>

namespace {

// Copies input[z, N, b, b] onto the block diagonal of output[z, N*b, N*b].
// One thread handles exactly one element of the input.
template <typename scalar_t>
__global__ void forward_fast_block_diag_cuda_kernel(
    const scalar_t* __restrict__ input, //[z, N, b, b]
    scalar_t* output, //[z, Nxb, Nxb]
    int z, int N, int b
) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= z*N*b*b) {
        return;
    }
    // Decompose flat index into (batch, block, row-in-block, col-in-block).
    const int zi = i/(N*b*b);
    const int Ni = (i%(N*b*b))/(b*b);
    const int x = ((i%(N*b*b))%(b*b))/b;
    const int y = ((i%(N*b*b))%(b*b))%b;

    output[zi*N*b*N*b + (Ni*b+x)*N*b + Ni*b + y] = input[zi*N*b*b + Ni*b*b + x*b + y];
}

// Inverse of the forward kernel: gathers the block-diagonal entries of
// grad_output[z, N*b, N*b] into grad_input[z, N, b, b].
template <typename scalar_t>
__global__ void backward_fast_block_diag_cuda_kernel(
    const scalar_t* __restrict__ grad_output, //[z, Nxb, Nxb]
    scalar_t* grad_input, //[z, N, b, b]
    int z, int N, int b
) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= z*N*b*b) {
        return;
    }
    const int zi = i/(N*b*b);
    const int Ni = (i%(N*b*b))/(b*b);
    const int x = ((i%(N*b*b))%(b*b))/b;
    const int y = ((i%(N*b*b))%(b*b))%b;

    grad_input[zi*N*b*b + Ni*b*b + x*b + y] = grad_output[zi*N*b*N*b + (Ni*b+x)*N*b + Ni*b + y];
}

} // namespace (fix: closing comment was placed before the brace)

// Forward: input [z, N, b, b] -> {output [z, N*b, N*b]} with the N blocks on
// the diagonal; off-diagonal entries are zero.
std::vector<at::Tensor> forward_fast_block_diag_cuda(
    at::Tensor input
){
    const auto z = input.size(0);
    const auto N = input.size(1);
    const auto b = input.size(2);

    const int threads = 512;
    const dim3 blocks_1 ((z*N*b*b - 1) / threads + 1);

    // initialize output with zeros: the kernel only writes the block diagonal,
    // so the off-diagonal entries must be pre-zeroed (at::empty would not do).
    auto output = at::zeros({z, N*b, N*b}, input.options());

    // scalar_type() replaces the deprecated Tensor::type() for dispatch.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "forward_fast_block_diag1", ([&] {
        forward_fast_block_diag_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            z, N, b);
    }));

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in forward_fast_block_diag_cuda_kernel: %s\n", cudaGetErrorString(err));

    return {output};
}

// Backward: grad_output [z, N*b, N*b] (input supplies shape/options) ->
// {grad_input [z, N, b, b]} holding the gradients of the diagonal blocks.
std::vector<at::Tensor> backward_fast_block_diag_cuda(
    at::Tensor grad_output,
    at::Tensor input
){
    const auto z = input.size(0);
    const auto N = input.size(1);
    const auto b = input.size(2);

    const int threads = 512;
    const dim3 blocks_1 ((z*N*b*b - 1) / threads + 1);

    // initialize grad input
    auto grad_input = at::zeros_like(input);

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_output.scalar_type(), "backward_fast_block_diag", ([&] {
        backward_fast_block_diag_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
            grad_output.data_ptr<scalar_t>(),
            grad_input.data_ptr<scalar_t>(),
            z, N, b);
    }));

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in backward_fast_block_diag_cuda_kernel: %s\n", cudaGetErrorString(err));

    return {grad_input};
}
peft/src/peft/tuners/boft/fbd/fbd_cuda_kernel.cu/0
{ "file_path": "peft/src/peft/tuners/boft/fbd/fbd_cuda_kernel.cu", "repo_id": "peft", "token_count": 1511 }
239
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Optional, Union

from peft.config import PeftConfig
from peft.utils import PeftType


@dataclass
class FourierFTConfig(PeftConfig):
    """
    This is the configuration class to store the configuration of a [`FourierFTModel`].

    Args:
        n_frequency (`int`):
            Num of learnable frequencies for the Discrete Fourier Transform. 'n_frequency' is an integer that is
            greater than 0 and less than or equal to d^2 (assuming the weight W has dimensions of d by d).
            Additionally, it is the number of trainable parameters required to update each delta W weight.
            'n_frequency' will affect the performance and efficiency for PEFT. Specifically, it has little impact on
            training speed, but higher values of it (typically) result in larger GPU memory costs and better accuracy.
            With the same `target_modules`, the number of parameters of LoRA is (2*d*r/n_frequency) times that of
            FourierFT. The following examples of settings regarding 'n_frequency' can be used as reference for users.
            For NLU tasks with the RoBERTa-large model, adopting 'n_frequency': 1000 can almost achieve similar results
            as 'r': 8 in LoRA. At this time, the number of parameters of LoRA is about 16 times that of FourierFT. For
            image classification tasks with Vit-large models, adopting 'n_frequency': 3000 can almost achieve similar
            results as 'r': 16 in LoRA, where the number of parameters of LoRA is about 11 times that of FourierFT.
        scaling (`float`):
            The scaling value for the delta W matrix. This is an important hyperparameter used for scaling, similar to
            the 'lora_alpha' parameter in the LoRA method. 'scaling' can be determined during the hyperparameter search
            process. However, if users want to skip this process, one can refer to the settings in the following
            scenarios. This parameter can be set to 100.0 or 150.0 for both RoBERTa-base and RoBERTa-large models
            across all NLU (GLUE) tasks. This parameter can be set to 300.0 for both LLaMA family models for all
            instruction tuning. This parameter can be set to 300.0 for both ViT-base and ViT-large models across all
            image classification tasks.
        random_loc_seed (`int`):
            Seed for the random location of the frequencies, i.e., the spectral entry matrix.
        target_modules (`Union[list[str],str]`):
            List of module names or regex expression of the module names to replace with FourierFT. For example, ['q',
            'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. Only linear layers are supported.
        exclude_modules (`Optional[Union[List[str], str]]`):
            The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
            When passing a list of strings, either an exact match will be performed or it is checked if the name of the
            module ends with any of the passed strings.
        fan_in_fan_out (`bool`):
            Set this to True if the layer to replace stores weight like (fan_in, fan_out).
        bias (`str`):
            Bias type for FourierFT. Can be 'none', 'all' or 'fourier_only'.
        modules_to_save (`list[str]`):
            List of modules apart from FourierFT layers to be set as trainable and saved in the final checkpoint. For
            example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are
            randomly initialized and as such need to be trainable and saved.
        layers_to_transform (`Union[list[int],int]`):
            The layer indexes to transform; if this argument is specified, PEFT will transform only the layer indexes
            that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at
            this index.
        layers_pattern (`Optional[Union[List[str], str]]`):
            The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is
            not in the common layers pattern. This should target the `nn.ModuleList` of the model, which is often
            called `'layers'` or `'h'`.
        n_frequency_pattern (`dict`):
            The mapping from layer names or regexp expression to n_frequency which are different from the default
            specified. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 1000`}.
        init_weights (`bool`):
            The initialization of the Fourier weights. Set this to False (the default) if the spectrum should be
            initialized to a standard normal distribution. Set this to True if the spectrum should be initialized to
            zeros.
    """

    n_frequency: int = field(
        default=1000,
        metadata={
            "help": (
                "Num of learnable frequencies for the Discrete Fourier Transform. 'n_frequency' is an integer that is"
                "greater than 0 and less than or equal to d^2 (assuming the weight W has dimensions of d by d)."
                "Additionally, it is the number of trainable parameters required to update each delta W weight."
                "'n_frequency' will affect the performance and efficiency for PEFT. Specifically, it has little impact on"
                "training speed, but higher values of it (typically) result in larger GPU memory costs and better accuracy."
                "With the same `target_modules`, the number of parameters of LoRA is (2*d*r/n_frequency) times that of FourierFT."
                "The following examples of settings regarding 'n_frequency' can be used as reference for users. For NLU"
                "tasks with the RoBERTa-large model, adopting 'n_frequency': 1000 can almost achieve similar results as"
                "'r': 8 in LoRA. At this time, the number of parameters of LoRA is about 16 times that of FourierFT."
                "For image classification tasks with Vit-large models, adopting 'n_frequency': 3000 can almost achieve"
                "similar results as 'r': 16 in LoRA, where the number of parameters of LoRA is about 11 times that of FourierFT."
            )
        },
    )
    scaling: float = field(
        default=150.0,
        metadata={
            "help": (
                "The scaling value for the delta W matrix. This is an important hyperparameter used for scaling, similar to the"
                "'lora_alpha' parameter in the LoRA method. 'scaling' can be determined during the hyperparameter search process."
                "However, if users want to skip this process, one can refer to the settings in the following scenarios."
                "This parameter can be set to 100.0 or 150.0 for both RoBERTa-base and RoBERTa-large models across all NLU (GLUE) tasks."
                "This parameter can be set to 300.0 for both LLaMA family models for all instruction tuning."
                "This parameter can be set to 300.0 for both ViT-base and ViT-large models across all image classification tasks."
            )
        },
    )
    random_loc_seed: Optional[int] = field(
        default=777, metadata={"help": "Seed for the random location of the frequencies."}
    )
    fan_in_fan_out: bool = field(
        default=False,
        metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
    )
    target_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": (
                "List of module names or regex expression of the module names to replace with FourierFT."
                "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
                "Only linear layers are supported."
            )
        },
    )
    exclude_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={"help": "List of module names or regex expression of the module names to exclude from fourierft."},
    )
    bias: str = field(
        default="none", metadata={"help": "Bias type for FourierFT. Can be 'none', 'all' or 'fourier_only'."}
    )
    modules_to_save: Optional[list[str]] = field(
        default=None,
        metadata={
            "help": (
                "List of modules apart from FourierFT layers to be set as trainable and saved in the final checkpoint. For"
                " example, in Sequence Classification or Token Classification tasks, the final layer"
                " `classifier/score` are randomly initialized and as such need to be trainable and saved."
            )
        },
    )
    layers_to_transform: Optional[Union[list[int], int]] = field(
        default=None,
        metadata={
            "help": (
                "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers"
                " indexes that are specified inside this list. If a single integer is passed, PEFT will transform only"
                " the layer at this index."
            )
        },
    )
    layers_pattern: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": (
                "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer"
                " pattern is not in the common layers pattern. This should target the `nn.ModuleList` of the "
                "model, which is often called `'layers'` or `'h'`."
            )
        },
    )
    n_frequency_pattern: Optional[dict] = field(
        default_factory=dict,
        metadata={
            "help": (
                "The mapping from layer names or regexp expression to n_frequency which are different from the default specified."
                "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 500`}."
            )
        },
    )
    init_weights: bool = field(
        default=False,
        metadata={
            "help": (
                "The initialization of the Fourier weights. Set this to False (the default) if the spectrum should be "
                "initialized to a standard normal distribution. Set this to True if the spectrum should be initialized "
                "to zeros."
            )
        },
    )

    def __post_init__(self):
        super().__post_init__()
        self.peft_type = PeftType.FOURIERFT
        # normalize list-typed module selectors to sets for O(1) membership checks
        self.target_modules = (
            set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
        )
        self.exclude_modules = (
            set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
        )
        # if target_modules is a regex expression, then layers_to_transform should be None
        if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
            raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
        # if target_modules is a regex expression, then layers_pattern should be None
        if isinstance(self.target_modules, str) and self.layers_pattern is not None:
            raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
        # check for layers_to_transform and layers_pattern
        if self.layers_pattern and not self.layers_to_transform:
            raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
{ "file_path": "peft/src/peft/tuners/fourierft/config.py", "repo_id": "peft", "token_count": 4510 }
240
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from collections import Counter, defaultdict from collections.abc import Iterable, Mapping from contextlib import nullcontext from copy import deepcopy from functools import partial from itertools import cycle from typing import Optional, Union import torch import torch.distributed as dist from tqdm import tqdm from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import _find_minimal_target_modules, check_target_module_exists from peft.utils.constants import MIN_TARGET_MODULES_FOR_OPTIMIZATION from peft.utils.incremental_pca import IncrementalPCA from peft.utils.other import _get_submodules, get_pattern_key from .config import LoraConfig from .layer import Embedding, LoraLayer, MultiheadAttention, _ConvNd UNSUPPORTED_LORA_MODULES = (Embedding, MultiheadAttention, _ConvNd) class _Hook: """ A base class for hooks that prepares layer inputs for EVA. 
""" def __init__( self, name: str, prepare_layer_inputs_fn: Optional[callable] = None, gather_distributed_inputs: bool = True, ): self.name = name self.gather_distributed_inputs = gather_distributed_inputs if prepare_layer_inputs_fn is None: self._prepare_layer_inputs_fn = self._prepare_layer_inputs_fn_default else: self._prepare_layer_inputs_fn = prepare_layer_inputs_fn self.model_input = None @staticmethod def _prepare_layer_inputs_fn_default(layer_input, model_input, layer_name) -> torch.Tensor: if isinstance(layer_input, torch.Tensor): pass elif isinstance(layer_input, (tuple, list)): layer_input = layer_input[0] else: raise ValueError( f"unsupported input type {type(layer_input)} for prepare_layer_inputs_fn in layer {layer_name}, " "please provide a custom prepare_layer_inputs_fn" ) # if the input has more than 2 dimensions, we flatten all but the last dimension if layer_input.ndim > 2: layer_input = layer_input.view(-1, layer_input.size(-1)) return layer_input @torch.no_grad() def prepare_layer_inputs(self, layer_input): return self._prepare_layer_inputs_fn(layer_input, self.model_input, self.name) def gather_layer_inputs(self, layer_input): if dist.is_initialized() and self.gather_distributed_inputs: world_size = dist.get_world_size() # First gather sizes from all processes more efficiently local_size = torch.tensor([layer_input.shape[0]], device=layer_input.device) all_sizes = torch.empty(world_size, dtype=local_size.dtype, device=layer_input.device) dist.all_gather_into_tensor(all_sizes, local_size) all_sizes = all_sizes.tolist() # Find maximum size and pad tensors padded_input = layer_input.new_zeros((max(all_sizes), *layer_input.shape[1:])) padded_input[: layer_input.shape[0]] = layer_input # Gather padded tensors gathered_inputs = [torch.zeros_like(padded_input) for _ in range(world_size)] dist.all_gather(gathered_inputs, padded_input.contiguous()) # Remove padding for each gathered tensor gathered_inputs = [tensor[:size] for tensor, size in 
zip(gathered_inputs, all_sizes)] # Concatenate along batch dimension return torch.cat(gathered_inputs, dim=0) return layer_input class SVDHook(_Hook): """ A forward hook for calculating incremental SVD on layer inputs. The hook is designed to be registered to a PyTorch module using the `register_forward_hook` method. This hook performs a step of incremental Singular Value Decomposition (SVD) on the inputs of a specified layer during the forward pass of a neural network. The hook also tracks convergence of the computed components using cosine similarity between the current and previous components. Args: name (str): Name of the layer to which this hook is attached. n_components (int): Number of principal components to compute. sim_thresh (Union[float, torch.Tensor]): Similarity threshold for convergence. prepare_layer_inputs_fn (Optional[callable]): Function to prepare layer inputs for SVD. """ def __init__( self, n_components: int, sim_thresh: Union[float, torch.Tensor], **base_class_kwargs, ): super().__init__(**base_class_kwargs) self.n_components = n_components self.sim_thresh = sim_thresh if isinstance(sim_thresh, torch.Tensor) and len(sim_thresh.shape) > 0: check1 = sim_thresh.size(0) == n_components or sim_thresh.size(0) == 1 check2 = len(sim_thresh.shape) == 1 if not (check1 and check2): raise ValueError( "if sim_thresh is a tensor with more than 0 dimensions it must have shape (n_components,) or (1,)" ) self.svd = IncrementalPCA( n_components=n_components, copy=True, lowrank=True, lowrank_seed=42, ) self.model_input = None self.converged = torch.zeros((n_components,), dtype=torch.bool) @torch.no_grad() def __call__(self, model, input, output): previous_components = None if hasattr(self.svd, "components_"): previous_components = self.svd.components_.clone().detach() states = self.prepare_layer_inputs(input) states = self.gather_layer_inputs(states) # check if batch sizes is more than the number of components if states.size(0) < self.n_components: 
print(f"skipping SVD for {self.name} because there are less than {self.n_components} examples") return self.svd.partial_fit(states.to(torch.float32)) # add if statement to check if we are in the first step where previous_components is None if previous_components is None: return components = self.svd.components_ if len(components.shape) == 1: components = components.reshape(1, -1) previous_components = previous_components.reshape(1, -1) # consider as converged if enough components have converged via cossim sim = torch.nn.functional.cosine_similarity(components, previous_components) self.converged = sim >= self.sim_thresh # This is used to determine if inputs of two different layers are equal. For such cases, SVD # needs to be done for only for one of the equal inputs. class HashHook(_Hook): """ A forward hook for hashing layer inputs. The hook is designed to be registered to a PyTorch module using the `register_forward_hook` method. This hook hashes the inputs of a specified layer during the forward pass of a neural network and stores the hash values for later analysis or comparison. Args: name (str): Name of the layer to which this hook is attached. hashed_inputs (list): List of hashed inputs. prepare_layer_inputs_fn (Optional[callable]): Function to prepare layer inputs for hashing. """ def __init__(self, **base_class_kwargs): super().__init__(**base_class_kwargs) self.hashed_inputs = [] @staticmethod def hash_fn(tensor): return hash(tuple(tensor.view(-1).tolist())) @torch.no_grad() def __call__(self, model, input, output): x = self.prepare_layer_inputs(input) x = self.gather_layer_inputs(x) self.hashed_inputs.append(self.hash_fn(x.cpu())) def find_equal_values(dictionary: dict) -> dict: """ Find keys in a dictionary that have the same value. This function takes a dictionary and returns a new dictionary containing keys that have the same value. 
The keys in the output dictionary are the values from the input dictionary, and the values are lists of keys that share the same value. """ value_dict = defaultdict(list) for k, v in dictionary.items(): value_dict[v].append(k) return {k: v for k, v in value_dict.items() if len(v) > 1} def get_device_with_meta_params(model: torch.nn.Module) -> torch.device: """ Get the device of the model's parameters. Useful if some parameters are on meta device. """ devices = list({p.device for p in model.parameters() if p.device.type != "meta"}) if len(devices) > 1: warnings.warn(f"Could not determine device, model has multiple devices: {devices}") return return devices[0] def move_inputs_to_device(inputs, device: Union[str, torch.device]): """ Move the inputs to the specified device. Adapted from hf.Trainer. """ if hasattr(inputs, "to"): return inputs.to(device) if isinstance(inputs, Mapping): return type(inputs)({k: move_inputs_to_device(v, device) for k, v in inputs.items()}) elif isinstance(inputs, (tuple, list)): return type(inputs)(move_inputs_to_device(v, device) for v in inputs) else: warnings.warn(f"input of type {type(inputs)} could not be moved to the correct device") return inputs def prepare_model_inputs_fn_language_modeling(model_input, peft_config: LoraConfig): """ Get the indices of the items that should be used for SVD. Attributes: model_input (dict): The model inputs. peft_config (LoraConfig): The configuration for the LoRA layers. 
""" if not isinstance(model_input, dict): raise ValueError("When using `prepare_model_inputs_fn_language_modeling` inputs must be a dictionary") mask = model_input.get("attention_mask", torch.ones_like(model_input["input_ids"])).bool() if peft_config.eva_config.use_label_mask and hasattr(model_input, "labels"): mask = torch.logical_and(mask, model_input["labels"] != peft_config.eva_config.label_mask_value) return mask.nonzero() def prepare_layer_inputs_fn_language_modeling(layer_input, model_input, layer_name) -> torch.Tensor: """ if not all items in the input should be used for SVD, this function can be used to get the indices of the items that should be used. Attributes: layer_input (torch.Tensor): The layer inputs. model_input (torch.Tensor): The model inputs or if `prepare_model_inputs_fn` is not None the output of this function. layer_name (str): The name of the layer. Returns: torch.Tensor: The input to the SVD. """ # if layer inputs are not a tensor, we simply get the first item if isinstance(layer_input, torch.Tensor): pass elif isinstance(layer_input, (tuple, list)): layer_input = layer_input[0] else: raise ValueError( f"unsupported input type {type(layer_input)} for prepare_layer_inputs_fn in layer {layer_name}, " "please provide a custom prepare_layer_inputs_fn" ) # in this case model_input is the output of `prepare_model_inputs_fn_language_modeling` return layer_input[model_input.T.unbind()] def forward_fn_dict(model, inputs): return model(**inputs) def _get_eva_state_dict( model: torch.nn.Module, dataloader: Iterable, peft_config: Optional[LoraConfig], target_module_check_fn: callable, forward_fn: Optional[callable], prepare_model_inputs_fn: Optional[callable], prepare_layer_inputs_fn: Union[callable, dict[str, callable], None], gather_distributed_inputs: bool, show_progress_bar: bool, ) -> dict: # Computes the rank distribution for each layer based on the explained variance ratio. 
# when rank_pattern flag is False, all values in max_components are the same def _get_rank_distribution(hooks, layer_hook_map, equal_inputs_map, rank_budget, max_components): exp_vars = {k: h[0].svd.explained_variance_ratio_[: max_components[k]] for k, h in hooks.items()} keys, values = zip(*[(k, c) for k, name in layer_hook_map.items() for c in exp_vars[name]]) idx = torch.stack(values).argsort(descending=True) counts = Counter([keys[i] for i in idx[:rank_budget]]) counts = {k: counts.get(k, 0) for k in layer_hook_map.keys()} # add layers with 0 rank for k, k_hook in equal_inputs_map.items(): # ensure hook layers have the highest rank if they are equal to another layer rank, rank_hook = counts[k], counts[k_hook] if rank_hook >= rank: continue counts[k_hook], counts[k] = rank, rank_hook return counts # dataloader is not empty if len(dataloader) == 0: raise ValueError("dataloader is empty") # check if dist is initialized if dist.is_initialized() and gather_distributed_inputs: warnings.warn( "torch.distributed is initialized and `gather_distributed_inputs` is True, " "therefore EVA initialization will gather tensors from all ranks. " "Ensure the model does not receive the same inputs on different ranks." 
) # for unusually high rho values, define an upper limit rho_threshold = 1000 rho = peft_config.eva_config.rho if rho > rho_threshold: max_dim = max(max(p.shape) for p in model.parameters()) rho_ceil = max_dim // peft_config.r rho = min(rho, rho_ceil) training = model.training device = get_device_with_meta_params(model) model.eval() # get model inputs inputs = next(iter(dataloader)) if device is not None: inputs = move_inputs_to_device(inputs, device) if prepare_model_inputs_fn is not None: model_inputs_for_hooks = prepare_model_inputs_fn(inputs, peft_config) else: model_inputs_for_hooks = deepcopy(inputs) hooks = {} max_components = {} rank_budget = 0 for name, module in model.named_modules(): if not target_module_check_fn(name, module): continue if isinstance(prepare_layer_inputs_fn, Mapping): fn = prepare_layer_inputs_fn.pop(name, None) else: fn = prepare_layer_inputs_fn hook = HashHook(name=name, prepare_layer_inputs_fn=fn, gather_distributed_inputs=gather_distributed_inputs) hook.model_input = model_inputs_for_hooks handle = module.register_forward_hook(hook) hooks[name] = (hook, handle) layer_rank = peft_config.rank_pattern.get( get_pattern_key(peft_config.rank_pattern.keys(), name), peft_config.r ) max_components[name] = round(layer_rank * rho) rank_budget += layer_rank if isinstance(prepare_layer_inputs_fn, Mapping) and len(prepare_layer_inputs_fn) > 0: raise ValueError( "prepare_layer_inputs_fn is a mapping but the following module names were not found in the model: " f"{prepare_layer_inputs_fn.keys()}" ) # forward for one batch to check which layer inputs are equal to avoid unneeded svd calculations forward_fn(model, inputs) hash_dict = {k: h[0].hashed_inputs[0] for k, h in hooks.items()} # equal input maps groups layers which receive the same input. One layer is defined as the key and receives an svd # hook. For the remaining layers the svd results can be skipped. 
equal_inputs = list(find_equal_values(hash_dict).values()) equal_inputs_map = {vv: v[0] for v in equal_inputs for vv in v[1:]} # for layers with equal inputs we need to make sure that the max_components are the same for names in equal_inputs: max_value = max(max_components[n] for n in names) for n in names: max_components[n] = max_value # initialize svd hooks for name in list(hooks.keys()): hook, handle = hooks.pop(name) handle.remove() if name in equal_inputs_map: continue hook = SVDHook( n_components=max_components[name], sim_thresh=peft_config.eva_config.tau, name=name, prepare_layer_inputs_fn=hook._prepare_layer_inputs_fn, gather_distributed_inputs=gather_distributed_inputs, ) module = model.get_submodule(name) handle = module.register_forward_hook(hook) hooks[name] = (hook, handle) # adding the old handle here so we dont get errors in the first forward pass layer_hook_map = {**dict(zip(hooks.keys(), hooks.keys())), **equal_inputs_map} # start svd calculation if show_progress_bar and (not dist.is_initialized() or dist.get_rank() == 0): pbar = tqdm(iter(cycle(dataloader)), position=0, leave=False) use_tqdm = True else: pbar = iter(cycle(dataloader)) use_tqdm = False convergence_dict = {k: False for k in hooks.keys()} rank_dist = max_components.copy() for inputs in pbar: if device is not None: inputs = move_inputs_to_device(inputs, device) if prepare_model_inputs_fn is not None: model_inputs_for_hooks = prepare_model_inputs_fn(inputs, peft_config) else: model_inputs_for_hooks = deepcopy(inputs) for name in list(hooks.keys()): hook, handle = hooks[name] # check if all components that are needed for the rank distribution have converged converged = torch.all(hook.converged[: rank_dist[name]]) # if a layer has switched from not converged to converged in the current step if (not convergence_dict[name]) and converged and handle: handle.remove() handle = None convergence_dict[name] = True continue # if a layer has switched from converged to not converged in the current 
step elif convergence_dict[name] and not converged: module = model.get_submodule(name) handle = module.register_forward_hook(hook) convergence_dict[name] = False hook.model_input = model_inputs_for_hooks hooks[name] = (hook, handle) if use_tqdm: layer_converged = list(convergence_dict.values()) + [ convergence_dict[v] for v in equal_inputs_map.values() ] pbar.set_description(f"{sum(layer_converged)}/{len(layer_converged)} layers have converged") if all(convergence_dict.values()): break forward_fn(model, inputs) # in case some hooks have to skip the svd calculation because the number of tokens is less than the number of # components if not all(hasattr(h[0].svd, "components_") for h in hooks.values()): continue rank_dist = _get_rank_distribution(hooks, layer_hook_map, equal_inputs_map, rank_budget, max_components) # check all custom hooks have been removed remaining_hooks = {n for n, m in model.named_modules() for v in m._forward_hooks.values() if isinstance(v, _Hook)} if len(remaining_hooks) > 0: raise ValueError( f"Found active hooks added by EVA that weren't properly removed: {remaining_hooks}. " "Please report this issue at https://github.com/huggingface/peft/issues" ) eva_state_dict = {} for name, rank in rank_dist.items(): hook = hooks[layer_hook_map[name]][0] if not torch.all(hook.converged[:rank]): raise ValueError( f"Layer {name} has not converged but was assigned rank {rank}. 
" "Please report this issue at https://github.com/huggingface/peft/issues" ) u = hook.svd.components_[:rank] if peft_config.eva_config.whiten: u /= hook.svd.singular_values_[:rank].sqrt().reshape(-1, 1) eva_state_dict[name] = u # restore model state model.train(training) # move tensors to device if device is not None: eva_state_dict = {k: v.to(device) for k, v in eva_state_dict.items()} return eva_state_dict def _load_eva_state_dict( model: torch.nn.Module, eva_state_dict: dict, adapter_name: str, ): peft_config = model.peft_config[adapter_name] update_layer_kwargs = { "adapter_name": adapter_name, "lora_dropout": peft_config.lora_dropout, "use_rslora": peft_config.use_rslora, "use_dora": peft_config.use_dora, "lora_bias": peft_config.lora_bias, } missing_eva_inits = [] new_target_modules = [] other_module_names = [] rank_pattern = {} alpha_pattern = {} for name, module in model.named_modules(): name_in_base_model = name.replace("base_model.model.", "") if not isinstance(module, LoraLayer): other_module_names.append(name_in_base_model) continue # Regexp matching - Find key which matches current target_name in patterns provided r = peft_config.rank_pattern.get(get_pattern_key(peft_config.rank_pattern.keys(), name), peft_config.r) alpha = peft_config.alpha_pattern.get( get_pattern_key(peft_config.alpha_pattern.keys(), name), peft_config.lora_alpha ) if name in eva_state_dict: w = eva_state_dict.pop(name) new_rank = w.size(0) if new_rank == 0: parent, _, target_name = _get_submodules(model, name) setattr(parent, target_name, module.get_base_layer()) continue elif new_rank != r: if peft_config.eva_config.adjust_scaling_factors: alpha *= new_rank / r if new_rank != r or module.lora_A[adapter_name].weight.device.type == "meta": module.update_layer(r=new_rank, lora_alpha=alpha, init_lora_weights="eva", **update_layer_kwargs) module.lora_A[adapter_name].weight.copy_(w) new_target_modules.append(name_in_base_model) else: module.update_layer(r=r, lora_alpha=alpha, 
init_lora_weights=True, **update_layer_kwargs) missing_eva_inits.append(name_in_base_model) new_rank = r # update rank pattern and alpha pattern if new_rank != peft_config.r: rank_pattern[name_in_base_model] = new_rank if alpha != peft_config.lora_alpha: alpha_pattern[name_in_base_model] = alpha # update target modules if some lora layers have been removed due to their EVA rank being 0 new_target_modules = new_target_modules + missing_eva_inits if len(new_target_modules) >= MIN_TARGET_MODULES_FOR_OPTIMIZATION: new_target_modules = _find_minimal_target_modules(new_target_modules, other_module_names) model.peft_config[adapter_name].target_modules = new_target_modules # set rank pattern obtained from EVA model.peft_config[adapter_name].rank_pattern = rank_pattern # when adjust_scaling_factors is True, lora scaling factors have been adjusted after the rank redistribution model.peft_config[adapter_name].alpha_pattern = alpha_pattern if missing_eva_inits: warnings.warn( "the following layers were initialized with init_lora_weights=True because they " f"were not found in the eva state_dict: {missing_eva_inits}\ncurrently the " f"following lora modules are not supported by EVA: {UNSUPPORTED_LORA_MODULES}" ) @torch.no_grad() def get_eva_state_dict( model: torch.nn.Module, dataloader: Iterable, peft_config: Optional[LoraConfig] = None, forward_fn: Optional[callable] = forward_fn_dict, prepare_model_inputs_fn: Optional[callable] = prepare_model_inputs_fn_language_modeling, prepare_layer_inputs_fn: Union[callable, dict[str, callable], None] = prepare_layer_inputs_fn_language_modeling, adapter_name: str = "default", gather_distributed_inputs: bool = True, show_progress_bar: bool = True, ) -> dict: """ Compute the SVD for each layer in the model. This function computes the Singular Value Decomposition (SVD) for each layer in the model. It uses the incremental PCA method to compute the SVD components. 
The function also checks for convergence of the computed components using cosine similarity. The rank distribution for each layer is determined based on the explained variance ratio. Args: model (torch.nn.Module): The model to compute the SVD for. Does not need to be a PeftModel. dataloader (Iterable): The dataloader to use for the forward pass. peft_config (Optional[LoraConfig]): The configuration for the LoRA layers. Only required if `model` is not a PeftModel. forward_fn (callable): The forward function to use for the forward pass. Takes two arguments: `model` and `inputs`. Default behavior is `return model(**inputs)` prepare_model_inputs_fn (Optional[callable]): This function receives the model inputs and the peft_config and passes the output to `prepare_layer_inputs_fn`. Can be used to modify the input to the SVD computation based on the original model inputs. For example for language modeling the attention mask is used to determine which indices are padding tokens and should not be used for SVD. Any function defined here expects two arguments: `model_input` and `peft_config`. `peft.tuners.lora.eva.prepare_model_inputs_fn_language_modeling` is used by default. prepare_layer_inputs_fn (Union[callable, Dict[str, callable], None]): This function receives the layer inputs, the model inputs (potentially modified by `prepare_model_inputs_fn`) and the name of the layer and returns the inputs that should be used for SVD for that particular layer. Any custom function defined here expects three arguments: `layer_input`, `model_input`, and `layer_name` and should return a 2d tensor. The default logic can be found in peft.tuners.lora.eva.prepare_layer_inputs_fn_language_modeling and works for language modeling. In this case model_inputs is the mask used to determine which indices should be used for SVD (created by `prepare_model_inputs_fn_language_modeling`). adapter_name (str): The name of the adapter to compute the SVD for. 
gather_distributed_inputs (bool): Whether to gather the layer inputs from all ranks. Default is True meaning in a distributed setting the layer inputs will be gathered from all ranks for the SVD computation. For non-distributed settings this argument is ignored. Set to False if you are using a non-distributed dataloader in a distributed setting. show_progress_bar (bool): Whether to show a progress bar. Default is True. Returns: eva_state_dict (dict): The state dictionary containing the SVD components for each layer. """ def target_module_check_fn_peft_model(name, module, unsupported_lora_modules): "check if a module is an adapter module via base_layer attribute" return hasattr(module, "base_layer") and not isinstance(module, unsupported_lora_modules) def target_module_check_fn_default(name, module, peft_config): "check if a module is an adapter module via target_modules" is_target_module = True if peft_config.target_modules is not None: is_target_module = check_target_module_exists(peft_config, name) # Conv1D for GPT2 support return isinstance(module, (torch.nn.Linear, Conv1D)) and is_target_module is_peft_model = hasattr(model, "peft_config") # get peft_config if is_peft_model and peft_config is None: peft_config = model.peft_config[adapter_name] elif peft_config is None: raise ValueError("peft_config is required if model is not a PeftModel") # setup context and target module check function if is_peft_model: ctx = model.disable_adapter() target_module_check_fn = partial( target_module_check_fn_peft_model, unsupported_lora_modules=UNSUPPORTED_LORA_MODULES ) else: ctx = nullcontext() target_module_check_fn = partial(target_module_check_fn_default, peft_config=peft_config) with ctx: eva_state_dict = _get_eva_state_dict( model=model, dataloader=dataloader, peft_config=peft_config, target_module_check_fn=target_module_check_fn, forward_fn=forward_fn, prepare_model_inputs_fn=prepare_model_inputs_fn, prepare_layer_inputs_fn=prepare_layer_inputs_fn, 
gather_distributed_inputs=gather_distributed_inputs, show_progress_bar=show_progress_bar, ) return eva_state_dict @torch.no_grad() def initialize_lora_eva_weights( model: torch.nn.Module, dataloader: Optional[Iterable] = None, eva_state_dict: Optional[dict] = None, forward_fn: Optional[callable] = forward_fn_dict, prepare_model_inputs_fn: Optional[callable] = prepare_model_inputs_fn_language_modeling, prepare_layer_inputs_fn: Union[callable, dict[str, callable], None] = prepare_layer_inputs_fn_language_modeling, adapter_name: str = "default", gather_distributed_inputs: bool = True, show_progress_bar: bool = True, ): """ Initialize the weights of the LoRA layers using the EVA method. This function initializes the weights of the LoRA layers using the EVA method. It computes the SVD for each adapter layer and updates the weights accordingly. Args: model (PeftModel): The peft model to compute the SVD for. dataloader (Optional[Iterable]): The dataloader to use for the forward pass. If None, eva_state_dict needs to be provided. eva_state_dict (Optional[dict]): The state_dict to load into the model. If None, a dataloader needs to be provided and the state_dict will be computed using `get_eva_state_dict`. forward_fn (callable): The forward function to use for the forward pass. Takes two arguments: `model` and `inputs`. Default behavior is `return model(**inputs)` prepare_model_inputs_fn (Optional[callable]): This function receives the model inputs and the peft_config and passes the output to `prepare_layer_inputs_fn`. Can be used to modify the input to the SVD computation based on the original model inputs. For example for language modeling the attention mask is used to determine which indices are padding tokens and should not be used for SVD. Any function defined here expects two arguments: `model_input` and `peft_config`. `peft.tuners.lora.eva.prepare_model_inputs_fn_language_modeling` is used by default. 
prepare_layer_inputs_fn (Union[callable, Dict[str, callable], None]): This function receives the layer inputs, the model inputs (potentially modified by `prepare_model_inputs_fn`) and the name of the layer and returns the inputs that should be used for SVD for that particular layer. Any custom function defined here expects three arguments: `layer_input`, `model_input`, and `layer_name` and should return a 2d tensor. The default logic can be found in peft.tuners.lora.eva.prepare_layer_inputs_fn_language_modeling and works for language modeling. In this case model_inputs is the mask used to determine which indices should be used for SVD (created by `prepare_model_inputs_fn_language_modeling`). adapter_name (str): The name of the adapter to initialize the weights for. gather_distributed_inputs (bool): Whether to gather the layer inputs from all ranks. Default is True meaning in a distributed setting the layer inputs will be gathered from all ranks for the SVD computation. For non-distributed settings this argument is ignored. Set to False if you are using a non-distributed dataloader in a distributed setting. show_progress_bar (bool): Whether to show a progress bar. Default is True. Returns: model (torch.nn.Module): The model with the initialized LoRA weights. """ if not hasattr(model, "peft_config"): raise ValueError("model must be a PeftModel") # eva currently only works with a single active adapter # Important: when removing this requirement, make sure eva init works correctly if the new rank is 0. 
if len(model.active_adapters) > 1: raise ValueError("`initialize_lora_eva_weights` currently only works with a single active adapter") # initialize_lora_eva_weights only works with `init_lora_weights='eva'` if model.peft_config[adapter_name].init_lora_weights != "eva": raise ValueError("`initialize_lora_eva_weights` can only be used with `init_lora_weights='eva'`") # compute svd if eva_state_dict is None: if dataloader is None: raise ValueError("dataloader is required if eva_state_dict is not provided") eva_state_dict = get_eva_state_dict( model=model, dataloader=dataloader, forward_fn=forward_fn, prepare_model_inputs_fn=prepare_model_inputs_fn, prepare_layer_inputs_fn=prepare_layer_inputs_fn, adapter_name=adapter_name, gather_distributed_inputs=gather_distributed_inputs, show_progress_bar=show_progress_bar, ) _load_eva_state_dict(model, eva_state_dict, adapter_name)
peft/src/peft/tuners/lora/eva.py/0
{ "file_path": "peft/src/peft/tuners/lora/eva.py", "repo_id": "peft", "token_count": 13577 }
241
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Based on https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/prompt_encoder.py
# with some refactor
import warnings

import torch

from .config import PromptEncoderConfig, PromptEncoderReparameterizationType


class PromptEncoder(torch.nn.Module):
    """
    Prompt-encoder network that produces the virtual token embeddings used by p-tuning.

    A learned embedding table maps virtual token indices to vectors, which are then
    reparameterized either by an LSTM followed by an MLP, or by a plain MLP, depending on
    `config.encoder_reparameterization_type`.

    Args:
        config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.

    Example:

    ```py
    >>> from peft import PromptEncoder, PromptEncoderConfig

    >>> config = PromptEncoderConfig(
    ...     peft_type="P_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     encoder_reparameterization_type="MLP",
    ...     encoder_hidden_size=768,
    ... )
    >>> prompt_encoder = PromptEncoder(config)
    ```

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.
        - **mlp_head** (`torch.nn.Sequential`) -- The MLP head, only built when `inference_mode=False`.
        - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head, only built when `inference_mode=False` and
          `encoder_reparameterization_type="LSTM"`.
        - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.
        - **input_size** (`int`) -- The input size of the prompt encoder.
        - **output_size** (`int`) -- The output size of the prompt encoder.
        - **hidden_size** (`int`) -- The hidden size of the prompt encoder.
        - **total_virtual_tokens** (`int`) -- The total number of virtual tokens of the prompt encoder.
        - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]) -- The encoder type.

    Input shape: (`batch_size`, `total_virtual_tokens`)

    Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
    """

    def __init__(self, config):
        super().__init__()
        # input and output size both mirror the base model's hidden dimension
        self.token_dim = config.token_dim
        self.input_size = self.token_dim
        self.output_size = self.token_dim
        self.hidden_size = config.encoder_hidden_size
        self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
        self.encoder_type = config.encoder_reparameterization_type

        # one trainable embedding vector per virtual token
        self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)
        if config.inference_mode:
            # reparameterization heads are only needed during training
            return

        if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
            self.lstm_head = torch.nn.LSTM(
                input_size=self.input_size,
                hidden_size=self.hidden_size,
                num_layers=config.encoder_num_layers,
                dropout=config.encoder_dropout,
                bidirectional=True,
                batch_first=True,
            )
            # bidirectional LSTM doubles the feature dimension entering the MLP
            self.mlp_head = torch.nn.Sequential(
                torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),
                torch.nn.ReLU(),
                torch.nn.Linear(self.hidden_size * 2, self.output_size),
            )
        elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
            encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers
            if config.encoder_num_layers != encoder_num_layers_default:
                warnings.warn(
                    f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. "
                    f"Exactly {encoder_num_layers_default} MLP layers are used."
                )
            # fixed two-hidden-layer MLP; encoder_num_layers is intentionally ignored here
            self.mlp_head = torch.nn.Sequential(
                torch.nn.Linear(self.input_size, self.hidden_size),
                torch.nn.ReLU(),
                torch.nn.Linear(self.hidden_size, self.hidden_size),
                torch.nn.ReLU(),
                torch.nn.Linear(self.hidden_size, self.output_size),
            )
        else:
            raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")

    def forward(self, indices):
        """Map virtual token `indices` to reparameterized prompt embeddings."""
        embeds = self.embedding(indices)
        if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
            # LSTM returns (output, (h_n, c_n)); only the per-step outputs feed the MLP
            lstm_out, _ = self.lstm_head(embeds)
            return self.mlp_head(lstm_out)
        if self.encoder_type == PromptEncoderReparameterizationType.MLP:
            return self.mlp_head(embeds)
        raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")
{ "file_path": "peft/src/peft/tuners/p_tuning/model.py", "repo_id": "peft", "token_count": 2476 }
242
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import math
import warnings
from dataclasses import asdict
from enum import Enum
from typing import Optional, Union

import torch
import torch.nn as nn
from accelerate.utils.imports import is_bf16_available
from tqdm import tqdm
from transformers.pytorch_utils import Conv1D

from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import (
    TRANSFORMERS_MODELS_TO_RANDLORA_TARGET_MODULES_MAPPING,
    ModulesToSaveWrapper,
    _get_submodules,
)

from .._buffer_dict import BufferDict
from ..tuners_utils import _maybe_include_all_linear_layers
from .config import RandLoraConfig
from .layer import Linear, RandLoraLayer


def _kaiming_init(
    tensor_or_shape: Union[torch.Tensor, tuple[int, ...]],
    generator: torch.Generator,
) -> torch.Tensor:
    """
    Kaiming Uniform Initialisation adapted to accept a `torch.Generator` object for PRNG.

    Args:
        tensor_or_shape (`Union[torch.Tensor, tuple[int, ...]]`):
            Tensor to initialise, or shape of new tensor to create and then initialise.
        generator: (`torch.Generator`):
            Generator object that manages the state of the PRNG algorithm in use.

    Returns:
        `torch.Tensor`: The initialised tensor.
    """
    if isinstance(tensor_or_shape, tuple):
        # a new tensor is allocated in half precision (bf16 when available, else fp16) —
        # presumably to keep the large shared random bases small in memory
        tensor = torch.empty(
            tensor_or_shape,
            dtype=torch.bfloat16 if is_bf16_available() else torch.float16,
        )
    else:
        # an existing tensor is initialised in place (its dtype is left untouched)
        tensor = tensor_or_shape
    with torch.no_grad():
        # a=sqrt(5) matches torch.nn.Linear's default kaiming_uniform_ initialisation
        basis = torch.nn.init.kaiming_uniform_(tensor, a=math.sqrt(5), generator=generator)
    return basis


class RandLoraModel(BaseTuner):
    """
    Creates a RandLoRA model from a pretrained transformers model.

    Args:
        model ([`~transformers.PreTrainedModel`]): The model to be adapted.
        config ([`RandLoraConfig`]): The configuration of the RandLora model.
        adapter_name (`str`): The name of the adapter, defaults to `"default"`.
        low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
            Create empty adapter weights on meta device. Useful to speed up the loading process.

    Returns:
        `torch.nn.Module`: The RandLora model.

    Example:

    ```py
    >>> from transformers import AutoModelForCausalLM
    >>> from peft import RandLoraConfig, get_peft_model

    >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    >>> config = RandLoraConfig(r=32)
    >>> model = get_peft_model(base_model, config)
    ```

    **Attributes**:
        - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
        - **peft_config** ([`RandLoraConfig`]): The configuration of the RandLora model.
    """

    # parameter-name prefix used to identify RandLora-owned parameters
    prefix: str = "randlora_"

    def _find_dim(self, config) -> tuple[int, int]:
        """
        Finds the largest input and output dimensions across linear layers that have been wrapped with RandLora.

        This will be used for determining the size of the shared randlora_A and randlora_B matrices.
""" model_config = self.get_model_config(self.model) peft_config = self._prepare_adapter_config(config, model_config) peft_config = _maybe_include_all_linear_layers(peft_config, self.model) largest_shape = None for key, module in self.model.named_modules(): if not self._check_target_module_exists(peft_config, key): continue if isinstance(module, nn.Linear): module_shape = module.out_features, module.in_features elif isinstance(module, Conv1D): module_shape = module.weight.ds_shape if hasattr(module.weight, "ds_shape") else module.weight.shape module_shape = module_shape[::-1] else: continue if largest_shape is None: largest_shape = module_shape continue if module_shape != largest_shape: largest_shape = tuple(max(a, b) for a, b in zip(largest_shape, module_shape)) if largest_shape is None: msg = "No layers types compatible with RandLora were found. Please check `peft_config.target_modules`." raise ValueError(msg) return largest_shape def _init_randlora_A_randlora_B_sparse(self, config: RandLoraConfig, adapter_name: str, sparsity: int = 3) -> None: """ Sparse random projections as described in https://cs-people.bu.edu/evimaria/cs565/kdd-rp.pdf """ linear_out_dim, linear_in_dim = self._find_dim(config) max_dim, min_dim = max(linear_out_dim, linear_in_dim), min(linear_out_dim, linear_in_dim) # use of persistent to exclude randlora_A and randlora_B from the state dict if we choose not to save them. self.randlora_A = BufferDict({}, persistent=config.save_projection) self.randlora_B = BufferDict({}, persistent=config.save_projection) # deterministic init of randlora_A and randlora_B if we know the key generator = torch.Generator(device="cpu").manual_seed(config.projection_prng_key) # The gamma matrix is applied on A meaning it can be unique (shared) across the n scaling matrices. # We also set randlora_A as the smallest matrix to reduce trainable parameters. 
randlora_A = torch.rand((config.r, 1, min_dim), generator=generator) # Number of bases to ensure full rank num_bases = min_dim / config.r num_bases = int(num_bases) if num_bases.is_integer() else int(num_bases) + 1 # Ensure full rank randlora_B = torch.rand((max_dim, num_bases, config.r), generator=generator) # The current implementation is a proof of concept and does take into consideration # the sparsity to reduce memory usage or speed up compute randlora_B_sparse = torch.zeros(randlora_B.shape) randlora_A_sparse = torch.zeros(randlora_A.shape) randlora_B_sparse[randlora_B < 1 / (2 * sparsity)] = -1 randlora_B_sparse[randlora_B > 1 - 1 / (2 * sparsity)] = 1 randlora_A_sparse[randlora_A < 1 / (2 * sparsity)] = -1 randlora_A_sparse[randlora_A > 1 - 1 / (2 * sparsity)] = 1 # Std normalization is empirically found to be the best randlora_A, randlora_B = ( randlora_A_sparse / randlora_A_sparse.std(), randlora_B_sparse / randlora_B_sparse.std(), ) self.randlora_A[adapter_name] = randlora_A self.randlora_B[adapter_name] = randlora_B def _init_randlora_A_randlora_B(self, config: RandLoraConfig, adapter_name: str) -> None: linear_out_dim, linear_in_dim = self._find_dim(config) max_dim, min_dim = max(linear_out_dim, linear_in_dim), min(linear_out_dim, linear_in_dim) # use of persistent to exclude randlora_A and randlora_B from the state dict if we choose not to save them. self.randlora_A = BufferDict({}, persistent=config.save_projection) self.randlora_B = BufferDict({}, persistent=config.save_projection) # deterministic init of randlora_A and randlora_B if we know the key generator = torch.Generator(device="cpu").manual_seed(config.projection_prng_key) # The gamma matrix is applied on A meaning it can be unique (shared) across the n scaling matrices. # We also set randlora_A as the smallest matrix to reduce trainable parameters. 
randlora_A = _kaiming_init((config.r, 1, min_dim), generator=generator) # Ensure full rank num_bases = min(linear_out_dim, linear_in_dim) / config.r num_bases = int(num_bases) if num_bases.is_integer() else int(num_bases) + 1 randlora_B = torch.cat( [_kaiming_init((max_dim, 1, config.r), generator=generator) for _ in range(num_bases)], dim=1 ) # Std normalization is empirically found to be the best randlora_A, randlora_B = randlora_A / randlora_A.std(), randlora_B / randlora_B.std() self.randlora_A[adapter_name] = randlora_A self.randlora_B[adapter_name] = randlora_B def _pre_injection_hook(self, model: nn.Module, config: RandLoraConfig, adapter_name: str) -> None: if config.very_sparse: linear_out_dim, linear_in_dim = self._find_dim(config) self._init_randlora_A_randlora_B_sparse( config, adapter_name, sparsity=math.sqrt(min(linear_out_dim, linear_in_dim)) ) elif config.sparse: self._init_randlora_A_randlora_B_sparse(config, adapter_name, sparsity=3) else: self._init_randlora_A_randlora_B(config, adapter_name) def _check_new_adapter_config(self, config: RandLoraConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ # the below todo is copied from LoRA # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check # does not fully correspond to the error message. if (len(self.peft_config) > 1) and (config.bias != "none"): raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." ) for existing_config in self.peft_config.values(): if existing_config is config: # skip the current config continue if existing_config.projection_prng_key != config.projection_prng_key: raise ValueError( f"RandLora PRNG initialisation key must be the same for all adapters. 
Got {config.projection_prng_key=} but " f"previous config had {existing_config.projection_prng_key}." ) save_project_unique_values = sorted({config.save_projection for config in self.peft_config.values()}) if len(save_project_unique_values) > 1: raise ValueError( "RandLora projection weights must be saved for all adapters or none, but got multiple different values: " f"{save_project_unique_values}" ) @staticmethod def _check_target_module_exists(randlora_config, key): return check_target_module_exists(randlora_config, key) def _create_and_replace( self, randlora_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs, ): if current_key is None: raise ValueError("Current Key shouldn't be `None`") r = randlora_config.r bias = hasattr(target, "bias") and target.bias is not None kwargs = { "r": r, "randlora_alpha": randlora_config.randlora_alpha, "randlora_dropout": randlora_config.randlora_dropout, "fan_in_fan_out": randlora_config.fan_in_fan_out, "init_weights": randlora_config.init_weights, "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), } kwargs["bias"] = bias if isinstance(target, Linear): target.update_layer( adapter_name, self.randlora_A, self.randlora_B, r, randlora_config.randlora_alpha, randlora_config.randlora_dropout, randlora_config.init_weights, ) else: new_module = self._create_new_module( randlora_config, self.randlora_A, self.randlora_B, adapter_name, target, **kwargs ) if adapter_name not in self.active_adapter: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) @staticmethod def _replace_module(parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if 
hasattr(child, "base_layer"): child = child.base_layer if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) meta = torch.device("meta") # dispatch to correct device for name, module in new_module.named_modules(): if "randlora_" in name: if not any(p.device == meta for p in module.parameters()): module.to(child.weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False for active_adapter in self.active_adapters: bias = self.peft_config[active_adapter].bias if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "randlora_only": for m in model.modules(): if isinstance(m, RandLoraLayer) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(randlora_config, randlora_A, randlora_B, adapter_name, target, **kwargs): # avoid eager bnb import if is_bnb_available(): import bitsandbytes as bnb from .bnb import Linear8bitLt if is_bnb_4bit_available(): from .bnb import Linear4bit bias = kwargs.pop("bias", False) loaded_in_8bit = kwargs.get("loaded_in_8bit", False) loaded_in_4bit = kwargs.get("loaded_in_4bit", False) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): eightbit_kwargs = kwargs.copy() eightbit_kwargs.update( { "has_fp16_weights": target_base_layer.state.has_fp16_weights, "threshold": target_base_layer.state.threshold, "index": 
target_base_layer.index, } ) return Linear8bitLt(target, adapter_name, randlora_A, randlora_B, **eightbit_kwargs) elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target_base_layer.compute_dtype, "compress_statistics": target_base_layer.weight.compress_statistics, "quant_type": target_base_layer.weight.quant_type, } ) return Linear4bit(target, adapter_name, randlora_A, randlora_B, **fourbit_kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = randlora_config.fan_in_fan_out = False elif isinstance(target_base_layer, Conv1D): kwargs["is_target_conv_1d_layer"] = True if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = randlora_config.fan_in_fan_out = True else: raise ValueError( f"Target module {target} is not supported. Currently, only the following modules are supported: " "`torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`." 
) new_module = Linear( target, randlora_A, randlora_B, adapter_name, bias=bias, **kwargs, ) return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self): self._set_adapter_layers(enabled=True) def disable_adapter_layers(self): for active_adapter in self.active_adapters: val = self.peft_config[active_adapter].bias if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name): for module in self.model.modules(): if isinstance(module, RandLoraLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. 
Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_RANDLORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_RANDLORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): # we cannot use self.prefix as we want to include non-trainable randlora parameters key_list = [key for key, _ in self.model.named_modules() if "randlora" not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def delete_adapter(self, adapter_name: str): """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. 
""" if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] # we cannot use self.prefix as we want to include non-trainable randlora parameters key_list = [key for key, _ in self.model.named_modules() if "randlora" not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, RandLoraLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapter[:] self.active_adapter = new_adapter or [] self._delete_auxiliary_adapter(adapter_name, new_active_adapters=new_adapter) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ): r""" This method merges the RandLora layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" >>> model = PeftModel.from_pretrained(base_model, peft_model_id) >>> merged_model = model.merge_and_unload() ``` """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self): """ Gets back the base model by removing all the RandLora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False)
peft/src/peft/tuners/randlora/model.py/0
{ "file_path": "peft/src/peft/tuners/randlora/model.py", "repo_id": "peft", "token_count": 10778 }
243
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import math import warnings from operator import attrgetter from typing import Literal, Optional import torch from peft.config import PeftConfig from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING, PEFT_TYPE_TO_PREFIX_MAPPING from peft.tuners.lora import Conv2d, Linear, LoraConfig, LoraLayer from .other import get_pattern_key, infer_device from .peft_types import PeftType from .save_and_load import _insert_adapter_name_into_state_dict, load_peft_weights # so far only LoRA is supported CONFIG_KEYS_TO_CHECK = {PeftType.LORA: ["use_rslora", "lora_dropout", "alpha_pattern", "use_dora"]} def _update_scaling(lora_module, adapter_name, scaling=None): """ Update the value of the scalings of the LoRA module. Takes into consideration that scalings can be tensors from prepare_model_for_compiled_hotswap. """ if lora_module.scaling[adapter_name] == scaling: return if isinstance(lora_module.scaling[adapter_name], torch.Tensor): lora_module.scaling[adapter_name].fill_(scaling) elif isinstance(lora_module.scaling[adapter_name], (float, int)): lora_module.scaling[adapter_name] = scaling else: raise ValueError( "Something went wrong when trying to set the new scale value, expected to find the old value to be of type " f"float or torch.Tensor, got {type(lora_module.scaling[adapter_name])} instead." 
) def _convert_scalings_to_tensor(model) -> bool: """ Convert the LoRA scaling values into torch.tensors to prevent recompilation if they change. Returns: bool: Returns `True` if an appropriate adapter was found, else `False`. """ found_adapter = False for module in model.modules(): if not isinstance(module, LoraLayer): continue found_adapter = True scaling = module.scaling for key, val in scaling.items(): if isinstance(val, float): # no need to deal with dtype as scalars are coerced scaling[key] = torch.tensor(val, device=module.weight.device) elif not isinstance(val, torch.Tensor): raise ValueError( "Something went wrong while trying to convert the scalings, expected to find values of type float " f"but found {type(val)} instead." ) return found_adapter def _get_padded_linear(lora_module: torch.nn.Module, target_rank: int, is_lora_A: bool) -> torch.nn.Linear: """ Get a new Linear layer for LoRA with padded weights according to the target rank. Args: lora_module (nn.Module): The LoRA sub-module (e.g. module.lora_A[adapter_name]). target_rank (int): The desired rank to pad to. is_lora_A (bool): True if this is the LoRA A matrix, False if LoRA B. Returns: nn.Linear: A newly created and padded Linear layer. If the rank already fit, the original layer is returned. """ weight = lora_module.weight # For LoRA A, the "rank dimension" is weight.size(0) (out_features). # For LoRA B, it is weight.size(1) (in_features). original_rank = weight.size(0) if is_lora_A else weight.size(1) # If no padding needed if original_rank == target_rank: return lora_module if original_rank > target_rank: raise ValueError( f"Trying to pad the adapter to the target rank {target_rank}, but the original rank is larger " f"({original_rank}). This is not possible." 
) out_features, in_features = weight.shape # lora_A and lora_B are always nn.Linear if is_lora_A: # LoRA A affects out_features padded = torch.zeros(target_rank, in_features, device=weight.device, dtype=weight.dtype) padded[:original_rank, :] = weight new_layer = torch.nn.Linear(in_features, target_rank, bias=lora_module.bias is not None) else: # LoRA B affects in_features padded = torch.zeros(out_features, target_rank, device=weight.device, dtype=weight.dtype) padded[:, :original_rank] = weight new_layer = torch.nn.Linear(target_rank, out_features, bias=lora_module.bias is not None) # Sanity check if new_layer.weight.shape != padded.shape: raise ValueError( "Something went wrong when trying to pad the LoRA Linear weights, the new shape should be " f"{padded.shape} but {new_layer.weight.shape} was found. Please open an issue on PEFT " "(https://github.com/huggingface/peft/issues) and report this error." ) if (lora_module.bias is not None) and (new_layer.bias.shape != lora_module.bias.shape): raise ValueError( "Something went wrong when trying to pad the LoRA Linear bias, the new shape should be " f"{lora_module.bias.shape} but {new_layer.bias.shape} was found. Please open an issue on PEFT " "(https://github.com/huggingface/peft/issues) and report this error." ) new_layer.weight.data = padded # Copy bias if present if lora_module.bias is not None: new_layer.bias.data = lora_module.bias.data return new_layer def _get_padded_conv2d(lora_module: torch.nn.Module, target_rank: int, is_lora_A: bool) -> torch.nn.Conv2d: """ Get a new Conv2d layer for LoRA with padded weights according to the target rank. Args: lora_module (nn.Module): The LoRA sub-module (e.g. module.lora_A[adapter_name]). target_rank (int): The desired rank to pad to. is_lora_A (bool): True if this is the LoRA A matrix, False if LoRA B. Returns: nn.Conv2d: A newly created and padded Conv2d layer. If the rank already fit, the original layer is returned. 
""" weight = lora_module.weight # For Conv2d: [out_channels, in_channels, kernel_height, kernel_width] out_channels, in_channels, kh, kw = weight.shape original_rank = out_channels if is_lora_A else in_channels if original_rank == target_rank: return lora_module if original_rank > target_rank: raise ValueError( f"Trying to pad the adapter to the target rank {target_rank}, but the original rank is larger " f"({original_rank}). This is not possible." ) # lora_A and lora_B are always nn.Conv2d if is_lora_A: # LoRA A affects out_channels padded = torch.zeros(target_rank, in_channels, kh, kw, device=weight.device, dtype=weight.dtype) padded[:out_channels, :, :, :] = weight new_layer = torch.nn.Conv2d( in_channels, target_rank, kernel_size=lora_module.kernel_size, stride=lora_module.stride, padding=lora_module.padding, bias=lora_module.bias is not None, groups=lora_module.groups, ) else: # LoRA B affects in_channels padded = torch.zeros(out_channels, target_rank, kh, kw, device=weight.device, dtype=weight.dtype) padded[:, :in_channels, :, :] = weight new_layer = torch.nn.Conv2d( target_rank, out_channels, kernel_size=lora_module.kernel_size, stride=lora_module.stride, padding=lora_module.padding, bias=lora_module.bias is not None, groups=lora_module.groups, ) # Sanity check if new_layer.weight.shape != padded.shape: raise ValueError( "Something went wrong when trying to pad the LoRA weights, the new shape should be " f"{padded.shape} but {new_layer.weight.shape} was found. Please open an issue on PEFT " "(https://github.com/huggingface/peft/issues) and report this error." ) if (lora_module.bias is not None) and (new_layer.bias.shape != lora_module.bias.shape): raise ValueError( "Something went wrong when trying to pad the LoRA Conv2d bias, the new shape should be " f"{lora_module.bias.shape} but {new_layer.bias.shape} was found. Please open an issue on PEFT " "(https://github.com/huggingface/peft/issues) and report this error." 
) new_layer.weight.data = padded # Copy bias if present if lora_module.bias is not None: new_layer.bias.data = lora_module.bias.data return new_layer def _pad_lora_weights(model: torch.nn.Module, target_rank: int) -> bool: """ Pad LoRA weights in a model to a target rank while preserving the original behavior. Args: model (nn.Module): The model containing LoRA modules (with lora_A and lora_B). target_rank (int): The target rank to pad to. Returns: bool: Returns `True` if an appropriate adapter was found, else `False`. """ found_adapter = False for module in model.modules(): # Decide which pad function to call based on module type if isinstance(module, Linear): pad_fn = _get_padded_linear elif isinstance(module, Conv2d): pad_fn = _get_padded_conv2d else: # Skip any other module types continue # Pad LoRA A for adapter_name, lora_A_module in module.lora_A.items(): new_layer = pad_fn(lora_A_module, target_rank=target_rank, is_lora_A=True) module.lora_A[adapter_name] = new_layer # Pad LoRA B for adapter_name, lora_B_module in module.lora_B.items(): new_layer = pad_fn(lora_B_module, target_rank=target_rank, is_lora_A=False) module.lora_B[adapter_name] = new_layer found_adapter = True return found_adapter def prepare_model_for_compiled_hotswap( model: torch.nn.Module, *, target_rank: Optional[int] = None, config: Optional[LoraConfig | dict[str, LoraConfig]] = None, check_compiled: Literal["error", "warn", "ignore"] = "error", ) -> None: """ Helper function that prepares the model so that it can later be compiled and then used with hot-swapping. It is necessary to call this function on the model for hot-swapping to work if both of these are true: - the different LoRA adapters have different ranks and/or different alpha values (i.e. scalings) - you plan to torch.compile the model and want to avoid re-compilation It is important to call this function *after* the first LoRA adapter has been loaded (i.e. the one that will be swapped out) but *before* the model is compiled. 
Even with this function, hot-swapping LoRA adapters that target different layers is still not supported. Note: This function modifies the model in-place. If you want to restore the model to its initial state, you will have to reload it. Args: model (`nn.Module`): The model with the loaded adapter, before compilation. target_rank (`int`, *optional*): The target rank to pad the LoRA weights to. Should be the maximum rank among all LoRA adapters that will be hot-swapped. If not specified, the target ranks will not be changed. config (`LoraConfig` or `dict[str, LoraConfig]`, *optional*): Optionally pass the `LoraConfig`s of the LoRA adapters. If passed, the rank in the configs will be updated to `target_rank`. check_compiled (`str`, *optional*, defaults to `"error"`): How to handle the case when the model is already compiled, which should generally be avoided. The options are: - "error" (default): raise an error - "warn": issue a warning - "ignore": do nothing Raises: ValueError If the model is already compiled or if no adpater layer was found, raise an error. Example: ```py base_model = ... model = PeftModel.from_pretrained(base_model, path_adapter_0) # Prepare the model to allow hotswapping even if ranks/scalings of 2nd adapter differ. # You can skip this step if all ranks and scalings are identical. prepare_model_for_compiled_hotswap(model, target_rank=highest_lora_rank) model = torch.compile(model) # do inference with adapter 0 # replace the "default" lora adapter with the new one hotswap_adapter(model, path_adapter_1, adapter_name="default", torch_device=device) # do inference with adapter 1 ``` """ is_compiled = hasattr(model, "_orig_mod") or getattr(model, "_compiled_call_impl", False) if is_compiled: if check_compiled == "error": raise ValueError("Call prepare_model_for_compiled_hotswap *before* compiling the model") elif check_compiled == "warn": warnings.warn( "prepare_model_for_compiled_hotswap was called with a model that is already compiled. 
This will likely " "result in re-compilation, hurting performance. Call the function before compiling the model." ) elif check_compiled != "ignore": raise ValueError( f"check_compiles should be one of 'error', 'warn', or 'ignore', got '{check_compiled}' instead." ) conversion_found_adapter = _convert_scalings_to_tensor(model) if target_rank is not None: padding_found_adapter = _pad_lora_weights(model, target_rank=target_rank) else: padding_found_adapter = False if not (conversion_found_adapter or padding_found_adapter): raise ValueError( "No adapter layers found on the model, make sure call `prepare_model_for_compiled_hotswap` after loading " "the first adapter and before loading the second adapter." ) if not config: return if target_rank is None: return if not isinstance(config, dict): # config can be either a PeftConfig, or a dict of PeftConfigs like PeftModel.peft_config config = {"dummy": config} for lora_config in config.values(): lora_config.r = target_rank if lora_config.rank_pattern: for key in lora_config.rank_pattern: lora_config.rank_pattern[key] = target_rank def hotswap_adapter_from_state_dict( model: torch.nn.Module, state_dict: dict[str, torch.Tensor], adapter_name: str, config: LoraConfig, parameter_prefix: str = "lora_", ): """ Swap out the adapter weights from the model with the weights from state_dict. As of now, only LoRA is supported. This is a low-level function that assumes that the adapters have been checked for compatibility and that the state_dict has been correctly mapped to work with PEFT. For a high level function that performs this work for you, use `hotswap_adapter` instead. Args: model (`nn.Module`): The model with the loaded adapter. state_dict (`dict[str, torch.Tensor]`): The state dict of the new adapter, which needs to be compatible (targeting same modules etc.). adapter_name (`str`): The name of the adapter that should be hot-swapped, e.g. `"default"`. The name will remain the same after swapping. 
config (`LoraConfig`): The config of the LoRA adapter. This is used to determine the scaling and rank of the adapter. parameter_prefix (`str`, *optional*, defaults to `"lora_"`) The prefix used to identify the adapter's keys in the state dict. For LoRA, this would be `"lora_"` (the default). Raises: RuntimeError If the old and the new adapter are not compatible, a RuntimeError is raised. """ # Ensure that all the keys of the new adapter correspond exactly to the keys of the old adapter, otherwise # hot-swapping is not possible # _orig_mod is for torch.compile(model) and _compiled_call_impl is for model.compile() (not wrapped) is_compiled = hasattr(model, "_orig_mod") is_compiled_inplace = bool(getattr(model, "_compiled_call_impl", None)) # TODO: there is probably a more precise way to identify the adapter keys missing_keys = {k for k in model.state_dict() if (parameter_prefix in k) and (adapter_name in k)} unexpected_keys = [] # first: dry run, not swapping anything for key, new_val in state_dict.items(): try: old_val = attrgetter(key)(model) except AttributeError: unexpected_keys.append(key) continue if is_compiled: missing_keys.remove("_orig_mod." + key) else: missing_keys.remove(key) # Right now, we don't deal with unexpected keys, i.e. if the adapter being swapped in targeting new layers. We could # probably add LoRA to these layers ad hoc, but that would not work with compiled models. if unexpected_keys: msg = f"Hot swapping the adapter did not succeed, unexpected keys found: {', '.join(unexpected_keys)}." raise RuntimeError(msg) # If the adapter that is being swapped in is missing some keys, this is fine. We just need to ensure that those LoRA # weights from the previous adapter are set to 0 so that they don't influence the output. We don't need to worry # about ranks are alphas. 
for key in missing_keys: # in case it's a compiled model key = key.removeprefix("_orig_mod.") # get LoRA parent module name by removing the 'lora_*.<adapter-name>.weight' part module_name = ".".join(key.split(".")[:-3]) module = model.get_submodule(module_name) old_val = attrgetter(key)(model) old_val.data.fill_(0.0) # actual swapping for key, new_val in state_dict.items(): # get LoRA parent module name by removing the 'lora_*.<adapter-name>.weight' part module_name = ".".join(key.split(".")[:-3]) module = model.get_submodule(module_name) # swap alpha/scaling r_key = get_pattern_key(config.rank_pattern.keys(), key) alpha_key = get_pattern_key(config.alpha_pattern.keys(), key) rank = config.rank_pattern.get(r_key, config.r) alpha = config.alpha_pattern.get(alpha_key, config.lora_alpha) if config.use_rslora: scaling = alpha / math.sqrt(rank) else: scaling = alpha / rank _update_scaling(module, adapter_name=adapter_name, scaling=scaling) # swap actual weights # no need to account for potential _orig_mod in key here, as torch handles that old_val = attrgetter(key)(model) new_val = new_val.to(old_val.data.device) # We try to detect if the model is compiled but it does not always work, e.g. if hotswapping is called from # within the model itself. In this case, swap_tensors raises RuntimeError and should continue without # swap_tensors. if not is_compiled and not is_compiled_inplace: try: torch.utils.swap_tensors(old_val, new_val) continue except RuntimeError: is_compiled = True # Compiled models don't work with swap_tensors because there are weakrefs for the tensor. It is unclear if # this workaround could not cause trouble but the tests indicate that it works. 
if old_val.shape == new_val.shape: # either # - adapters had the same rank # - adapters were padded with prepare_model_for_compiled_hotswap and 2nd adapter was larger old_val.data.copy_(new_val.data) else: # if 2nd adapter was smaller, ensure to fill up to adapter dimension and set the rest to zeros if old_val.dim() not in (2, 4): raise NotImplementedError( f"Trying to hotswap an adapter whose weight has {old_val.dim()} dimensions, but only Conv2d and " "Linear are supported" ) # Linear or Conv2d: the check for dim 0 or 1 works for both of these layer types if old_val.shape[0] > new_val.shape[0]: old_val.data.fill_(0) old_val.data[: new_val.shape[0]].copy_(new_val.data) elif old_val.shape[1] > new_val.shape[1]: old_val.data.fill_(0) old_val.data[:, : new_val.shape[1]].copy_(new_val.data) else: raise ValueError( f"Incompatible shapes found for LoRA weights {key}: {old_val.shape} vs {new_val.shape}. Please " "ensure that all ranks are padded to the largest rank among all LoRA adapters by using " "peft.utils.hotswap.prepare_model_for_compiled_hotswap." ) def check_hotswap_configs_compatible(config0: PeftConfig, config1: PeftConfig) -> None: """ Check if two configs are compatible for hot-swapping. Only LoRA parameters are checked for now. To hot-swap two adapters, their configs must be compatible. Otherwise, the results could be false. E.g. if they use different alpha values, after hot-swapping, the alphas from the first adapter would still be used with the weights from the 2nd adapter, which would result in incorrect behavior. There is probably a way to swap these values as well, but that's not implemented yet, and we need to be careful not to trigger re-compilation if the model is compiled (so no modification of the dict). 
""" if config0.peft_type != config1.peft_type: msg = f"Incompatible PEFT types found: {config0.peft_type.value} and {config1.peft_type.value}" raise ValueError(msg) if config0.peft_type not in CONFIG_KEYS_TO_CHECK: msg = ( f"Hotswapping only supports {', '.join(CONFIG_KEYS_TO_CHECK.keys())} but " f"{config0.peft_type.value} was passed." ) raise ValueError(msg) config_keys_to_check = CONFIG_KEYS_TO_CHECK[config0.peft_type] # TODO: This is a very rough check only for LoRA at the moment. Also, there might be some options that don't # necessarily require an error. config0 = config0.to_dict() config1 = config1.to_dict() sentinel = object() for key in config_keys_to_check: val0 = config0.get(key, sentinel) val1 = config1.get(key, sentinel) if val0 != val1: raise ValueError(f"Configs are incompatible: for {key}, {val0} != {val1}") def hotswap_adapter(model, model_name_or_path, adapter_name, torch_device=None, **kwargs): """Substitute old adapter data with new adapter data, keeping the rest the same. As of now, only LoRA is supported. This function is useful when you want to replace the loaded adapter with a new adapter. The adapter name will remain the same, but the weights and other parameters will be swapped out. If the adapters are incomptabile, e.g. targeting different layers or having different alpha values, an error will be raised. Example: ```py >>> import torch >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> from peft.utils.hotswap import hotswap_adapter >>> model_id = ... >>> inputs = ... >>> device = ... >>> model = AutoModelForCausalLM.from_pretrained(model_id).to(device) >>> # load lora 0 >>> model = PeftModel.from_pretrained(model, "path-adapter-0") >>> model = torch.compile(model) # optionally compile the model >>> with torch.inference_mode(): ... 
output_adapter_0 = model(inputs) >>> # replace the "default" lora adapter with the new one >>> hotswap_adapter(model, "path-adapter-1", adapter_name="default", torch_device=device) >>> with torch.inference_mode(): ... output_adapter_1 = model(inputs).logits ``` Args: model ([`~PeftModel`]): The PEFT model with the loaded adapter. model_name_or_path (`str`): The name or path of the model to load the new adapter from. adapter_name (`str`): The name of the adapter to swap, e.g. `"default"`. The name will stay the same after swapping. torch_device: (`str`, *optional*, defaults to None): The device to load the new adapter onto. **kwargs (`optional`): Additional keyword arguments used for loading the config and weights. """ if torch_device is None: torch_device = infer_device() ############################ # LOAD CONFIG AND VALIDATE # ############################ config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_name_or_path, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), token=kwargs.get("token", None), ) ] config = config_cls.from_pretrained(model_name_or_path, **kwargs) # config keys that could affect the model output besides what is determined by the state_dict check_hotswap_configs_compatible(model.active_peft_config, config) state_dict = load_peft_weights(model_name_or_path, device=torch_device, **kwargs) ########################### # LOAD & REMAP STATE_DICT # ########################### parameter_prefix = PEFT_TYPE_TO_PREFIX_MAPPING[config.peft_type] peft_model_state_dict = _insert_adapter_name_into_state_dict( state_dict, adapter_name=adapter_name, parameter_prefix=parameter_prefix ) hotswap_adapter_from_state_dict( model=model, state_dict=peft_model_state_dict, adapter_name=adapter_name, parameter_prefix=parameter_prefix, config=config, )
peft/src/peft/utils/hotswap.py/0
{ "file_path": "peft/src/peft/utils/hotswap.py", "repo_id": "peft", "token_count": 10563 }
244
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from safetensors.torch import load_file from transformers import AutoModelForCausalLM from peft import BOFTConfig, PeftModel, get_peft_model from peft.utils import infer_device class TestBoft: device = infer_device() def test_boft_state_dict(self, tmp_path): # see #2050 # ensure that the boft_P buffer is not stored in the checkpoint file and is not necessary to load the model # correctly torch.manual_seed(0) inputs = torch.arange(10).view(-1, 1).to(self.device) model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" model = AutoModelForCausalLM.from_pretrained(model_id).to(self.device) model.eval() output_base = model(inputs).logits config = BOFTConfig(init_weights=False) model = get_peft_model(model, config) model.eval() output_peft = model(inputs).logits atol, rtol = 1e-5, 1e-8 # sanity check: loading boft changed the output assert not torch.allclose(output_base, output_peft, atol=atol, rtol=rtol) model.save_pretrained(tmp_path) del model # check that the boft_P buffer is not present state_dict = load_file(tmp_path / "adapter_model.safetensors") assert not any("boft_P" in key for key in state_dict) # sanity check: the model still produces the same output after loading model = AutoModelForCausalLM.from_pretrained(model_id).to(self.device) model = PeftModel.from_pretrained(model, tmp_path) output_loaded = model(inputs).logits assert torch.allclose(output_peft, 
output_loaded, atol=atol, rtol=rtol) def test_boft_old_checkpoint_including_boft_P(self, tmp_path): # see #2050 # This test exists to ensure that after the boft_P buffer was made non-persistent, old checkpoints can still be # loaded successfully. torch.manual_seed(0) inputs = torch.arange(10).view(-1, 1).to(self.device) model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" model = AutoModelForCausalLM.from_pretrained(model_id).to(self.device) # first create the expected output config = BOFTConfig(init_weights=False) model = get_peft_model(model, config) model.eval() output_peft = model(inputs).logits del model model = AutoModelForCausalLM.from_pretrained(model_id).to(self.device) # checkpoint from before the PR whose state_dict still contains boft_P hub_id = "peft-internal-testing/boft-tiny-opt-peft-v0.12" model = PeftModel.from_pretrained(model, hub_id) output_old = model(inputs).logits atol, rtol = 1e-5, 1e-8 assert torch.allclose(output_peft, output_old, atol=atol, rtol=rtol)
peft/tests/test_boft.py/0
{ "file_path": "peft/tests/test_boft.py", "repo_id": "peft", "token_count": 1325 }
245
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib import os import unittest import torch import torch.nn.init as init from peft import LoraConfig, PeftModel, get_peft_model, get_peft_model_state_dict from .testing_utils import require_torch_gpu def is_megatron_available() -> bool: return importlib.util.find_spec("megatron") is not None if is_megatron_available(): from megatron.core import parallel_state, tensor_parallel from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed from megatron.core.transformer.module import MegatronModule from megatron.core.transformer.transformer_config import TransformerConfig world_size = 1 rank = 0 def initialize_distributed(): print(f"Initializing torch.distributed with rank: {rank}, world_size: {world_size}") torch.cuda.set_device(0) init_method = "tcp://" master_ip = os.getenv("MASTER_ADDR", "localhost") master_port = os.getenv("MASTER_PORT", "6001") init_method += master_ip + ":" + master_port torch.distributed.init_process_group(backend="nccl", world_size=world_size, rank=rank, init_method=init_method) def destroy_model_parallel(): parallel_state.destroy_model_parallel() torch.distributed.barrier() def initialize_model_parallel( tensor_model_parallel_size=1, pipeline_model_parallel_size=1, virtual_pipeline_model_parallel_size=None, pipeline_model_parallel_split_rank=None, ): 
parallel_state.destroy_model_parallel() if not torch.distributed.is_initialized(): initialize_distributed() parallel_state.initialize_model_parallel( tensor_model_parallel_size, pipeline_model_parallel_size, virtual_pipeline_model_parallel_size, pipeline_model_parallel_split_rank, ) class DummyModule(MegatronModule): def __init__(self, config: TransformerConfig): super().__init__(config) self.linear = tensor_parallel.ColumnParallelLinear( input_size=10, output_size=10, config=config, init_method=init.xavier_normal_, bias=False, gather_output=False, ) self.lm_head = tensor_parallel.RowParallelLinear( input_size=10, output_size=10, config=config, init_method=init.xavier_normal_, bias=False, input_is_parallel=True, skip_bias_add=True, ) def forward(self, input): x = self.linear(input)[0] x = self.lm_head(x)[0] return x @require_torch_gpu class TestMegatronLora(unittest.TestCase): def setUp(self): initialize_model_parallel(1, 1) model_parallel_cuda_manual_seed(123) transformer_config = { "num_layers": 2, "hidden_size": 12, "num_attention_heads": 4, "use_cpu_initialization": True, } config = TransformerConfig(**transformer_config) self.megatron_module = DummyModule(config=config).cuda() self.dummy_module = copy.deepcopy(self.megatron_module).cuda() lora_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", target_modules=["linear", "lm_head"], megatron_config=config, megatron_core="megatron.core", ) self.megatron_module = get_peft_model(self.megatron_module, lora_config) def tearDown(self): destroy_model_parallel() def test_megatron_lora_module(self): megatron_module = self.megatron_module assert isinstance(megatron_module, PeftModel) for name, module in megatron_module.named_modules(): if name.endswith("linear"): assert hasattr(module, "lora_A") assert hasattr(module, "lora_B") if name.endswith("linear.lora_A.default"): assert isinstance(module, torch.nn.Linear) if name.endswith("linear.lora_B.default"): assert isinstance(module, 
tensor_parallel.ColumnParallelLinear) if name.endswith("lm_head.lora_A.default"): assert isinstance(module, tensor_parallel.RowParallelLinear) if name.endswith("lm_head.lora_B.default"): assert isinstance(module, torch.nn.Linear) def test_forward(self): x = torch.ones((2, 4, 10)).cuda() megatron_module_result = self.megatron_module(x) dummt_module_result = self.dummy_module(x) # Because lora_B is initialized with 0, the forward results of two models should be equal before backward. assert megatron_module_result.equal(dummt_module_result) def test_backward(self): optimizer = torch.optim.AdamW(self.megatron_module.parameters()) loss_fn = torch.nn.CrossEntropyLoss() x = torch.randn(2, 4, 10, requires_grad=True).cuda() label = torch.randint(10, (2 * 4,)).cuda() output = self.megatron_module(x) output = output.reshape(2 * 4, 10) loss = loss_fn(output, label) loss.backward() optimizer.step() def test_get_peft_model_state_dict(self): peft_state_dict = get_peft_model_state_dict(self.megatron_module) for key in peft_state_dict.keys(): assert "lora" in key
peft/tests/test_lora_megatron.py/0
{ "file_path": "peft/tests/test_lora_megatron.py", "repo_id": "peft", "token_count": 2989 }
246
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import pytest import torch from safetensors.torch import load_file as safe_load_file from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from peft import AutoPeftModel, LoraConfig, PeftModel, TrainableTokensConfig, get_peft_model from peft.tuners.trainable_tokens.layer import TrainableTokensLayer from peft.utils import TrainableTokensWrapper, get_peft_model_state_dict from .testing_utils import hub_online_once class ModelEmb(torch.nn.Module): def __init__(self): super().__init__() self.emb = torch.nn.Embedding(100, 10) self.lin0 = torch.nn.Linear(10, 1) def forward(self, x): return self.lin0(self.emb(x)) def get_input_embeddings(self): return self.emb class ModelEmbedIn(torch.nn.Module): def __init__(self): super().__init__() self.embed_in = torch.nn.Embedding(100, 10) self.lin0 = torch.nn.Linear(10, 1) def forward(self, x): return self.lin0(self.embed_in(x)) def get_input_embeddings(self): return self.embed_in class ModelEmbedMultiple(torch.nn.Module): def __init__(self): super().__init__() self.embed_in = torch.nn.Embedding(100, 10) self.embed_in_2 = torch.nn.Embedding(100, 10) self.lin0 = torch.nn.Linear(10, 1) def forward(self, x): return self.lin0(self.embed_in(x) + self.embed_in_2(x)) def get_input_embeddings(self): return self.embed_in class ModelEmbedInNoGet(torch.nn.Module): def __init__(self): 
super().__init__() self.embed_in = torch.nn.Embedding(100, 10) self.lin0 = torch.nn.Linear(10, 1) def forward(self, x): return self.lin0(self.embed_in(x)) class TestTrainableTokens: @pytest.fixture def model_id(self): return "trl-internal-testing/tiny-random-LlamaForCausalLM" @pytest.fixture def model_multi_embedding(self): class MultiEmbeddingMLP(torch.nn.Module): def __init__(self): super().__init__() self.emb_text = torch.nn.Embedding(10, 5) self.emb_image = torch.nn.Embedding(8, 5) self.lin0 = torch.nn.Linear(5, 10) self.lin1 = torch.nn.Linear(10, 20) def forward(self, x_text, x_image): x_text = self.emb_text(x_text) x_image = self.emb_image(x_image) y = self.lin0(torch.concat([x_text, x_image], dim=1).view(-1, 5)) y = self.lin1(y) return y, (x_text, x_image) return MultiEmbeddingMLP() @pytest.fixture def model(self, model_id): with hub_online_once(model_id): # This must not be a yield fixture so that we don't carry the hub_online_once # behavior over to the rest of the test that uses this fixture return AutoModelForCausalLM.from_pretrained(model_id) @pytest.fixture def tokenizer(self, model_id): return AutoTokenizer.from_pretrained(model_id) def simulate_training(self, trainable_tokens_layer, adapter_name="default"): """Simulates training of trainable_tokens adapter layer by assigning random values to the delta tokens. 
""" trainable_tokens_layer.trainable_tokens_delta[adapter_name].data = torch.rand_like( trainable_tokens_layer.trainable_tokens_delta[adapter_name].data ) def test_stand_alone_usage(self, model, tokenizer, tmp_path): original_model = copy.deepcopy(model) peft_config = TrainableTokensConfig(target_modules=["embed_tokens"], token_indices=[0, 1, 3]) peft_model = get_peft_model(model, peft_config) save_path = tmp_path / "stand_alone_usage" # simulate normal use but take care to use the tokens that we expect to be modified # (+1 that we don't expect to be modified) X = { "input_ids": torch.tensor([[0, 1, 2, 3]]), "attention_mask": torch.tensor([[1, 1, 1, 1]]), } idcs_to_modify = peft_config.token_indices idcs_to_keep = [i for i in X["input_ids"][0].tolist() if i not in idcs_to_modify] self.simulate_training(peft_model.model.model.embed_tokens) output_train = peft_model(output_hidden_states=True, **X) peft_model.save_pretrained(save_path) peft_model_org = peft_model # check whether the token indices differ from the base model after loading the model # from the checkpoint. peft_model = AutoPeftModel.from_pretrained(save_path) output_load = peft_model(output_hidden_states=True, **X) output_orig = original_model(output_hidden_states=True, **X) # on the way, make sure that the embedding matrix itself was not modified assert torch.allclose( peft_model.model.model.embed_tokens.weight, peft_model_org.model.model.embed_tokens.weight, ) W_load = output_load.hidden_states[0] W_orig = output_orig.hidden_states[0] W_train = output_train.hidden_states[0] # all PEFT model embed outputs must equal the outputs during 'training' to make sure # that saving/loading works properly. 
assert torch.allclose(W_load, W_train) assert not torch.allclose(W_load[:, idcs_to_modify], W_orig[:, idcs_to_modify]) assert torch.allclose(W_load[:, idcs_to_keep], W_orig[:, idcs_to_keep]) @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ), ], ) def test_combined_with_peft_method_usage(self, model, tokenizer, peft_config, tmp_path): original_model = copy.deepcopy(model) peft_model = get_peft_model(model, peft_config) save_path = tmp_path / "combined_usage" # simulate normal use but take care to use the tokens that we expect to be modified # (+2 that we don't expect to be modified) X = { "input_ids": torch.tensor([[0, 1, 2, 3, 4]]), "attention_mask": torch.tensor([[1, 1, 1, 1, 1]]), } idcs_to_modify = peft_config.trainable_token_indices["embed_tokens"] idcs_to_keep = [i for i in X["input_ids"][0].tolist() if i not in idcs_to_modify] self.simulate_training(peft_model.model.model.embed_tokens.token_adapter) output_train = peft_model(output_hidden_states=True, **X) peft_model.save_pretrained(save_path) peft_model_org = peft_model # check whether the token indices differ from the base model peft_model = AutoPeftModel.from_pretrained(save_path) output_load = peft_model(output_hidden_states=True, **X) output_orig = original_model(output_hidden_states=True, **X) W_load = output_load.hidden_states[0] W_orig = output_orig.hidden_states[0] W_train = output_train.hidden_states[0] # all PEFT model embed outputs must equal the outputs during 'training' to make sure # that saving/loading works properly. 
assert torch.allclose(W_load, W_train) assert not torch.allclose(W_load[:, idcs_to_modify], W_orig[:, idcs_to_modify]) assert torch.allclose(W_load[:, idcs_to_keep], W_orig[:, idcs_to_keep]) def test_basic_training(self, model, tokenizer): # ensure that the model can be trained and backpropagation works config = TrainableTokensConfig( target_modules=["embed_tokens"], token_indices=[0, 10], ) model = get_peft_model(model, config) optimizer = torch.optim.AdamW(model.parameters(), lr=1) initial_delta = model.model.model.embed_tokens.trainable_tokens_delta.default.clone() initial_originals = model.model.model.embed_tokens.trainable_tokens_original.default.clone() X = { "input_ids": torch.tensor([[0, 1, 2, 3, 4]]), "attention_mask": torch.tensor([[1, 1, 1, 1, 1]]), } for step in range(3): optimizer.zero_grad() y_pred = model(**X) loss = y_pred.logits.mean() loss.backward() optimizer.step() assert torch.allclose( model.model.model.embed_tokens.trainable_tokens_original.default, initial_originals, ) assert not torch.allclose( model.model.model.embed_tokens.trainable_tokens_delta.default, initial_delta, ) @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ), ], ) def test_disable_adapters_with_merging(self, model, tokenizer, peft_config): X = { "input_ids": torch.tensor([[0, 1, 2, 3, 4]]), "attention_mask": torch.tensor([[1, 1, 1, 1, 1]]), } model = get_peft_model(model, peft_config) model.eval() outputs_before = model(**X).logits model.train() lr = 0.01 optimizer = torch.optim.Adam(model.parameters(), lr=lr) # train at least 3 steps for all parameters to be updated (probably this is required because of symmetry # breaking of some LoRA layers that are initialized with constants) for _ in range(3): optimizer.zero_grad() y_pred = model(**X) loss = y_pred.logits.mean() loss.backward() optimizer.step() model.eval() outputs_unmerged = model(**X).logits model.merge_adapter() outputs_after = 
model(**X).logits with model.disable_adapter(): outputs_disabled = model(**X).logits # check that after leaving the disable_adapter context, everything is enabled again outputs_enabled_after_disable = model(**X).logits atol, rtol = 1e-5, 1e-5 # tolerances higher than defaults since merging introduces some numerical instability # check that there is a difference in results after training assert not torch.allclose(outputs_before, outputs_after, atol=atol, rtol=rtol) # unmerged or merged should make no difference assert torch.allclose(outputs_after, outputs_unmerged, atol=atol, rtol=rtol) # check that disabling adapters gives the same results as before training assert torch.allclose(outputs_before, outputs_disabled, atol=atol, rtol=rtol) # check that enabling + disabling adapters does not change the results assert torch.allclose(outputs_after, outputs_enabled_after_disable, atol=atol, rtol=rtol) @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ), ], ) def test_safe_merge_with_adapter(self, model, tokenizer, peft_config): X = { "input_ids": torch.tensor([[0, 1, 2, 3]]), "attention_mask": torch.tensor([[1, 1, 1, 1]]), } model = model.eval() logits_base = model(**X).logits model = get_peft_model(model, peft_config).eval() logits_peft = model(**X).logits atol, rtol = 1e-6, 1e-6 # default model_unloaded = model.merge_and_unload(safe_merge=True) logits_unloaded = model_unloaded(**X).logits # check that the logits are the same after unloading assert torch.allclose(logits_peft, logits_unloaded, atol=atol, rtol=rtol) @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ), ], ) def test_load_multiple_adapters(self, model, peft_config, tmp_path): # tests if having more than one adpater (even with just the same config) works original_model = copy.deepcopy(model) model = get_peft_model(model, peft_config) 
model.save_pretrained(tmp_path) del model model = original_model model = PeftModel.from_pretrained(model, tmp_path) load_result1 = model.load_adapter(tmp_path, adapter_name="other") load_result2 = model.load_adapter(tmp_path, adapter_name="yet-another") assert load_result1.missing_keys == [] assert load_result2.missing_keys == [] @pytest.mark.parametrize( "peft_config_factory", [ lambda token_indices: LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": token_indices}, ), ], ) def test_multiple_adapters_different_token_indices(self, model, peft_config_factory, tmp_path): # tests if multiple adapters with different token indices work original_model = copy.deepcopy(model) token_indices_1 = [0, 1, 2] token_indices_2 = [2, 3, 4] peft_config_1 = peft_config_factory(token_indices_1) peft_config_2 = peft_config_factory(token_indices_2) model = get_peft_model(model, peft_config_1, adapter_name="adapter_1") model.add_adapter("adapter_2", peft_config_2) # "train" adapter 1 model.set_adapter("adapter_1") self.simulate_training(model.model.model.embed_tokens.token_adapter, "adapter_1") # "train" adapter 2 model.set_adapter("adapter_2") self.simulate_training(model.model.model.embed_tokens.token_adapter, "adapter_2") # now we infer on adapter 1 and on adapter 2 and check if the requested indices are changed for # each adapter. e.g., for adapter 1, only token indices 1 should be changed. X = { "input_ids": torch.tensor([list(set(token_indices_1 + token_indices_2))]), "attention_mask": torch.tensor([[1] * (len(set(token_indices_1 + token_indices_2)))]), } original_output = original_model(output_hidden_states=True, **X).hidden_states[0] # infer with adapter 1, embeddings for token indices 1 should be changed, no others. 
model.set_adapter("adapter_1") adapter_1_output = model(output_hidden_states=True, **X).hidden_states[0] idcs_to_modify = token_indices_1 idcs_to_keep = [i for i in X["input_ids"][0].tolist() if i not in idcs_to_modify] assert not torch.allclose(adapter_1_output[:, idcs_to_modify], original_output[:, idcs_to_modify]) assert torch.allclose(adapter_1_output[:, idcs_to_keep], original_output[:, idcs_to_keep]) # infer with adapter 2, embeddings for token indices 2 should be changed, no others. model.set_adapter("adapter_2") adapter_2_output = model(output_hidden_states=True, **X).hidden_states[0] idcs_to_modify = token_indices_2 idcs_to_keep = [i for i in X["input_ids"][0].tolist() if i not in idcs_to_modify] assert not torch.allclose(adapter_2_output[:, idcs_to_modify], original_output[:, idcs_to_modify]) assert torch.allclose(adapter_2_output[:, idcs_to_keep], original_output[:, idcs_to_keep]) @pytest.mark.parametrize( "peft_config_factory", [ lambda token_indices: LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": token_indices}, ), ], ) def test_multiple_adapters_overlapping_token_indices_merging(self, model, peft_config_factory, tmp_path): # tests that merging multiple adapters that have overlapping indices is not defined at the moment # and would yield undefined behavior. note that merging a single adapter is fine. 
original_model = copy.deepcopy(model) token_indices_1 = [0, 1, 2] token_indices_2 = [2, 3, 4] peft_config_1 = peft_config_factory(token_indices_1) peft_config_2 = peft_config_factory(token_indices_2) model = get_peft_model(model, peft_config_1, adapter_name="adapter_1") model.add_adapter("adapter_2", peft_config_2) with pytest.raises(ValueError) as e: model.merge_and_unload(adapter_names=["adapter_1", "adapter_2"]) assert "are already defined and would result in undefined merging behavior" in str(e) @pytest.mark.parametrize( "peft_config_factory", [ lambda targets, token_indices: LoraConfig( target_modules=targets, trainable_token_indices={"embed_tokens": token_indices}, ), ], ) def test_multiple_adapters_mixed_forward(self, model, peft_config_factory, tmp_path): # tests if multiple adapters with different token indices work original_model = copy.deepcopy(model) token_indices_1 = [0, 1, 2] token_indices_2 = [2, 3, 4] peft_config_1 = peft_config_factory(".*q_proj", token_indices_1) peft_config_2 = peft_config_factory(".*o_proj", token_indices_2) model = get_peft_model(model, peft_config_1, adapter_name="adapter_1") model.add_adapter("adapter_2", peft_config_2) # "train" adapter 1 model.set_adapter("adapter_1") self.simulate_training(model.model.model.embed_tokens.token_adapter, "adapter_1") # "train" adapter 2 model.set_adapter("adapter_2") self.simulate_training(model.model.model.embed_tokens.token_adapter, "adapter_2") # forward(adapter_names=...) is not available in train mode model.eval() # Build a batch of 2 items, each the same input sequence but each sequence will be passed to a different # adapter via mixed batch forward. 
input_sequence = list(set(token_indices_1 + token_indices_2)) X = { "input_ids": torch.tensor([input_sequence, input_sequence]), "attention_mask": torch.tensor([[1] * len(input_sequence), [1] * len(input_sequence)]), } batch_adapter_names = ["adapter_1", "adapter_2"] original_output = original_model(output_hidden_states=True, **X) mixed_output = model(output_hidden_states=True, adapter_names=batch_adapter_names, **X) # check that the active adapter is still the last activated adapter, adapter_2 assert model.model.model.embed_tokens.token_adapter.active_adapter == ["adapter_2"] adapter_1_output = mixed_output.hidden_states[0][0:1] original_output_1 = original_output.hidden_states[0][0:1] adapter_2_output = mixed_output.hidden_states[0][1:2] original_output_2 = original_output.hidden_states[0][1:2] idcs_to_modify = token_indices_1 idcs_to_keep = [i for i in X["input_ids"][0].tolist() if i not in idcs_to_modify] assert not torch.allclose(adapter_1_output[:, idcs_to_modify], original_output_1[:, idcs_to_modify]) assert torch.allclose(adapter_1_output[:, idcs_to_keep], original_output_1[:, idcs_to_keep]) idcs_to_modify = token_indices_2 idcs_to_keep = [i for i in X["input_ids"][0].tolist() if i not in idcs_to_modify] assert not torch.allclose(adapter_2_output[:, idcs_to_modify], original_output_2[:, idcs_to_modify]) assert torch.allclose(adapter_2_output[:, idcs_to_keep], original_output_2[:, idcs_to_keep]) def test_stand_alone_raises_target_layer_not_found(self, model): config = TrainableTokensConfig(target_modules=["doesnt_exist"], token_indices=[0, 1, 3]) with pytest.raises(ValueError) as e: model = get_peft_model(model, config) assert "Target modules ['doesnt_exist'] not found in the base model." 
in str(e) @pytest.mark.parametrize( "peft_config, target_layer_name", [ (LoraConfig(trainable_token_indices={"does-not-exist": [0, 1, 2]}), "does-not-exist"), ], ) def test_combined_with_peft_raises_target_layer_not_found(self, model, peft_config, target_layer_name): # same as test_stand_alone_raises_target_layer_not_found but tests the peft method integration with pytest.raises(ValueError) as e: model = get_peft_model(model, peft_config) assert f"Target modules {{{repr(target_layer_name)}}} not found in the base model." in str(e) def test_multiple_targets(self, model_multi_embedding): # tests the ability of targeting two modules with the same token indices original_model = copy.deepcopy(model_multi_embedding) config = TrainableTokensConfig(target_modules=["emb_text", "emb_image"], token_indices=[0, 1]) peft_model = get_peft_model(model_multi_embedding, config) self.simulate_training(peft_model.model.emb_text) self.simulate_training(peft_model.model.emb_image) X = { "x_text": torch.tensor([[0, 1, 2]]), "x_image": torch.tensor([[0, 1, 2]]), } _, (emb_text_orig, emb_image_orig) = original_model(**X) _, (emb_text_peft, emb_image_peft) = peft_model(**X) assert not torch.allclose(emb_text_orig[:, [0, 1]], emb_text_peft[:, [0, 1]]) assert torch.allclose(emb_text_orig[:, [2]], emb_text_peft[:, [2]]) assert not torch.allclose(emb_image_orig[:, [0, 1]], emb_image_peft[:, [0, 1]]) assert torch.allclose(emb_image_orig[:, [2]], emb_image_peft[:, [2]]) @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ), ], ) def test_no_embeddings_in_save_with_combined_usage(self, model, tokenizer, peft_config, tmp_path): # make sure that in combined use the only state dict key is that of the token deltas and nothing more peft_model = get_peft_model(model, peft_config) state_dict = get_peft_model_state_dict( model=peft_model, state_dict=None, adapter_name="default", ) embedding_keys = [n for n in 
state_dict.keys() if "embed_tokens" in n] assert embedding_keys == ["base_model.model.model.embed_tokens.token_adapter.trainable_tokens_delta"] @pytest.fixture() def model_weight_untied(self, model): return model @pytest.fixture() def model_id_weight_tied(self): return "facebook/opt-125m" @pytest.fixture() def model_weight_tied(self, model_id_weight_tied): return AutoModelForCausalLM.from_pretrained(model_id_weight_tied) @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ), ], ) def test_weight_tying_noop_when_model_is_untied(self, model_weight_untied, peft_config, tmp_path): # test if the weight tying is affected as well when we modified the embedding. assert model_weight_untied._tied_weights_keys assert not model_weight_untied.config.tie_word_embeddings peft_model = get_peft_model(model_weight_untied, peft_config) assert hasattr(peft_model.model.model.embed_tokens, "token_adapter") assert not hasattr(peft_model.model.lm_head, "token_adapter") @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ), ], ) def test_weight_tying_applied_when_model_is_tied(self, model_weight_tied, peft_config, tmp_path): # test if the weight tying is affected as well when we modified the embedding. assert model_weight_tied._tied_weights_keys assert model_weight_tied.config.tie_word_embeddings peft_model = get_peft_model(model_weight_tied, peft_config) # make it so that the input embeddings diverge. when the weights are tied this should # reflect in the output embeddings as well. self.simulate_training(peft_model.model.model.decoder.embed_tokens.token_adapter) # we have to find out if the input embedding tying is doing its job during forward. # for this we can leverage the fact that emb_out(1/emb_in(x)) is embed_dim on the # diagonal iff emb_in.weight == emb_out.weight. 
token_indices = [0, 1, 2, 3] emb_dim = 768 emb_in = peft_model.model.model.decoder.embed_tokens(torch.tensor([token_indices])) emb_out = peft_model.model.lm_head(1 / emb_in) assert torch.allclose(torch.diag(emb_out[0]), torch.tensor([emb_dim] * len(token_indices)).float()) # make sure that the state dict does not include weight-tied weights. state_dict = get_peft_model_state_dict(peft_model) assert not [key for key in state_dict if any(tied_key in key for tied_key in peft_model._tied_weights_keys)] # make sure that merging and unloading restores the weight-tying. merged_model = peft_model.merge_and_unload() assert merged_model.model.decoder.embed_tokens.weight.data_ptr() == merged_model.lm_head.weight.data_ptr() def test_weight_tying_applied_when_model_is_tied_standalone(self, model_weight_tied): # since weight tying is currently not supported make sure that an error is raised when attempting # to use a model that has tied input/output embeddings assert model_weight_tied._tied_weights_keys assert model_weight_tied.config.tie_word_embeddings peft_config = TrainableTokensConfig( target_modules=["embed_tokens"], token_indices=[0, 1, 3], ) peft_model = get_peft_model(model_weight_tied, peft_config) # make it so that the input embeddings diverge. when the weights are tied this should # reflect in the output embeddings as well. self.simulate_training(peft_model.model.model.decoder.embed_tokens) # we have to find out if the input embedding tying is doing its job during forward. # for this we can leverage the fact that emb_out(1/emb_in(x)) is embed_dim on the # diagonal iff emb_in.weight == emb_out.weight. token_indices = [0, 1, 2, 3] emb_dim = 768 emb_in = peft_model.model.model.decoder.embed_tokens(torch.tensor([token_indices])) emb_out = peft_model.model.lm_head(1 / emb_in) assert torch.allclose(torch.diag(emb_out[0]), torch.tensor([emb_dim] * len(token_indices)).float()) # make sure that the state dict does not include weight-tied weights. 
state_dict = get_peft_model_state_dict(peft_model) assert not [key for key in state_dict if any(tied_key in key for tied_key in peft_model._tied_weights_keys)] # make sure that merging and unloading restores the weight-tying. merged_model = peft_model.merge_and_unload() assert merged_model.model.decoder.embed_tokens.weight.data_ptr() == merged_model.lm_head.weight.data_ptr() def test_weight_tying_normally_issues_warning(self, model_weight_tied, recwarn): # When using models with weight tying and targeting the embedding or the tied layer should raise a warning. peft_config = LoraConfig(target_modules=["embed_tokens"]) peft_model = get_peft_model(model_weight_tied, peft_config) warnings = [w.message.args[0] for w in recwarn] warnings = [msg for msg in warnings if "Model with `tie_word_embeddings=True` and the" in msg] assert warnings def test_weight_tying_state_dict_ignores_tied_weights(self, model_weight_tied): # since weight tying is currently not supported make sure that an error is raised when attempting # to use a model that has tied input/output embeddings assert model_weight_tied._tied_weights_keys assert model_weight_tied.config.tie_word_embeddings peft_config = TrainableTokensConfig( target_modules=["embed_tokens"], token_indices=[0, 1, 3], ) peft_model = get_peft_model(model_weight_tied, peft_config) state_dict = peft_model.state_dict() peft_state_dict = get_peft_model_state_dict(peft_model) # the state dict or the peft model state dict must not include tied adapter weights state_dict_keys = [n for n, _ in state_dict.items() if "tied_adapter." in n] peft_state_dict_keys = [n for n, _ in peft_state_dict.items() if "tied_adapter." 
in n] assert not state_dict_keys assert not peft_state_dict_keys @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"shared": [0, 1, 3]}, ), ], ) def test_weight_tying_applied_when_model_is_tied_encoder_decoder(self, peft_config): model_id = "hf-internal-testing/tiny-random-t5" base_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) peft_model = get_peft_model(base_model, peft_config) # make it so that the input embeddings diverge. when the weights are tied this should # reflect in the output embeddings as well. self.simulate_training(peft_model.model.shared.token_adapter) # we have to find out if the input embedding tying is doing its job during forward. # for this we can leverage the fact that emb_out(1/emb_in(x)) is embed_dim on the # diagonal iff emb_in.weight == emb_out.weight. token_indices = [0, 1, 2, 3] emb_dim = base_model.config.d_model emb_in = peft_model.model.encoder.embed_tokens(torch.tensor([token_indices])) emb_out = peft_model.model.lm_head(1 / emb_in) assert torch.allclose(torch.diag(emb_out[0]), torch.tensor([emb_dim] * len(token_indices)).float()) # T5 has a decoder embedding layer, we can simply check if it's forward is equal to the encoder # embedding forward. emb_out = peft_model.model.decoder.embed_tokens(torch.tensor([token_indices])) assert torch.allclose(emb_in, emb_out) # make sure that the state dict does not include weight-tied weights. state_dict = get_peft_model_state_dict(peft_model) assert not [key for key in state_dict if any(tied_key in key for tied_key in peft_model._tied_weights_keys)] # make sure that merging and unloading restores the weight-tying. 
merged_model = peft_model.merge_and_unload() assert merged_model.encoder.embed_tokens.weight.data_ptr() == merged_model.lm_head.weight.data_ptr() assert ( merged_model.encoder.embed_tokens.weight.data_ptr() == merged_model.decoder.embed_tokens.weight.data_ptr() ) @pytest.mark.parametrize( "peft_config", [ LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, modules_to_save=["embed_tokens"], ), ], ) def test_modules_to_save_excludes_trainable_tokens(self, model, peft_config): with pytest.raises(ValueError) as e: get_peft_model(model, peft_config) assert "The embedding layer is already marked to be trained fully" in str(e) def test_merge_and_unload_standalone(self, model): # test basic functionality of merge_and_unload for standalone TrainableTokens token_indices = [0, 1, 3] peft_config = TrainableTokensConfig( target_modules=["embed_tokens"], token_indices=token_indices, ) peft_model = get_peft_model(model, peft_config) self.simulate_training(peft_model.model.model.embed_tokens) expected_changed_weights = peft_model.model.model.embed_tokens.trainable_tokens_delta.default.data.clone() # make sure no TrainableTokensLayer is in the module merged_model = peft_model.merge_and_unload() for _, module in merged_model.named_modules(): assert not isinstance(module, TrainableTokensLayer) # make sure that deltas are applied to the embedding matrix assert torch.allclose(merged_model.model.embed_tokens.weight.data[token_indices], expected_changed_weights) def test_original_module_not_in_state_dict(self, model): # Every AuxiliaryTrainingWrapper has an original_module attribute. Since the TrainableTokensWrapper is wrapping # a TrainableTokensLayer and it already has a base layer which serves as the original module, we don't need that # and so it should not come up in the state dict to save memory. 
peft_config = LoraConfig( target_modules="all-linear", trainable_token_indices={"embed_tokens": [0, 1, 3]}, ) peft_model = get_peft_model(model, peft_config) # make sure that the original module is present and accessible even though # we want to exclude it from the state dict. assert peft_model.model.model.embed_tokens.original_module state_dict = get_peft_model_state_dict(peft_model) assert not [k for k in state_dict if ".original_module.weight" in k] state_dict = peft_model.state_dict() assert not [k for k in state_dict if ".original_module.weight" in k] @pytest.fixture def model_emb(self): return ModelEmb() @pytest.fixture def model_embed_in(self): return ModelEmbedIn() @pytest.fixture def model_embed_in_no_get(self): return ModelEmbedInNoGet() @pytest.fixture def model_embed_multiple(self): return ModelEmbedMultiple() @pytest.mark.parametrize( "model_fixture_name, getter", [ ("model_emb", lambda model: model.emb), ("model_embed_in", lambda model: model.embed_in), ("model", lambda model: model.model.model.embed_tokens), ], ) def test_default_embedding_name_is_inferred_standalone(self, model_fixture_name, getter, request): # make sure that the auto targeting works when `target_module=None` base_model = request.getfixturevalue(model_fixture_name) peft_config = TrainableTokensConfig(target_modules=None, token_indices=[0, 1, 3]) peft_model = get_peft_model(base_model, peft_config) assert isinstance(getter(peft_model), TrainableTokensLayer) @pytest.mark.parametrize( "model_fixture_name, getter", [ ("model_emb", lambda model: model.emb), ("model_embed_in", lambda model: model.embed_in), ("model", lambda model: model.model.model.embed_tokens), ], ) def test_default_embedding_name_is_inferred_combined(self, model_fixture_name, getter, request): # make sure that the auto targeting works when `target_module=None` base_model = request.getfixturevalue(model_fixture_name) peft_config = LoraConfig(target_modules="all-linear", trainable_token_indices=[0, 1, 3]) peft_model = 
get_peft_model(base_model, peft_config) assert isinstance(getter(peft_model), TrainableTokensWrapper) def test_default_embedding_name_cannot_be_inferred(self, model_embed_in_no_get): # should default to default value `embed_tokens` which is not present in this model base_model = model_embed_in_no_get peft_config = TrainableTokensConfig(target_modules=None, token_indices=[0, 1, 3]) with pytest.raises(ValueError) as e: peft_model = get_peft_model(base_model, peft_config) assert "Target modules embed_tokens not found in the base model." in str(e) def test_embedding_name_is_used_when_given_standalone(self, model_embed_multiple): peft_config = TrainableTokensConfig(target_modules="embed_in_2", token_indices=[0, 1, 3]) peft_model = get_peft_model(model_embed_multiple, peft_config) assert isinstance(peft_model.model.embed_in_2, TrainableTokensLayer) assert not isinstance(peft_model.model.embed_in, TrainableTokensLayer) def test_embedding_name_is_used_when_given_combined(self, model_embed_multiple): peft_config = LoraConfig(target_modules="all-linear", trainable_token_indices={"embed_in_2": [0, 1, 3]}) peft_model = get_peft_model(model_embed_multiple, peft_config) assert isinstance(peft_model.model.embed_in_2, TrainableTokensWrapper) assert not isinstance(peft_model.model.embed_in, TrainableTokensWrapper) @pytest.mark.parametrize("resize_embedding", [True, False]) @pytest.mark.parametrize( "peft_config", [ LoraConfig(target_modules="all-linear", trainable_token_indices=[1, 2, 3]), TrainableTokensConfig(target_modules=None, token_indices=[1, 2, 3]), ], ) def test_save_pretrained_auto(self, model, resize_embedding, peft_config, tmp_path): # make sure that embeddings are saved alongside trainable token weights but only when # the we detect the embedding to be resized (as detected by save_embedding_layers="auto") if resize_embedding: model.resize_token_embeddings(model.config.vocab_size + 2) peft_model = get_peft_model(model, peft_config) peft_model.save_pretrained(tmp_path, 
save_embedding_layers="auto") state_dict = safe_load_file(tmp_path / "adapter_model.safetensors") if isinstance(peft_config, TrainableTokensConfig): contains_embedding = "base_model.model.model.embed_tokens.base_layer.weight" in state_dict else: contains_embedding = "base_model.model.model.embed_tokens.token_adapter.base_layer.weight" in state_dict if resize_embedding: assert contains_embedding else: assert not contains_embedding
peft/tests/test_trainable_tokens.py/0
{ "file_path": "peft/tests/test_trainable_tokens.py", "repo_id": "peft", "token_count": 16792 }
247
# Feature Extraction All of the models in `timm` have consistent mechanisms for obtaining various types of features from the model for tasks besides classification. ## Penultimate Layer Features (Pre-Classifier Features) The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide if they want pooled or un-pooled features. ### Unpooled There are three ways to obtain unpooled features. The final, unpooled features are sometimes referred to as the last hidden state. In `timm` this is up to and including the final normalization layer (in e.g. ViT style models) but does not include pooling / class token selection and final post-pooling layers. Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the head classifier and global pooling for networks. If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network. 
#### forward_features() ```py >>> import torch >>> import timm >>> m = timm.create_model('xception41', pretrained=True) >>> o = m(torch.randn(2, 3, 299, 299)) >>> print(f'Original shape: {o.shape}') >>> o = m.forward_features(torch.randn(2, 3, 299, 299)) >>> print(f'Unpooled shape: {o.shape}') ``` Output: ```text Original shape: torch.Size([2, 1000]) Unpooled shape: torch.Size([2, 2048, 10, 10]) ``` #### Create with no classifier and pooling ```py >>> import torch >>> import timm >>> m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='') >>> o = m(torch.randn(2, 3, 224, 224)) >>> print(f'Unpooled shape: {o.shape}') ``` Output: ```text Unpooled shape: torch.Size([2, 2048, 7, 7]) ``` #### Remove it later ```py >>> import torch >>> import timm >>> m = timm.create_model('densenet121', pretrained=True) >>> o = m(torch.randn(2, 3, 224, 224)) >>> print(f'Original shape: {o.shape}') >>> m.reset_classifier(0, '') >>> o = m(torch.randn(2, 3, 224, 224)) >>> print(f'Unpooled shape: {o.shape}') ``` Output: ```text Original shape: torch.Size([2, 1000]) Unpooled shape: torch.Size([2, 1024, 7, 7]) ``` #### Chaining unpooled output to classifier The last hidden state can be fed back into the head of the model using the `forward_head()` function. ```py >>> model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True) >>> output = model.forward_features(torch.randn(2,3,256,256)) >>> print('Unpooled output shape:', output.shape) >>> classified = model.forward_head(output) >>> print('Classification output shape:', classified.shape) ``` Output: ```text Unpooled output shape: torch.Size([2, 257, 512]) Classification output shape: torch.Size([2, 1000]) ``` ### Pooled To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact. 
#### Create with no classifier ```py >>> import torch >>> import timm >>> m = timm.create_model('resnet50', pretrained=True, num_classes=0) >>> o = m(torch.randn(2, 3, 224, 224)) >>> print(f'Pooled shape: {o.shape}') ``` Output: ```text Pooled shape: torch.Size([2, 2048]) ``` #### Remove it later ```py >>> import torch >>> import timm >>> m = timm.create_model('ese_vovnet19b_dw', pretrained=True) >>> o = m(torch.randn(2, 3, 224, 224)) >>> print(f'Original shape: {o.shape}') >>> m.reset_classifier(0) >>> o = m(torch.randn(2, 3, 224, 224)) >>> print(f'Pooled shape: {o.shape}') ``` Output: ```text Original shape: torch.Size([2, 1000]) Pooled shape: torch.Size([2, 1024]) ``` ## Multi-scale Feature Maps (Feature Pyramid) Object detection, segmentation, keypoint, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given obj detection or segmentation library. `timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels. A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default most models with a feature hierarchy will output up to 5 features up to a reduction of 32. However this varies per model, some models have fewer hierarchy levels, and some (like ViT) have a larger number of non-hierarchical feature maps and they default to outputting the last 3. The `out_indices` arg can be passed to `create_model` to specify which features you want. ### Create a feature map extraction model ```py >>> import torch >>> import timm >>> m = timm.create_model('resnest26d', features_only=True, pretrained=True) >>> o = m(torch.randn(2, 3, 224, 224)) >>> for x in o: ... 
print(x.shape) ``` Output: ```text torch.Size([2, 64, 112, 112]) torch.Size([2, 256, 56, 56]) torch.Size([2, 512, 28, 28]) torch.Size([2, 1024, 14, 14]) torch.Size([2, 2048, 7, 7]) ``` ### Query the feature information After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points. ```py >>> import torch >>> import timm >>> m = timm.create_model('regnety_032', features_only=True, pretrained=True) >>> print(f'Feature channels: {m.feature_info.channels()}') >>> o = m(torch.randn(2, 3, 224, 224)) >>> for x in o: ... print(x.shape) ``` Output: ```text Feature channels: [32, 72, 216, 576, 1512] torch.Size([2, 32, 112, 112]) torch.Size([2, 72, 56, 56]) torch.Size([2, 216, 28, 28]) torch.Size([2, 576, 14, 14]) torch.Size([2, 1512, 7, 7]) ``` ### Select specific feature levels or limit the stride There are two additional creation arguments impacting the output features. * `out_indices` selects which indices to output * `output_stride` limits the feature output stride of the network (also works in classification mode BTW) #### Output index selection The `out_indices` argument is supported by all models, but not all models have the same index to feature stride mapping. Look at the code or check feature_info to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most convnet models, index 0 is the stride 2 features, and index 4 is stride 32. For many ViT or ViT-Conv hybrids there may be many to all features maps of the same shape, or a combination of hierarchical and non-hierarchical feature maps. It is best to look at the `feature_info` attribute to see the number of features, their corresponding channel count and reduction level. 
`out_indices` supports negative indexing, this makes it easy to get the last, penultimate, etc feature map. `out_indices=(-2,)` would return the penultimate feature map for any model. #### Output stride (feature map dilation) `output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward, some networks only support `output_stride=32`. ```py >>> import torch >>> import timm >>> m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True) >>> print(f'Feature channels: {m.feature_info.channels()}') >>> print(f'Feature reduction: {m.feature_info.reduction()}') >>> o = m(torch.randn(2, 3, 320, 320)) >>> for x in o: ... print(x.shape) ``` Output: ```text Feature channels: [512, 2048] Feature reduction: [8, 8] torch.Size([2, 512, 40, 40]) torch.Size([2, 2048, 40, 40]) ``` ## Flexible intermediate feature map extraction In addition to using `features_only` with the model factory, many models support a `forward_intermediates()` method which provides a flexible mechanism for extracting both the intermediate feature maps and the last hidden state (which can be chained to the head). Additionally this method supports some model specific features such as returning class or distill prefix tokens for some models. Accompanying the `forward_intermediates` function is a `prune_intermediate_layers` function that allows one to prune layers from the model, including both the head, final norm, and/or trailing blocks/stages that are not needed. An `indices` argument is used for both `forward_intermediates()` and `prune_intermediate_layers()` to select the features to return or layers to remove. As with the `out_indices` for `features_only` API, `indices` is model specific and selects which intermediates are returned. 
In non-hierarchical block based models such as ViT the indices correspond to the blocks, in models with hierarchical stages they usually correspond to the output of the stem + each hierarchical stage. Both positive (from the start), and negative (relative to the end) indexing works, and `None` is used to return all intermediates. The `prune_intermediate_layers()` call returns an indices variable, as negative indices must be converted to absolute (positive) indices when the model is trimmed. ```py model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True) output, intermediates = model.forward_intermediates(torch.randn(2,3,256,256)) for i, o in enumerate(intermediates): print(f'Feat index: {i}, shape: {o.shape}') ``` ```text Feat index: 0, shape: torch.Size([2, 512, 16, 16]) Feat index: 1, shape: torch.Size([2, 512, 16, 16]) Feat index: 2, shape: torch.Size([2, 512, 16, 16]) Feat index: 3, shape: torch.Size([2, 512, 16, 16]) Feat index: 4, shape: torch.Size([2, 512, 16, 16]) Feat index: 5, shape: torch.Size([2, 512, 16, 16]) Feat index: 6, shape: torch.Size([2, 512, 16, 16]) Feat index: 7, shape: torch.Size([2, 512, 16, 16]) Feat index: 8, shape: torch.Size([2, 512, 16, 16]) Feat index: 9, shape: torch.Size([2, 512, 16, 16]) Feat index: 10, shape: torch.Size([2, 512, 16, 16]) Feat index: 11, shape: torch.Size([2, 512, 16, 16]) ``` ```py model = timm.create_model('vit_medium_patch16_reg1_gap_256', pretrained=True) print('Original params:', sum([p.numel() for p in model.parameters()])) indices = model.prune_intermediate_layers(indices=(-2,), prune_head=True, prune_norm=True) # prune head, norm, last block print('Pruned params:', sum([p.numel() for p in model.parameters()])) intermediates = model.forward_intermediates(torch.randn(2,3,256,256), indices=indices, intermediates_only=True) # return penultimate intermediate for o in intermediates: print(f'Feat shape: {o.shape}') ``` ```text Original params: 38880232 Pruned params: 35212800 Feat shape: 
torch.Size([2, 512, 16, 16]) ```
pytorch-image-models/hfdocs/source/feature_extraction.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/feature_extraction.mdx", "repo_id": "pytorch-image-models", "token_count": 3391 }
248
# EfficientNet **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use \\( 2^N \\) times more computational resources, then we can simply increase the network depth by \\( \alpha ^ N \\), width by \\( \beta ^ N \\), and image size by \\( \gamma ^ N \\), where \\( \alpha, \beta, \gamma \\) are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient \\( \phi \\) to uniformly scale network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). ## How do I use this model on an image? 
To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('efficientnet_b0', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `efficientnet_b0`. You can find the IDs in the model summaries at the top of this page. 
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: EfficientNet Paper: Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' URL: https://paperswithcode.com/paper/efficientnet-rethinking-model-scaling-for Models: - Name: efficientnet_b0 In Collection: EfficientNet Metadata: FLOPs: 511241564 Parameters: 5290000 File Size: 21376743 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b0 Layers: 18 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1002 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 
77.71% Top 5 Accuracy: 93.52% - Name: efficientnet_b1 In Collection: EfficientNet Metadata: FLOPs: 909691920 Parameters: 7790000 File Size: 31502706 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b1 Crop Pct: '0.875' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1011 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.71% Top 5 Accuracy: 94.15% - Name: efficientnet_b2 In Collection: EfficientNet Metadata: FLOPs: 1265324514 Parameters: 9110000 File Size: 36788104 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b2 Crop Pct: '0.875' Image Size: '260' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1020 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.38% Top 5 Accuracy: 95.08% - Name: efficientnet_b2a In Collection: EfficientNet Metadata: FLOPs: 1452041554 Parameters: 9110000 File Size: 49369973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: 
efficientnet_b2a Crop Pct: '1.0' Image Size: '288' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1029 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.61% Top 5 Accuracy: 95.32% - Name: efficientnet_b3 In Collection: EfficientNet Metadata: FLOPs: 2327905920 Parameters: 12230000 File Size: 49369973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b3 Crop Pct: '0.904' Image Size: '300' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1038 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.08% Top 5 Accuracy: 96.03% - Name: efficientnet_b3a In Collection: EfficientNet Metadata: FLOPs: 2600628304 Parameters: 12230000 File Size: 49369973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b3a Crop Pct: '1.0' Image Size: '320' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1047 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth Results: - Task: Image Classification Dataset: 
ImageNet Metrics: Top 1 Accuracy: 82.25% Top 5 Accuracy: 96.11% - Name: efficientnet_em In Collection: EfficientNet Metadata: FLOPs: 3935516480 Parameters: 6900000 File Size: 27927309 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_em Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1118 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.26% Top 5 Accuracy: 94.79% - Name: efficientnet_es In Collection: EfficientNet Metadata: FLOPs: 2317181824 Parameters: 5440000 File Size: 22003339 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_es Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1110 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.09% Top 5 Accuracy: 93.93% - Name: efficientnet_lite0 In Collection: EfficientNet Metadata: FLOPs: 510605024 Parameters: 4650000 File Size: 18820005 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image 
Classification Training Data: - ImageNet ID: efficientnet_lite0 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1163 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.5% Top 5 Accuracy: 92.51% -->
pytorch-image-models/hfdocs/source/models/efficientnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/efficientnet.mdx", "repo_id": "pytorch-image-models", "token_count": 4915 }
249
# (Legacy) SE-ResNeXt **SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... 
print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `legacy_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation ```BibTeX @misc{hu2019squeezeandexcitation, title={Squeeze-and-Excitation Networks}, author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, year={2019}, eprint={1709.01507}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Legacy SE ResNeXt Paper: Title: Squeeze-and-Excitation Networks URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks Models: - Name: legacy_seresnext101_32x4d In Collection: Legacy SE ResNeXt Metadata: FLOPs: 10287698672 Parameters: 48960000 File Size: 196466866 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnext101_32x4d LR: 0.6 Epochs: 100 Layers: 101 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L462 Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.23% Top 5 Accuracy: 95.02% - Name: legacy_seresnext26_32x4d In Collection: Legacy SE ResNeXt Metadata: FLOPs: 3187342304 Parameters: 16790000 File Size: 67346327 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: 
legacy_seresnext26_32x4d LR: 0.6 Epochs: 100 Layers: 26 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L448 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.11% Top 5 Accuracy: 93.31% - Name: legacy_seresnext50_32x4d In Collection: Legacy SE ResNeXt Metadata: FLOPs: 5459954352 Parameters: 27560000 File Size: 110559176 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnext50_32x4d LR: 0.6 Epochs: 100 Layers: 50 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L455 Weights: http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.08% Top 5 Accuracy: 94.43% -->
pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/legacy-se-resnext.mdx", "repo_id": "pytorch-image-models", "token_count": 2733 }
250
# ResNeXt A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('resnext101_32x8d', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... 
print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `resnext101_32x8d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('resnext101_32x8d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/XieGDTH16, author = {Saining Xie and Ross B. 
Girshick and Piotr Doll{\'{a}}r and Zhuowen Tu and Kaiming He}, title = {Aggregated Residual Transformations for Deep Neural Networks}, journal = {CoRR}, volume = {abs/1611.05431}, year = {2016}, url = {http://arxiv.org/abs/1611.05431}, archivePrefix = {arXiv}, eprint = {1611.05431}, timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: ResNeXt Paper: Title: Aggregated Residual Transformations for Deep Neural Networks URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep Models: - Name: resnext101_32x8d In Collection: ResNeXt Metadata: FLOPs: 21180417024 Parameters: 88790000 File Size: 356082095 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnext101_32x8d Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L877 Weights: https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.3% Top 5 Accuracy: 94.53% - Name: resnext50_32x4d In Collection: ResNeXt Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100435887 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnext50_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: 
https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L851 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.79% Top 5 Accuracy: 94.61% - Name: resnext50d_32x4d In Collection: ResNeXt Metadata: FLOPs: 5781119488 Parameters: 25050000 File Size: 100515304 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnext50d_32x4d Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnet.py#L869 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.67% Top 5 Accuracy: 94.87% - Name: tv_resnext50_32x4d In Collection: ResNeXt Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100441675 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnext50_32x4d LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L842 Weights: https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth Results: - Task: Image Classification 
Dataset: ImageNet Metrics: Top 1 Accuracy: 77.61% Top 5 Accuracy: 93.68% -->
pytorch-image-models/hfdocs/source/models/resnext.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/resnext.mdx", "repo_id": "pytorch-image-models", "token_count": 3059 }
251
# (Tensorflow) MobileNet v3 **MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... 
print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-02244, author = {Andrew Howard and Mark Sandler and Grace Chu and Liang{-}Chieh Chen and Bo Chen and Mingxing Tan and Weijun Wang and Yukun Zhu and Ruoming Pang and Vijay Vasudevan and Quoc V. 
Le and Hartwig Adam}, title = {Searching for MobileNetV3}, journal = {CoRR}, volume = {abs/1905.02244}, year = {2019}, url = {http://arxiv.org/abs/1905.02244}, archivePrefix = {arXiv}, eprint = {1905.02244}, timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: TF MobileNet V3 Paper: Title: Searching for MobileNetV3 URL: https://paperswithcode.com/paper/searching-for-mobilenetv3 Models: - Name: tf_mobilenetv3_large_075 In Collection: TF MobileNet V3 Metadata: FLOPs: 194323712 Parameters: 3990000 File Size: 16097377 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_075 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.45% Top 5 Accuracy: 91.34% - Name: tf_mobilenetv3_large_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 274535288 Parameters: 5480000 File Size: 22076649 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax 
- Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_100 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.51% Top 5 Accuracy: 92.61% - Name: tf_mobilenetv3_large_minimal_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 267216928 Parameters: 3920000 File Size: 15836368 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 4x4 TPU Pod ID: tf_mobilenetv3_large_minimal_100 LR: 0.1 Dropout: 0.8 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.24% Top 5 Accuracy: 90.64% - Name: tf_mobilenetv3_small_075 In Collection: TF MobileNet V3 Metadata: FLOPs: 48457664 Parameters: 2040000 File Size: 8242701 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense 
Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_075 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 65.72% Top 5 Accuracy: 86.13% - Name: tf_mobilenetv3_small_100 In Collection: TF MobileNet V3 Metadata: FLOPs: 65450600 Parameters: 2540000 File Size: 10256398 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_100 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 67.92% Top 5 Accuracy: 87.68% - Name: tf_mobilenetv3_small_minimal_100 In Collection: TF 
MobileNet V3 Metadata: FLOPs: 60827936 Parameters: 2040000 File Size: 8258083 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Depthwise Separable Convolution - Dropout - Global Average Pooling - Hard Swish - Inverted Residual Block - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: tf_mobilenetv3_small_minimal_100 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4096 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bilinear RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 62.91% Top 5 Accuracy: 84.24% -->
pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/tf-mobilenet-v3.mdx", "repo_id": "pytorch-image-models", "token_count": 4784 }
252
from torch.nn.modules.batchnorm import BatchNorm2d
from torchvision.ops.misc import FrozenBatchNorm2d

import timm
import pytest

from timm.utils.model import freeze, unfreeze
from timm.utils.model import ActivationStatsHook
from timm.utils.model import extract_spp_stats
from timm.utils.model import _freeze_unfreeze
from timm.utils.model import avg_sq_ch_mean, avg_ch_var, avg_ch_var_residual
from timm.utils.model import reparameterize_model
from timm.utils.model import get_state_dict


def test_freeze_unfreeze():
    """End-to-end check of freeze()/unfreeze() on a real resnet18.

    Verifies both effects of freezing: parameter ``requires_grad`` flags are
    toggled, and ``BatchNorm2d`` layers are swapped with ``FrozenBatchNorm2d``
    (and back on unfreeze). Exercised for the whole model, for named
    submodules, and for a single BN layer addressed from the root and from
    its direct parent.
    """
    model = timm.create_model('resnet18')

    # Freeze all
    freeze(model)
    # Check top level module
    assert model.fc.weight.requires_grad == False
    # Check submodule
    assert model.layer1[0].conv1.weight.requires_grad == False
    # Check BN
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)

    # Unfreeze all
    unfreeze(model)
    # Check top level module
    assert model.fc.weight.requires_grad == True
    # Check submodule
    assert model.layer1[0].conv1.weight.requires_grad == True
    # Check BN
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)

    # Freeze some
    freeze(model, ['layer1', 'layer2.0'])
    # Check frozen
    assert model.layer1[0].conv1.weight.requires_grad == False
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
    assert model.layer2[0].conv1.weight.requires_grad == False
    # Check not frozen — siblings of the frozen targets must be untouched
    assert model.layer3[0].conv1.weight.requires_grad == True
    assert isinstance(model.layer3[0].bn1, BatchNorm2d)
    assert model.layer2[1].conv1.weight.requires_grad == True

    # Unfreeze some
    unfreeze(model, ['layer1', 'layer2.0'])
    # Check not frozen
    assert model.layer1[0].conv1.weight.requires_grad == True
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)
    assert model.layer2[0].conv1.weight.requires_grad == True

    # Freeze/unfreeze BN
    # From root (dotted path)
    freeze(model, ['layer1.0.bn1'])
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
    unfreeze(model, ['layer1.0.bn1'])
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)
    # From direct parent (relative name)
    freeze(model.layer1[0], ['bn1'])
    assert isinstance(model.layer1[0].bn1, FrozenBatchNorm2d)
    unfreeze(model.layer1[0], ['bn1'])
    assert isinstance(model.layer1[0].bn1, BatchNorm2d)


def test_activation_stats_hook_validation():
    """ActivationStatsHook must reject mismatched hook_fn_locs / hook_fns lengths."""
    model = timm.create_model('resnet18')

    def test_hook(model, input, output):
        return output.mean().item()

    # Test error case with mismatched lengths: two locations, one hook fn
    with pytest.raises(ValueError, match="Please provide `hook_fns` for each `hook_fn_locs`"):
        ActivationStatsHook(
            model,
            hook_fn_locs=['layer1.0.conv1', 'layer1.0.conv2'],
            hook_fns=[test_hook]
        )


def test_extract_spp_stats():
    """extract_spp_stats should run a forward pass with hooks attached and
    return a dict keyed by hook function name, each holding a non-empty list
    of collected stats."""
    model = timm.create_model('resnet18')

    def test_hook(model, input, output):
        return output.mean().item()

    stats = extract_spp_stats(
        model,
        hook_fn_locs=['layer1.0.conv1'],
        hook_fns=[test_hook],
        input_shape=[2, 3, 32, 32]
    )
    assert isinstance(stats, dict)
    assert test_hook.__name__ in stats
    assert isinstance(stats[test_hook.__name__], list)
    assert len(stats[test_hook.__name__]) > 0


def test_freeze_unfreeze_bn_root():
    """_freeze_unfreeze asserts when the root module itself is a batch-norm
    layer — BN freezing requires replacing the module in its parent, which is
    impossible when the BN layer is the root."""
    import torch.nn as nn
    from timm.layers import BatchNormAct2d

    # Create batch norm layers
    bn = nn.BatchNorm2d(10)
    bn_act = BatchNormAct2d(10)

    # Test with BatchNorm2d as root
    with pytest.raises(AssertionError):
        _freeze_unfreeze(bn, mode="freeze")

    # Test with BatchNormAct2d as root
    with pytest.raises(AssertionError):
        _freeze_unfreeze(bn_act, mode="freeze")


def test_activation_stats_functions():
    """The avg_* stat helpers follow the (module, input, output) hook
    signature (module/input unused, hence None) and reduce a 4D activation
    tensor to a plain Python float."""
    import torch

    # Create sample input tensor [batch, channels, height, width]
    x = torch.randn(2, 3, 4, 4)

    # Test avg_sq_ch_mean
    result1 = avg_sq_ch_mean(None, None, x)
    assert isinstance(result1, float)

    # Test avg_ch_var
    result2 = avg_ch_var(None, None, x)
    assert isinstance(result2, float)

    # Test avg_ch_var_residual
    result3 = avg_ch_var_residual(None, None, x)
    assert isinstance(result3, float)


def test_reparameterize_model():
    """reparameterize_model swaps any submodule exposing a fuse() method for
    the module fuse() returns; inplace=False must deep-copy and leave the
    original model untouched."""
    import torch.nn as nn

    class FusableModule(nn.Module):
        # Minimal module implementing the fuse() protocol that
        # reparameterize_model looks for.
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, 1)

        def fuse(self):
            return nn.Identity()

    class ModelWithFusable(nn.Module):
        def __init__(self):
            super().__init__()
            self.fusable = FusableModule()
            self.normal = nn.Linear(10, 10)

    model = ModelWithFusable()

    # Test with inplace=False (should create a copy)
    new_model = reparameterize_model(model, inplace=False)
    assert isinstance(new_model.fusable, nn.Identity)
    assert isinstance(model.fusable, FusableModule)  # Original unchanged

    # Test with inplace=True
    reparameterize_model(model, inplace=True)
    assert isinstance(model.fusable, nn.Identity)


def test_get_state_dict_custom_unwrap():
    """get_state_dict should pass the model through a caller-supplied
    unwrap_fn and return tensors keyed by attribute path."""
    import torch.nn as nn

    class CustomModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(10, 10)

    model = CustomModel()

    # Identity unwrap — exercises the unwrap_fn hook without wrapping
    def custom_unwrap(m):
        return m

    state_dict = get_state_dict(model, unwrap_fn=custom_unwrap)
    assert 'linear.weight' in state_dict
    assert 'linear.bias' in state_dict


def test_freeze_unfreeze_string_input():
    """_freeze_unfreeze accepts a single submodule name as a plain string,
    not only a list of names."""
    model = timm.create_model('resnet18')

    # Test with string input
    _freeze_unfreeze(model, 'layer1', mode='freeze')
    assert model.layer1[0].conv1.weight.requires_grad == False

    # Test unfreezing with string input
    _freeze_unfreeze(model, 'layer1', mode='unfreeze')
    assert model.layer1[0].conv1.weight.requires_grad == True
pytorch-image-models/tests/test_utils.py/0
{ "file_path": "pytorch-image-models/tests/test_utils.py", "repo_id": "pytorch-image-models", "token_count": 2521 }
253
""" Loader Factory, Fast Collate, CUDA Prefetcher Prefetcher and Fast Collate inspired by NVIDIA APEX example at https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf Hacked together by / Copyright 2019, Ross Wightman """ import logging import random from contextlib import suppress from functools import partial from itertools import repeat from typing import Callable, Optional, Tuple, Union import torch import torch.utils.data import numpy as np from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from .dataset import IterableImageDataset, ImageDataset from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler from .random_erasing import RandomErasing from .mixup import FastCollateMixup from .transforms_factory import create_transform _logger = logging.getLogger(__name__) def fast_collate(batch): """ A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)""" assert isinstance(batch[0], tuple) batch_size = len(batch) if isinstance(batch[0][0], tuple): # This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position # such that all tuple of position n will end up in a torch.split(tensor, batch_size) in nth position is_np = isinstance(batch[0][0], np.ndarray) inner_tuple_size = len(batch[0][0]) flattened_batch_size = batch_size * inner_tuple_size targets = torch.zeros(flattened_batch_size, dtype=torch.int64) tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8) for i in range(batch_size): assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length for j in range(inner_tuple_size): targets[i + j * batch_size] = batch[i][1] if is_np: tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j]) else: tensor[i + j * batch_size] += batch[i][0][j] return tensor, targets elif isinstance(batch[0][0], np.ndarray): targets = 
torch.tensor([b[1] for b in batch], dtype=torch.int64) assert len(targets) == batch_size tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) for i in range(batch_size): tensor[i] += torch.from_numpy(batch[i][0]) return tensor, targets elif isinstance(batch[0][0], torch.Tensor): targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) assert len(targets) == batch_size tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) for i in range(batch_size): tensor[i].copy_(batch[i][0]) return tensor, targets else: assert False def adapt_to_chs(x, n): if not isinstance(x, (tuple, list)): x = tuple(repeat(x, n)) elif len(x) != n: x_mean = np.mean(x).item() x = (x_mean,) * n _logger.warning(f'Pretrained mean/std different shape than model, using avg value {x}.') else: assert len(x) == n, 'normalization stats must match image channels' return x class PrefetchLoader: def __init__( self, loader: torch.utils.data.DataLoader, mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...] 
= IMAGENET_DEFAULT_STD, channels: int = 3, device: torch.device = torch.device('cuda'), img_dtype: Optional[torch.dtype] = None, fp16: bool = False, re_prob: float = 0., re_mode: str = 'const', re_count: int = 1, re_num_splits: int = 0, ): mean = adapt_to_chs(mean, channels) std = adapt_to_chs(std, channels) normalization_shape = (1, channels, 1, 1) self.loader = loader self.device = device if fp16: # fp16 arg is deprecated, but will override dtype arg if set for bwd compat img_dtype = torch.float16 self.img_dtype = img_dtype or torch.float32 self.mean = torch.tensor( [x * 255 for x in mean], device=device, dtype=img_dtype).view(normalization_shape) self.std = torch.tensor( [x * 255 for x in std], device=device, dtype=img_dtype).view(normalization_shape) if re_prob > 0.: self.random_erasing = RandomErasing( probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device=device, ) else: self.random_erasing = None self.is_cuda = device.type == 'cuda' and torch.cuda.is_available() self.is_npu = device.type == 'npu' and torch.npu.is_available() def __iter__(self): first = True if self.is_cuda: stream = torch.cuda.Stream(device=self.device) stream_context = partial(torch.cuda.stream, stream=stream) elif self.is_npu: stream = torch.npu.Stream(device=self.device) stream_context = partial(torch.npu.stream, stream=stream) else: stream = None stream_context = suppress for next_input, next_target in self.loader: with stream_context(): next_input = next_input.to(device=self.device, non_blocking=True) next_target = next_target.to(device=self.device, non_blocking=True) next_input = next_input.to(self.img_dtype).sub_(self.mean).div_(self.std) if self.random_erasing is not None: next_input = self.random_erasing(next_input) if not first: yield input, target else: first = False if stream is not None: if self.is_cuda: torch.cuda.current_stream(device=self.device).wait_stream(stream) elif self.is_npu: 
            # NPU path: make the default compute stream wait on the side copy
            # stream before the prefetched batch is handed to the consumer.
            torch.npu.current_stream(device=self.device).wait_stream(stream)

            input = next_input
            target = next_target

        # Flush the last batch held back by the one-step prefetch pipeline.
        yield input, target

    def __len__(self):
        return len(self.loader)

    @property
    def sampler(self):
        return self.loader.sampler

    @property
    def dataset(self):
        return self.loader.dataset

    @property
    def mixup_enabled(self):
        # Mixup state lives on the collate fn when FastCollateMixup is in use.
        if isinstance(self.loader.collate_fn, FastCollateMixup):
            return self.loader.collate_fn.mixup_enabled
        else:
            return False

    @mixup_enabled.setter
    def mixup_enabled(self, x):
        if isinstance(self.loader.collate_fn, FastCollateMixup):
            self.loader.collate_fn.mixup_enabled = x


def _worker_init(worker_id, worker_seeding='all'):
    """DataLoader worker_init_fn controlling per-worker RNG seeding.

    `worker_seeding` is either a callable mapping worker_info -> seed, or one
    of 'all' (re-seed python/torch/numpy) / 'part' (skip the numpy re-seed).
    """
    worker_info = torch.utils.data.get_worker_info()
    assert worker_info.id == worker_id
    if isinstance(worker_seeding, Callable):
        seed = worker_seeding(worker_info)
        random.seed(seed)
        torch.manual_seed(seed)
        np.random.seed(seed % (2 ** 32 - 1))
    else:
        assert worker_seeding in ('all', 'part')
        # random / torch seed already called in dataloader iter class w/ worker_info.seed
        # to reproduce some old results (same seed + hparam combo), partial seeding
        # is required (skip numpy re-seed)
        if worker_seeding == 'all':
            np.random.seed(worker_info.seed % (2 ** 32 - 1))


def create_loader(
        dataset: Union[ImageDataset, IterableImageDataset],
        input_size: Union[int, Tuple[int, int], Tuple[int, int, int]],
        batch_size: int,
        is_training: bool = False,
        no_aug: bool = False,
        re_prob: float = 0.,
        re_mode: str = 'const',
        re_count: int = 1,
        re_split: bool = False,
        train_crop_mode: Optional[str] = None,
        scale: Optional[Tuple[float, float]] = None,
        ratio: Optional[Tuple[float, float]] = None,
        hflip: float = 0.5,
        vflip: float = 0.,
        color_jitter: float = 0.4,
        color_jitter_prob: Optional[float] = None,
        grayscale_prob: float = 0.,
        gaussian_blur_prob: float = 0.,
        auto_augment: Optional[str] = None,
        num_aug_repeats: int = 0,
        num_aug_splits: int = 0,
        interpolation: str = 'bilinear',
        mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
        std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
        num_workers: int = 1,
        distributed: bool = False,
        crop_pct: Optional[float] = None,
        crop_mode: Optional[str] = None,
        crop_border_pixels: Optional[int] = None,
        collate_fn: Optional[Callable] = None,
        pin_memory: bool = False,
        fp16: bool = False,  # deprecated, use img_dtype
        img_dtype: torch.dtype = torch.float32,
        device: torch.device = torch.device('cuda'),
        use_prefetcher: bool = True,
        use_multi_epochs_loader: bool = False,
        persistent_workers: bool = True,
        worker_seeding: str = 'all',
        tf_preprocessing: bool = False,
):
    """Build a DataLoader (optionally device-prefetching) for a timm dataset.

    Assigns a transform to `dataset` (side effect), picks a sampler for the
    distributed / repeat-augment cases, then wraps the torch DataLoader in a
    PrefetchLoader when `use_prefetcher` is set.

    Args:
        dataset: The image dataset to load.
        input_size: Target input size (channels, height, width) tuple or size scalar.
        batch_size: Number of samples in a batch.
        is_training: Return training (random) transforms.
        no_aug: Disable augmentation for training (useful for debug).
        re_prob: Random erasing probability.
        re_mode: Random erasing fill mode.
        re_count: Number of random erasing regions.
        re_split: Control split of random erasing across batch size.
        scale: Random resize scale range (crop area, < 1.0 => zoom in).
        ratio: Random aspect ratio range (crop ratio for RRC, ratio adjustment factor for RKR).
        hflip: Horizontal flip probability.
        vflip: Vertical flip probability.
        color_jitter: Random color jitter component factors (brightness, contrast, saturation, hue).
            Scalar is applied as (scalar,) * 3 (no hue).
        color_jitter_prob: Apply color jitter with this probability if not None (for SimCLR-like aug).
        grayscale_prob: Probability of converting image to grayscale (for SimCLR-like aug).
        gaussian_blur_prob: Probability of applying gaussian blur (for SimCLR-like aug).
        auto_augment: Auto augment configuration string (see auto_augment.py).
        num_aug_repeats: Enable special sampler to repeat same augmentation across distributed GPUs.
        num_aug_splits: Enable mode where augmentations can be split across the batch.
        interpolation: Image interpolation mode.
        mean: Image normalization mean.
        std: Image normalization standard deviation.
        num_workers: Num worker processes per DataLoader.
        distributed: Enable dataloading for distributed training.
        crop_pct: Inference crop percentage (output size / resize size).
        crop_mode: Inference crop mode. One of ['squash', 'border', 'center']. Defaults to 'center' when None.
        crop_border_pixels: Inference crop border of specified # pixels around edge of original image.
        collate_fn: Override default collate_fn.
        pin_memory: Pin memory for device transfer.
        fp16: Deprecated argument for half-precision input dtype. Use img_dtype.
        img_dtype: Data type for input image.
        device: Device to transfer inputs and targets to.
        use_prefetcher: Use efficient pre-fetcher to load samples onto device.
        use_multi_epochs_loader: Use MultiEpochsDataLoader to keep workers alive across epochs.
        persistent_workers: Enable persistent worker processes.
        worker_seeding: Control worker random seeding at init.
        tf_preprocessing: Use TF 1.0 inference preprocessing for testing model ports.

    Returns:
        DataLoader
    """
    re_num_splits = 0
    if re_split:
        # apply RE to second half of batch if no aug split otherwise line up with aug split
        re_num_splits = num_aug_splits or 2
    dataset.transform = create_transform(
        input_size,
        is_training=is_training,
        no_aug=no_aug,
        train_crop_mode=train_crop_mode,
        scale=scale,
        ratio=ratio,
        hflip=hflip,
        vflip=vflip,
        color_jitter=color_jitter,
        color_jitter_prob=color_jitter_prob,
        grayscale_prob=grayscale_prob,
        gaussian_blur_prob=gaussian_blur_prob,
        auto_augment=auto_augment,
        interpolation=interpolation,
        mean=mean,
        std=std,
        crop_pct=crop_pct,
        crop_mode=crop_mode,
        crop_border_pixels=crop_border_pixels,
        re_prob=re_prob,
        re_mode=re_mode,
        re_count=re_count,
        re_num_splits=re_num_splits,
        tf_preprocessing=tf_preprocessing,
        use_prefetcher=use_prefetcher,
        separate=num_aug_splits > 0,
    )

    if isinstance(dataset, IterableImageDataset):
        # give Iterable datasets early knowledge of num_workers so that sample estimates
        # are correct before worker processes are launched
        dataset.set_loader_cfg(num_workers=num_workers)

    sampler = None
    if distributed and not isinstance(dataset, torch.utils.data.IterableDataset):
        if is_training:
            if num_aug_repeats:
                sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats)
            else:
                sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        else:
            # This will add extra duplicate entries to result in equal num
            # of samples per-process, will slightly alter validation results
            sampler = OrderedDistributedSampler(dataset)
    else:
        assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use"

    if collate_fn is None:
        # fast_collate emits uint8 tensors; PrefetchLoader normalizes on device.
        collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate

    loader_class = torch.utils.data.DataLoader
    if use_multi_epochs_loader:
        loader_class = MultiEpochsDataLoader

    loader_args = dict(
        batch_size=batch_size,
        shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training,
        num_workers=num_workers,
        sampler=sampler,
        collate_fn=collate_fn,
        pin_memory=pin_memory,
        drop_last=is_training,
        worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding),
        persistent_workers=persistent_workers
    )
    try:
        loader = loader_class(dataset, **loader_args)
    except TypeError as e:
        # retry without persistent_workers for older torch versions
        loader_args.pop('persistent_workers')  # only in Pytorch 1.7+
        loader = loader_class(dataset, **loader_args)
    if use_prefetcher:
        prefetch_re_prob = re_prob if is_training and not no_aug else 0.
        loader = PrefetchLoader(
            loader,
            mean=mean,
            std=std,
            channels=input_size[0],
            device=device,
            fp16=fp16,  # deprecated, use img_dtype
            img_dtype=img_dtype,
            re_prob=prefetch_re_prob,
            re_mode=re_mode,
            re_count=re_count,
            re_num_splits=re_num_splits
        )

    return loader


class MultiEpochsDataLoader(torch.utils.data.DataLoader):
    """DataLoader that keeps a single iterator alive across epochs.

    Wraps the (batch) sampler in an endlessly repeating sampler and creates
    the worker iterator once, so workers are not torn down between epochs.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Temporarily unlock the base class's initialized flag so the
        # sampler attributes can be swapped post-construction.
        self._DataLoader__initialized = False
        if self.batch_sampler is None:
            self.sampler = _RepeatSampler(self.sampler)
        else:
            self.batch_sampler = _RepeatSampler(self.batch_sampler)
        self._DataLoader__initialized = True
        self.iterator = super().__iter__()

    def __len__(self):
        # Length of one epoch, taken from the wrapped (non-repeating) sampler.
        return len(self.sampler) if self.batch_sampler is None else len(self.batch_sampler.sampler)

    def __iter__(self):
        for i in range(len(self)):
            yield next(self.iterator)


class _RepeatSampler(object):
    """ Sampler that repeats forever.

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)
pytorch-image-models/timm/data/loader.py/0
{ "file_path": "pytorch-image-models/timm/data/loader.py", "repo_id": "pytorch-image-models", "token_count": 7171 }
254
""" A dataset reader that reads tarfile based datasets This reader can extract image samples from: * a single tar of image files * a folder of multiple tarfiles containing imagefiles * a tar of tars containing image files Labels are based on the combined folder and/or tar name structure. Hacked together by / Copyright 2020 Ross Wightman """ import logging import os import pickle import tarfile from glob import glob from typing import List, Tuple, Dict, Set, Optional, Union import numpy as np from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader _logger = logging.getLogger(__name__) CACHE_FILENAME_SUFFIX = '_tarinfos.pickle' class TarState: def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None): self.tf: tarfile.TarFile = tf self.ti: tarfile.TarInfo = ti self.children: Dict[str, TarState] = {} # child states (tars within tars) def reset(self): self.tf = None def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions: Set[str]): sample_count = 0 for i, ti in enumerate(tf): if not ti.isfile(): continue dirname, basename = os.path.split(ti.path) name, ext = os.path.splitext(basename) ext = ext.lower() if ext == '.tar': with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf: child_info = dict( name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[]) sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions) _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. 
{len(child_info["samples"])} images.') parent_info['children'].append(child_info) elif ext in extensions: parent_info['samples'].append(ti) sample_count += 1 return sample_count def extract_tarinfos( root, class_name_to_idx: Optional[Dict] = None, cache_tarinfo: Optional[bool] = None, extensions: Optional[Union[List, Tuple, Set]] = None, sort: bool = True ): extensions = get_img_extensions(as_set=True) if not extensions else set(extensions) root_is_tar = False if os.path.isfile(root): assert os.path.splitext(root)[-1].lower() == '.tar' tar_filenames = [root] root, root_name = os.path.split(root) root_name = os.path.splitext(root_name)[0] root_is_tar = True else: root_name = root.strip(os.path.sep).split(os.path.sep)[-1] tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) num_tars = len(tar_filenames) tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) assert num_tars, f'No .tar files found at specified path ({root}).' _logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...') info = dict(tartrees=[]) cache_path = '' if cache_tarinfo is None: cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB if cache_tarinfo: cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX cache_path = os.path.join(root, cache_filename) if os.path.exists(cache_path): _logger.info(f'Reading tar info from cache file {cache_path}.') with open(cache_path, 'rb') as pf: info = pickle.load(pf) assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" else: for i, fn in enumerate(tar_filenames): path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) num_children = len(parent_info["children"]) _logger.debug( f'{i}/{num_tars}. 
Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.') info['tartrees'].append(parent_info) if cache_path: _logger.info(f'Writing tar info to cache file {cache_path}.') with open(cache_path, 'wb') as pf: pickle.dump(info, pf) samples = [] labels = [] build_class_map = False if class_name_to_idx is None: build_class_map = True # Flatten tartree info into lists of samples and targets w/ targets based on label id via # class map arg or from unique paths. # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children # this covers my current use cases and keeps things a little easier to test for now. tarfiles = [] def _label_from_paths(*path, leaf_only=True): path = os.path.join(*path).strip(os.path.sep) return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') def _add_samples(info, fn): added = 0 for s in info['samples']: label = _label_from_paths(info['path'], os.path.dirname(s.path)) if not build_class_map and label not in class_name_to_idx: continue samples.append((s, fn, info['ti'])) labels.append(label) added += 1 return added _logger.info(f'Collecting samples and building tar states.') for parent_info in info['tartrees']: # if tartree has children, we assume all samples are at the child level tar_name = None if root_is_tar else parent_info['name'] tar_state = TarState() parent_added = 0 for child_info in parent_info['children']: child_added = _add_samples(child_info, fn=tar_name) if child_added: tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) parent_added += child_added parent_added += _add_samples(parent_info, fn=tar_name) if parent_added: tarfiles.append((tar_name, tar_state)) del info if build_class_map: # build class index sorted_labels = list(sorted(set(labels), key=natural_key)) class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} _logger.info(f'Mapping targets and sorting samples.') samples_and_targets = [(s, 
class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx] if sort: samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) samples, targets = zip(*samples_and_targets) samples = np.array(samples) targets = np.array(targets) _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') return samples, targets, class_name_to_idx, tarfiles class ReaderImageInTar(Reader): """ Multi-tarfile dataset reader where there is one .tar file per class """ def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): super().__init__() class_name_to_idx = None if class_map: class_name_to_idx = load_class_map(class_map, root) self.root = root self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos( self.root, class_name_to_idx=class_name_to_idx, cache_tarinfo=cache_tarinfo ) self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()} if len(tarfiles) == 1 and tarfiles[0][0] is None: self.root_is_tar = True self.tar_state = tarfiles[0][1] else: self.root_is_tar = False self.tar_state = dict(tarfiles) self.cache_tarfiles = cache_tarfiles def __len__(self): return len(self.samples) def __getitem__(self, index): sample = self.samples[index] target = self.targets[index] sample_ti, parent_fn, child_ti = sample parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root tf = None cache_state = None if self.cache_tarfiles: cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn] tf = cache_state.tf if tf is None: tf = tarfile.open(parent_abs) if self.cache_tarfiles: cache_state.tf = tf if child_ti is not None: ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None if ctf is None: ctf = tarfile.open(fileobj=tf.extractfile(child_ti)) if self.cache_tarfiles: cache_state.children[child_ti.name].tf = ctf tf = ctf return tf.extractfile(sample_ti), target def _filename(self, index, 
basename=False, absolute=False): filename = self.samples[index][0].name if basename: filename = os.path.basename(filename) return filename
pytorch-image-models/timm/data/readers/reader_image_in_tar.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_image_in_tar.py", "repo_id": "pytorch-image-models", "token_count": 4050 }
255
from typing import Optional, Type

import torch
import torch.nn as nn
import torch.nn.functional as F

from .attention import maybe_add_mask
from .config import use_fused_attn
from .mlp import Mlp
from .weight_init import trunc_normal_tf_


class AttentionPoolLatent(nn.Module):
    """ Attention pooling w/ latent query

    Pools a (B, N, C) token sequence down via cross-attention from a small
    learned latent query, followed by an MLP residual, then an optional
    'token'/'avg' reduction over the latent dimension.
    """
    fused_attn: torch.jit.Final[bool]

    def __init__(
            self,
            in_features: int,
            out_features: int = None,
            embed_dim: int = None,
            num_heads: int = 8,
            feat_size: Optional[int] = None,
            mlp_ratio: float = 4.0,
            qkv_bias: bool = True,
            qk_norm: bool = False,
            latent_len: int = 1,
            latent_dim: int = None,
            pos_embed: str = '',
            pool_type: str = 'token',
            norm_layer: Optional[Type[nn.Module]] = None,
            act_layer: Optional[Type[nn.Module]] = nn.GELU,
            drop: float = 0.0,
    ):
        """
        Args:
            in_features: Input (and default embed/out) feature dim.
            out_features: Output features, defaults to in_features.
            embed_dim: Attention embed dim, defaults to in_features; must be
                divisible by num_heads.
            num_heads: Number of attention heads.
            feat_size: Required sequence length when pos_embed == 'abs'.
            mlp_ratio: Hidden dim multiplier for the trailing MLP.
            qkv_bias: Enable bias on q/kv projections.
            qk_norm: Apply per-head norm to q and k.
            latent_len: Number of latent query tokens.
            latent_dim: Latent dim used only for init std, defaults to embed_dim.
            pos_embed: '' (none) or 'abs' for learned absolute pos embed.
            pool_type: 'token', 'avg', or '' for no final pooling.
            norm_layer: Norm layer type (pre-MLP norm and optional qk norm).
            act_layer: MLP activation layer.
            drop: Projection dropout rate.
        """
        super().__init__()
        embed_dim = embed_dim or in_features
        out_features = out_features or in_features
        assert embed_dim % num_heads == 0
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.feat_size = feat_size
        self.scale = self.head_dim ** -0.5
        self.pool = pool_type
        self.fused_attn = use_fused_attn()

        if pos_embed == 'abs':
            assert feat_size is not None
            self.pos_embed = nn.Parameter(torch.zeros(feat_size, in_features))
        else:
            self.pos_embed = None

        self.latent_dim = latent_dim or embed_dim
        self.latent_len = latent_len
        self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim))

        # q is computed from the latent tokens only, k/v from the input tokens
        self.q = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=qkv_bias)
        if qk_norm:
            qk_norm_layer = norm_layer or nn.LayerNorm
            self.q_norm = qk_norm_layer(self.head_dim)
            self.k_norm = qk_norm_layer(self.head_dim)
        else:
            self.q_norm = nn.Identity()
            self.k_norm = nn.Identity()
        self.proj = nn.Linear(embed_dim, embed_dim)
        self.proj_drop = nn.Dropout(drop)

        self.norm = norm_layer(out_features) if norm_layer is not None else nn.Identity()
        self.mlp = Mlp(embed_dim, int(embed_dim * mlp_ratio), act_layer=act_layer)

        self.init_weights()

    def init_weights(self):
        # truncated-normal init for pos embed and latent query
        if self.pos_embed is not None:
            trunc_normal_tf_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5)
        trunc_normal_tf_(self.latent, std=self.latent_dim ** -0.5)

    def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
        # x: (B, N, C) token sequence
        B, N, C = x.shape

        if self.pos_embed is not None:
            # FIXME interpolate
            x = x + self.pos_embed.unsqueeze(0).to(x.dtype)

        q_latent = self.latent.expand(B, -1, -1)
        # q: (B, num_heads, latent_len, head_dim)
        q = self.q(q_latent).reshape(B, self.latent_len, self.num_heads, self.head_dim).transpose(1, 2)

        # kv: (2, B, num_heads, N, head_dim)
        kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        k, v = kv.unbind(0)

        q, k = self.q_norm(q), self.k_norm(k)

        if self.fused_attn:
            x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
        else:
            # manual attention path, mirrors SDPA semantics
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)
            attn = maybe_add_mask(attn, attn_mask)
            attn = attn.softmax(dim=-1)
            x = attn @ v
        x = x.transpose(1, 2).reshape(B, self.latent_len, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        # residual MLP on the pooled latent tokens
        x = x + self.mlp(self.norm(x))

        # optional pool if latent seq_len > 1 and pooled output is desired
        if self.pool == 'token':
            x = x[:, 0]
        elif self.pool == 'avg':
            x = x.mean(1)
        return x
pytorch-image-models/timm/layers/attention_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/attention_pool.py", "repo_id": "pytorch-image-models", "token_count": 1995 }
256
""" ECA module from ECAnet paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks https://arxiv.org/abs/1910.03151 Original ECA model borrowed from https://github.com/BangguWu/ECANet Modified circular ECA implementation and adaption for use in timm package by Chris Ha https://github.com/VRandme Original License: MIT License Copyright (c) 2019 BangguWu, Qilong Wang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import math from torch import nn import torch.nn.functional as F from .create_act import create_act_layer from .helpers import make_divisible class EcaModule(nn.Module): """Constructs an ECA module. Args: channels: Number of channels of the input feature map for use in adaptive kernel sizes for actual calculations according to channel. gamma, beta: when channel is given parameters of mapping function refer to original paper https://arxiv.org/pdf/1910.03151.pdf (default=None. if channel size not given, use k_size given for kernel size.) 
kernel_size: Adaptive selection of kernel size (default=3) gamm: used in kernel_size calc, see above beta: used in kernel_size calc, see above act_layer: optional non-linearity after conv, enables conv bias, this is an experiment gate_layer: gating non-linearity to use """ def __init__( self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False): super(EcaModule, self).__init__() if channels is not None: t = int(abs(math.log(channels, 2) + beta) / gamma) kernel_size = max(t if t % 2 else t + 1, 3) assert kernel_size % 2 == 1 padding = (kernel_size - 1) // 2 if use_mlp: # NOTE 'mlp' mode is a timm experiment, not in paper assert channels is not None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) act_layer = act_layer or nn.ReLU self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) self.act = create_act_layer(act_layer) self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) else: self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) self.act = None self.conv2 = None self.gate = create_act_layer(gate_layer) def forward(self, x): y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv y = self.conv(y) if self.conv2 is not None: y = self.act(y) y = self.conv2(y) y = self.gate(y).view(x.shape[0], -1, 1, 1) return x * y.expand_as(x) EfficientChannelAttn = EcaModule # alias class CecaModule(nn.Module): """Constructs a circular ECA module. ECA module where the conv uses circular padding rather than zero padding. Unlike the spatial dimension, the channels do not have inherent ordering nor locality. Although this module in essence, applies such an assumption, it is unnecessary to limit the channels on either "edge" from being circularly adapted to each other. 
This will fundamentally increase connectivity and possibly increase performance metrics (accuracy, robustness), without significantly impacting resource metrics (parameter size, throughput,latency, etc) Args: channels: Number of channels of the input feature map for use in adaptive kernel sizes for actual calculations according to channel. gamma, beta: when channel is given parameters of mapping function refer to original paper https://arxiv.org/pdf/1910.03151.pdf (default=None. if channel size not given, use k_size given for kernel size.) kernel_size: Adaptive selection of kernel size (default=3) gamm: used in kernel_size calc, see above beta: used in kernel_size calc, see above act_layer: optional non-linearity after conv, enables conv bias, this is an experiment gate_layer: gating non-linearity to use """ def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): super(CecaModule, self).__init__() if channels is not None: t = int(abs(math.log(channels, 2) + beta) / gamma) kernel_size = max(t if t % 2 else t + 1, 3) has_act = act_layer is not None assert kernel_size % 2 == 1 # PyTorch circular padding mode is buggy as of pytorch 1.4 # see https://github.com/pytorch/pytorch/pull/17240 # implement manual circular padding self.padding = (kernel_size - 1) // 2 self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) self.gate = create_act_layer(gate_layer) def forward(self, x): y = x.mean((2, 3)).view(x.shape[0], 1, -1) # Manually implement circular padding, F.pad does not seemed to be bugged y = F.pad(y, (self.padding, self.padding), mode='circular') y = self.conv(y) y = self.gate(y).view(x.shape[0], -1, 1, 1) return x * y.expand_as(x) CircularEfficientChannelAttn = CecaModule
pytorch-image-models/timm/layers/eca.py/0
{ "file_path": "pytorch-image-models/timm/layers/eca.py", "repo_id": "pytorch-image-models", "token_count": 2411 }
257
""" Linear layer (alternate definition) """ import torch import torch.nn.functional as F from torch import nn as nn class Linear(nn.Linear): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. """ def forward(self, input: torch.Tensor) -> torch.Tensor: if torch.jit.is_scripting(): bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) else: return F.linear(input, self.weight, self.bias)
pytorch-image-models/timm/layers/linear.py/0
{ "file_path": "pytorch-image-models/timm/layers/linear.py", "repo_id": "pytorch-image-models", "token_count": 282 }
258
""" Selective Kernel Convolution/Attention Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from .conv_bn_act import ConvNormAct from .helpers import make_divisible from .trace_utils import _assert def _kernel_valid(k): if isinstance(k, (list, tuple)): for ki in k: return _kernel_valid(ki) assert k >= 3 and k % 2 class SelectiveKernelAttn(nn.Module): def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): """ Selective Kernel Attention Module Selective Kernel attention mechanism factored out into its own module. """ super(SelectiveKernelAttn, self).__init__() self.num_paths = num_paths self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) self.bn = norm_layer(attn_channels) self.act = act_layer(inplace=True) self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) def forward(self, x): _assert(x.shape[1] == self.num_paths, '') x = x.sum(1).mean((2, 3), keepdim=True) x = self.fc_reduce(x) x = self.bn(x) x = self.act(x) x = self.fc_select(x) B, C, H, W = x.shape x = x.view(B, self.num_paths, C // self.num_paths, H, W) x = torch.softmax(x, dim=1) return x class SelectiveKernel(nn.Module): def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None): """ Selective Kernel Convolution Module As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. Largest change is the input split, which divides the input channels across each convolution path, this can be viewed as a grouping of sorts, but the output channel counts expand to the module level value. 
This keeps the parameter count from ballooning when the convolutions themselves don't have groups, but still provides a noteworthy increase in performance over similar param count models without this attention layer. -Ross W Args: in_channels (int): module input (feature) channel count out_channels (int): module output (feature) channel count kernel_size (int, list): kernel size for each convolution branch stride (int): stride for convolutions dilation (int): dilation for module as a whole, impacts dilation of each branch groups (int): number of groups for each branch rd_ratio (int, float): reduction factor for attention features keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, can be viewed as grouping by path, output expands to module out_channels count act_layer (nn.Module): activation layer to use norm_layer (nn.Module): batchnorm/norm layer to use aa_layer (nn.Module): anti-aliasing module drop_layer (nn.Module): spatial drop module in convs (drop block, etc) """ super(SelectiveKernel, self).__init__() out_channels = out_channels or in_channels kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 
5x5 -> 3x3 + dilation _kernel_valid(kernel_size) if not isinstance(kernel_size, list): kernel_size = [kernel_size] * 2 if keep_3x3: dilation = [dilation * (k - 1) // 2 for k in kernel_size] kernel_size = [3] * len(kernel_size) else: dilation = [dilation] * len(kernel_size) self.num_paths = len(kernel_size) self.in_channels = in_channels self.out_channels = out_channels self.split_input = split_input if self.split_input: assert in_channels % self.num_paths == 0 in_channels = in_channels // self.num_paths groups = min(out_channels, groups) conv_kwargs = dict( stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_layer=drop_layer) self.paths = nn.ModuleList([ ConvNormAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) for k, d in zip(kernel_size, dilation)]) attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) def forward(self, x): if self.split_input: x_split = torch.split(x, self.in_channels // self.num_paths, 1) x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] else: x_paths = [op(x) for op in self.paths] x = torch.stack(x_paths, dim=1) x_attn = self.attn(x) x = x * x_attn x = torch.sum(x, dim=1) return x
pytorch-image-models/timm/layers/selective_kernel.py/0
{ "file_path": "pytorch-image-models/timm/layers/selective_kernel.py", "repo_id": "pytorch-image-models", "token_count": 2314 }
259
from .beit import * from .byoanet import * from .byobnet import * from .cait import * from .coat import * from .convit import * from .convmixer import * from .convnext import * from .crossvit import * from .cspnet import * from .davit import * from .deit import * from .densenet import * from .dla import * from .dpn import * from .edgenext import * from .efficientformer import * from .efficientformer_v2 import * from .efficientnet import * from .efficientvit_mit import * from .efficientvit_msra import * from .eva import * from .fasternet import * from .fastvit import * from .focalnet import * from .gcvit import * from .ghostnet import * from .hardcorenas import * from .hgnet import * from .hiera import * from .hieradet_sam2 import * from .hrnet import * from .inception_next import * from .inception_resnet_v2 import * from .inception_v3 import * from .inception_v4 import * from .levit import * from .maxxvit import * from .mambaout import * from .metaformer import * from .mlp_mixer import * from .mobilenetv3 import * from .mobilenetv5 import * from .mobilevit import * from .mvitv2 import * from .naflexvit import * from .nasnet import * from .nest import * from .nextvit import * from .nfnet import * from .pit import * from .pnasnet import * from .pvt_v2 import * from .rdnet import * from .regnet import * from .repghost import * from .repvit import * from .res2net import * from .resnest import * from .resnet import * from .resnetv2 import * from .rexnet import * from .selecsls import * from .senet import * from .sequencer import * from .shvit import * from .sknet import * from .starnet import * from .swiftformer import * from .swin_transformer import * from .swin_transformer_v2 import * from .swin_transformer_v2_cr import * from .tiny_vit import * from .tnt import * from .tresnet import * from .twins import * from .vgg import * from .visformer import * from .vision_transformer import * from .vision_transformer_hybrid import * from .vision_transformer_relpos import * 
from .vision_transformer_sam import * from .vitamin import * from .volo import * from .vovnet import * from .xception import * from .xception_aligned import * from .xcit import * from ._builder import ( build_model_with_cfg as build_model_with_cfg, load_pretrained as load_pretrained, load_custom_pretrained as load_custom_pretrained, resolve_pretrained_cfg as resolve_pretrained_cfg, set_pretrained_download_progress as set_pretrained_download_progress, set_pretrained_check_hash as set_pretrained_check_hash, ) from ._factory import ( create_model as create_model, parse_model_name as parse_model_name, safe_model_name as safe_model_name, ) from ._features import ( FeatureInfo as FeatureInfo, FeatureHooks as FeatureHooks, FeatureHookNet as FeatureHookNet, FeatureListNet as FeatureListNet, FeatureDictNet as FeatureDictNet, ) from ._features_fx import ( FeatureGraphNet as FeatureGraphNet, GraphExtractNet as GraphExtractNet, create_feature_extractor as create_feature_extractor, get_graph_node_names as get_graph_node_names, register_notrace_module as register_notrace_module, is_notrace_module as is_notrace_module, get_notrace_modules as get_notrace_modules, register_notrace_function as register_notrace_function, is_notrace_function as is_notrace_function, get_notrace_functions as get_notrace_functions, ) from ._helpers import ( clean_state_dict as clean_state_dict, load_state_dict as load_state_dict, load_checkpoint as load_checkpoint, remap_state_dict as remap_state_dict, resume_checkpoint as resume_checkpoint, ) from ._hub import ( load_model_config_from_hf as load_model_config_from_hf, load_state_dict_from_hf as load_state_dict_from_hf, push_to_hf_hub as push_to_hf_hub, save_for_hf as save_for_hf, ) from ._manipulate import ( model_parameters as model_parameters, named_apply as named_apply, named_modules as named_modules, named_modules_with_params as named_modules_with_params, group_modules as group_modules, group_parameters as group_parameters, checkpoint_seq as 
checkpoint_seq, checkpoint as checkpoint, adapt_input_conv as adapt_input_conv, ) from ._pretrained import ( PretrainedCfg as PretrainedCfg, DefaultCfg as DefaultCfg, filter_pretrained_cfg as filter_pretrained_cfg, ) from ._prune import adapt_model_from_string as adapt_model_from_string from ._registry import ( split_model_name_tag as split_model_name_tag, get_arch_name as get_arch_name, generate_default_cfgs as generate_default_cfgs, register_model as register_model, register_model_deprecations as register_model_deprecations, model_entrypoint as model_entrypoint, list_models as list_models, list_pretrained as list_pretrained, get_deprecated_models as get_deprecated_models, is_model as is_model, list_modules as list_modules, is_model_in_modules as is_model_in_modules, is_model_pretrained as is_model_pretrained, get_pretrained_cfg as get_pretrained_cfg, get_pretrained_cfg_value as get_pretrained_cfg_value, get_arch_pretrained_cfgs as get_arch_pretrained_cfgs, )
pytorch-image-models/timm/models/__init__.py/0
{ "file_path": "pytorch-image-models/timm/models/__init__.py", "repo_id": "pytorch-image-models", "token_count": 1833 }
260
""" PyTorch implementation of DualPathNetworks Based on original MXNet implementation https://github.com/cypw/DPNs with many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. This implementation is compatible with the pretrained weights from cypw's MXNet implementation. Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from functools import partial from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier, get_norm_act_layer from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['DPN'] class CatBnAct(nn.Module): def __init__(self, in_chs, norm_layer=BatchNormAct2d): super(CatBnAct, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (torch.Tensor) pass def forward(self, x): if isinstance(x, tuple): x = torch.cat(x, dim=1) return self.bn(x) class BnActConv2d(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): super(BnActConv2d, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) def forward(self, x): return self.conv(self.bn(x)) class DualPathBlock(nn.Module): def __init__( self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False, ): super(DualPathBlock, self).__init__() self.num_1x1_c = num_1x1_c self.inc = inc self.b = b if block_type == 'proj': self.key_stride = 1 self.has_proj = True elif block_type == 'down': 
self.key_stride = 2 self.has_proj = True else: assert block_type == 'normal' self.key_stride = 1 self.has_proj = False self.c1x1_w_s1 = None self.c1x1_w_s2 = None if self.has_proj: # Using different member names here to allow easier parameter key matching for conversion if self.key_stride == 2: self.c1x1_w_s2 = BnActConv2d( in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) else: self.c1x1_w_s1 = BnActConv2d( in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) self.c3x3_b = BnActConv2d( in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) if b: self.c1x1_c = CatBnAct(in_chs=num_3x3_b) self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) else: self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) self.c1x1_c1 = None self.c1x1_c2 = None @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] pass def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: if isinstance(x, tuple): x_in = torch.cat(x, dim=1) else: x_in = x if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: # self.has_proj == False, torchscript requires condition on module == None x_s1 = x[0] x_s2 = x[1] else: # self.has_proj == True if self.c1x1_w_s1 is not None: # self.key_stride = 1 x_s = self.c1x1_w_s1(x_in) else: # self.key_stride = 2 x_s = self.c1x1_w_s2(x_in) x_s1 = x_s[:, :self.num_1x1_c, :, :] x_s2 = x_s[:, self.num_1x1_c:, :, :] x_in = self.c1x1_a(x_in) x_in = self.c3x3_b(x_in) x_in = self.c1x1_c(x_in) if self.c1x1_c1 is not None: # self.b == True, using None check for torchscript compat out1 = self.c1x1_c1(x_in) 
out2 = self.c1x1_c2(x_in) else: out1 = x_in[:, :self.num_1x1_c, :, :] out2 = x_in[:, self.num_1x1_c:, :, :] resid = x_s1 + out1 dense = torch.cat([x_s2, out2], dim=1) return resid, dense class DPN(nn.Module): def __init__( self, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), k_r=96, groups=32, num_classes=1000, in_chans=3, output_stride=32, global_pool='avg', small=False, num_init_features=64, b=False, drop_rate=0., norm_layer='batchnorm2d', act_layer='relu', fc_act_layer='elu', ): super(DPN, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.b = b assert output_stride == 32 # FIXME look into dilation support norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=act_layer), eps=.001) fc_norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=fc_act_layer), eps=.001, inplace=False) bw_factor = 1 if small else 4 blocks = OrderedDict() # conv1 blocks['conv1_1'] = ConvNormAct( in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] # conv2 bw = 64 * bw_factor inc = inc_sec[0] r = (k_r * bw) // (64 * bw_factor) blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) in_chs = bw + 3 * inc for i in range(2, k_sec[0] + 1): blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] # conv3 bw = 128 * bw_factor inc = inc_sec[1] r = (k_r * bw) // (64 * bw_factor) blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[1] + 1): blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=8, 
module=f'features.conv3_{k_sec[1]}')] # conv4 bw = 256 * bw_factor inc = inc_sec[2] r = (k_r * bw) // (64 * bw_factor) blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[2] + 1): blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] # conv5 bw = 512 * bw_factor inc = inc_sec[3] r = (k_r * bw) // (64 * bw_factor) blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[3] + 1): blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) self.num_features = self.head_hidden_size = in_chs self.features = nn.Sequential(blocks) # Using 1x1 conv for the FC layer to allow the extra pooling scheme self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^features\.conv1', blocks=[ (r'^features\.conv(\d+)' if coarse else r'^features\.conv(\d+)_(\d+)', None), (r'^features\.conv5_bn_ac', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else 
nn.Identity() def forward_features(self, x): return self.features(x) def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) if pre_logits: return self.flatten(x) x = self.classifier(x) return self.flatten(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_dpn(variant, pretrained=False, **kwargs): return build_model_with_cfg( DPN, variant, pretrained, feature_cfg=dict(feature_concat=True, flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'dpn48b.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'dpn68.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn68b.ra_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'dpn68b.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn92.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn98.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn131.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn107.mx_in1k': _cfg(hf_hub_id='timm/') }) @register_model def dpn48b(pretrained=False, **kwargs) -> DPN: model_args = dict( small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 6, 3), inc_sec=(16, 32, 32, 64), act_layer='silu') return _create_dpn('dpn48b', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn68(pretrained=False, **kwargs) -> DPN: model_args = dict( small=True, num_init_features=10, k_r=128, groups=32, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def 
dpn68b(pretrained=False, **kwargs) -> DPN: model_args = dict( small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68b', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn92(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=64, k_r=96, groups=32, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128)) return _create_dpn('dpn92', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn98(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=96, k_r=160, groups=40, k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn98', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn131(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=128, k_r=160, groups=40, k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn131', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def dpn107(pretrained=False, **kwargs) -> DPN: model_args = dict( num_init_features=128, k_r=200, groups=50, k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128)) return _create_dpn('dpn107', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/dpn.py/0
{ "file_path": "pytorch-image-models/timm/models/dpn.py", "repo_id": "pytorch-image-models", "token_count": 7004 }
261
""" HardCoRe-NAS model definitions.

Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
Paper: https://arxiv.org/abs/2102.11646

Thin wrappers that decode HardCoRe-NAS architecture strings into MobileNetV3
models via the timm efficientnet builder.
"""
from functools import partial

import torch.nn as nn

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ._builder import build_model_with_cfg
from ._builder import pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
from ._registry import register_model, generate_default_cfgs
from .mobilenetv3 import MobileNetV3, MobileNetV3Features

__all__ = []  # model_registry will add each entrypoint fn to this


def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
    """Creates a hardcorenas model

    Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
    Paper: https://arxiv.org/abs/2102.11646

    Args:
        pretrained: Load pretrained weights.
        variant: Registered model variant name.
        arch_def: Per-stage lists of block definition strings for decode_arch_def.
        **kwargs: Extra model args; 'features_only' switches to the feature-extraction class.

    Returns:
        Instantiated MobileNetV3 (or MobileNetV3Features when features_only).
    """
    num_features = 1280
    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        num_features=num_features,
        stem_size=32,
        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'hard_swish'),
        se_layer=se_layer,
        **kwargs,
    )

    features_only = False
    model_cls = MobileNetV3
    kwargs_filter = None
    if model_kwargs.pop('features_only', False):
        features_only = True
        # classifier-head args don't apply to the feature-extraction class; filter them out.
        # NOTE: original tuple listed 'global_pool' twice; duplicate removed (set membership semantics unchanged).
        kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias')
        model_cls = MobileNetV3Features
    model = build_model_with_cfg(
        model_cls,
        variant,
        pretrained,
        pretrained_strict=not features_only,
        kwargs_filter=kwargs_filter,
        **model_kwargs,
    )
    if features_only:
        model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
    return model


def _cfg(url='', **kwargs):
    """Return a default pretrained config dict, overridable via kwargs."""
    return {
        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv_stem', 'classifier': 'classifier',
        **kwargs
    }


default_cfgs = generate_default_cfgs({
    'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'),
    'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'),
    'hardcorenas_c.miil_green_in1k': _cfg(hf_hub_id='timm/'),
    'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'),
    'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'),
    'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/'),
})


@register_model
def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3:
    """ hardcorenas_A """
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'],
                ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs)
    return model


@register_model
def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3:
    """ hardcorenas_B """
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'],
                ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'],
                ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'],
                ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
                ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs)
    return model


@register_model
def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3:
    """ hardcorenas_C """
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre',
                 'ir_r1_k5_s1_e3_c40_nre'],
                ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
                ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs)
    return model


@register_model
def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3:
    """ hardcorenas_D """
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
                ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
                 'ir_r1_k3_s1_e3_c80_se0.25'],
                ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
                 'ir_r1_k5_s1_e3_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
                 'ir_r1_k3_s1_e6_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
    return model


@register_model
def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3:
    """ hardcorenas_E """
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25',
                 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
                ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'],
                ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
                 'ir_r1_k5_s1_e3_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
                 'ir_r1_k3_s1_e6_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs)
    return model


@register_model
def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3:
    """ hardcorenas_F """
    arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
                ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
                 'ir_r1_k3_s1_e3_c80_se0.25'],
                ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
                 'ir_r1_k3_s1_e3_c112_se0.25'],
                ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25',
                 'ir_r1_k3_s1_e6_c192_se0.25'],
                ['cn_r1_k1_s1_c960']]
    model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs)
    return model
pytorch-image-models/timm/models/hardcorenas.py/0
{ "file_path": "pytorch-image-models/timm/models/hardcorenas.py", "repo_id": "pytorch-image-models", "token_count": 4629 }
262
""" MLP-Mixer, ResMLP, and gMLP in PyTorch This impl originally based on MLP-Mixer paper. Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 @article{tolstikhin2021, title={MLP-Mixer: An all-MLP Architecture for Vision}, author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey}, journal={arXiv preprint arXiv:2105.01601}, year={2021} } Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP Code: https://github.com/facebookresearch/deit Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 @misc{touvron2021resmlp, title={ResMLP: Feedforward networks for image classification with data-efficient training}, author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou}, year={2021}, eprint={2105.03404}, } Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 @misc{liu2021pay, title={Pay Attention to MLPs}, author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le}, year={2021}, eprint={2105.08050}, } A thank you to paper authors for releasing code and weights. 
Hacked together by / Copyright 2021 Ross Wightman """ import math from functools import partial from typing import Any, Dict, List, Optional, Union, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['MixerBlock', 'MlpMixer'] # model_registry will add each entrypoint fn to this class MixerBlock(nn.Module): """Residual Block w/ token mixing and channel MLPs. Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ def __init__( self, dim: int, seq_len: int, mlp_ratio: Union[float, Tuple[float, float]] = (0.5, 4.0), mlp_layer: type = Mlp, norm_layer: type = partial(nn.LayerNorm, eps=1e-6), act_layer: type = nn.GELU, drop: float = 0., drop_path: float = 0., ) -> None: """Initialize MixerBlock. Args: dim: Dimension of input features. seq_len: Sequence length. mlp_ratio: Expansion ratios for token mixing and channel MLPs. mlp_layer: MLP layer class. norm_layer: Normalization layer. act_layer: Activation layer. drop: Dropout rate. drop_path: Drop path rate. """ super().__init__() tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] self.norm1 = norm_layer(dim) self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Affine(nn.Module): """Affine transformation layer.""" def __init__(self, dim: int) -> None: """Initialize Affine layer. Args: dim: Dimension of features. """ super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, x: torch.Tensor) -> torch.Tensor: """Apply affine transformation.""" return torch.addcmul(self.beta, self.alpha, x) class ResBlock(nn.Module): """Residual MLP block w/ LayerScale and Affine 'norm'. Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ def __init__( self, dim: int, seq_len: int, mlp_ratio: float = 4, mlp_layer: type = Mlp, norm_layer: type = Affine, act_layer: type = nn.GELU, init_values: float = 1e-4, drop: float = 0., drop_path: float = 0., ) -> None: """Initialize ResBlock. Args: dim: Dimension of input features. seq_len: Sequence length. mlp_ratio: Channel MLP expansion ratio. mlp_layer: MLP layer class. norm_layer: Normalization layer. act_layer: Activation layer. init_values: Initial values for layer scale. drop: Dropout rate. drop_path: Drop path rate. """ super().__init__() channel_dim = int(dim * mlp_ratio) self.norm1 = norm_layer(dim) self.linear_tokens = nn.Linear(seq_len, seq_len) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) self.ls1 = nn.Parameter(init_values * torch.ones(dim)) self.ls2 = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) return x class SpatialGatingUnit(nn.Module): """Spatial Gating Unit. Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ def __init__(self, dim: int, seq_len: int, norm_layer: type = nn.LayerNorm) -> None: """Initialize Spatial Gating Unit. Args: dim: Dimension of input features. seq_len: Sequence length. norm_layer: Normalization layer. """ super().__init__() gate_dim = dim // 2 self.norm = norm_layer(gate_dim) self.proj = nn.Linear(seq_len, seq_len) def init_weights(self) -> None: """Initialize weights for projection gate.""" # special init for the projection gate, called as override by base model init nn.init.normal_(self.proj.weight, std=1e-6) nn.init.ones_(self.proj.bias) def forward(self, x: torch.Tensor) -> torch.Tensor: """Apply spatial gating.""" u, v = x.chunk(2, dim=-1) v = self.norm(v) v = self.proj(v.transpose(-1, -2)) return u * v.transpose(-1, -2) class SpatialGatingBlock(nn.Module): """Residual Block w/ Spatial Gating. Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ def __init__( self, dim: int, seq_len: int, mlp_ratio: float = 4, mlp_layer: type = GatedMlp, norm_layer: type = partial(nn.LayerNorm, eps=1e-6), act_layer: type = nn.GELU, drop: float = 0., drop_path: float = 0., ) -> None: """Initialize SpatialGatingBlock. Args: dim: Dimension of input features. seq_len: Sequence length. mlp_ratio: Channel MLP expansion ratio. mlp_layer: MLP layer class. norm_layer: Normalization layer. act_layer: Activation layer. drop: Dropout rate. 
drop_path: Drop path rate. """ super().__init__() channel_dim = int(dim * mlp_ratio) self.norm = norm_layer(dim) sgu = partial(SpatialGatingUnit, seq_len=seq_len) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = x + self.drop_path(self.mlp_channels(self.norm(x))) return x class MlpMixer(nn.Module): """MLP-Mixer model architecture. Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ def __init__( self, num_classes: int = 1000, img_size: int = 224, in_chans: int = 3, patch_size: int = 16, num_blocks: int = 8, embed_dim: int = 512, mlp_ratio: Union[float, Tuple[float, float]] = (0.5, 4.0), block_layer: type = MixerBlock, mlp_layer: type = Mlp, norm_layer: type = partial(nn.LayerNorm, eps=1e-6), act_layer: type = nn.GELU, drop_rate: float = 0., proj_drop_rate: float = 0., drop_path_rate: float = 0., nlhb: bool = False, stem_norm: bool = False, global_pool: str = 'avg', ) -> None: """Initialize MLP-Mixer. Args: num_classes: Number of classes for classification. img_size: Input image size. in_chans: Number of input channels. patch_size: Patch size. num_blocks: Number of mixer blocks. embed_dim: Embedding dimension. mlp_ratio: MLP expansion ratio(s). block_layer: Block layer class. mlp_layer: MLP layer class. norm_layer: Normalization layer. act_layer: Activation layer. drop_rate: Head dropout rate. proj_drop_rate: Projection dropout rate. drop_path_rate: Drop path rate. nlhb: Use negative log bias initialization. stem_norm: Apply normalization to stem. global_pool: Global pooling type. 
""" super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models self.grad_checkpointing = False self.stem = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None, ) reduction = self.stem.feat_ratio() if hasattr(self.stem, 'feat_ratio') else patch_size # FIXME drop_path (stochastic depth scaling rule or all the same?) self.blocks = nn.Sequential(*[ block_layer( embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, drop=proj_drop_rate, drop_path=drop_path_rate, ) for _ in range(num_blocks)]) self.feature_info = [ dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=reduction) for i in range(num_blocks)] self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() self.init_weights(nlhb=nlhb) @torch.jit.ignore def init_weights(self, nlhb: bool = False) -> None: """Initialize model weights. Args: nlhb: Use negative log bias initialization for head. """ head_bias = -math.log(self.num_classes) if nlhb else 0. named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Any]: """Create regex patterns for parameter grouping. Args: coarse: Use coarse grouping. Returns: Dictionary mapping group names to regex patterns. """ return dict( stem=r'^stem', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing. Args: enable: Whether to enable gradient checkpointing. 
""" self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: """Get the classifier module.""" return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: """Reset the classifier head. Args: num_classes: Number of classes for new classifier. global_pool: Global pooling type. """ self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """Forward features that returns intermediates. Args: x: Input image tensor. indices: Take last n blocks if int, all if None, select matching indices if sequence. norm: Apply norm layer to all intermediates. stop_early: Stop iterating over blocks when last desired intermediate hit. output_fmt: Shape of intermediate feature outputs ('NCHW' or 'NLC'). intermediates_only: Only return intermediate features. Returns: List of intermediate features or tuple of (final features, intermediates). """ assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' 
reshape = output_fmt == 'NCHW' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # forward pass B, _, height, width = x.shape x = self.stem(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) if i in take_indices: # normalize intermediates with final norm layer if enabled intermediates.append(self.norm(x) if norm else x) # process intermediates if reshape: # reshape to BCHW output format H, W = self.stem.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if intermediates_only: return intermediates x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ) -> List[int]: """Prune layers not required for specified intermediates. Args: indices: Indices of intermediate layers to keep. prune_norm: Whether to prune normalization layer. prune_head: Whether to prune the classifier head. Returns: List of indices that were kept. """ take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: """Forward pass through feature extraction layers.""" x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through classifier head. Args: x: Feature tensor. 
pre_logits: Return features before final classifier. Returns: Output tensor. """ if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax: bool = False) -> None: """Mixer weight initialization (trying to match Flax defaults). Args: module: Module to initialize. name: Module name. head_bias: Bias value for head layer. flax: Use Flax-style initialization. """ if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: if flax: # Flax defaults lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # like MLP init in vit (my original init) nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): # NOTE if a parent module contains init_weights method, it can override the init of the # child modules as this will be called in depth-first order. 
        # A parent module defining its own init_weights() takes precedence over
        # the generic Linear/Conv/Norm init above (named_apply is depth-first).
        module.init_weights()


def checkpoint_filter_fn(state_dict, model):
    """Remap original FB ResMLP checkpoint keys to timm naming, if needed.

    Checkpoints already in timm format are returned unchanged.
    """
    if 'patch_embed.proj.weight' in state_dict:
        # Remap FB ResMlp models -> timm
        out_dict = {}
        for k, v in state_dict.items():
            k = k.replace('patch_embed.', 'stem.')
            k = k.replace('attn.', 'linear_tokens.')
            k = k.replace('mlp.', 'mlp_channels.')
            k = k.replace('gamma_', 'ls')
            if k.endswith('.alpha') or k.endswith('.beta'):
                # Flat affine scale/shift params -> broadcastable (1, 1, C) shape.
                v = v.reshape(1, 1, -1)
            out_dict[k] = v
        return out_dict
    return state_dict


def _create_mixer(variant, pretrained=False, **kwargs) -> MlpMixer:
    """Build an MlpMixer (or ResMLP/gMLP variant) via timm's model factory.

    Args:
        variant: Registered model variant name.
        pretrained: Load pretrained weights if True.
        **kwargs: Overrides forwarded to the MlpMixer constructor;
            'out_indices' is popped for the feature-extraction config.
    """
    out_indices = kwargs.pop('out_indices', 3)
    model = build_model_with_cfg(
        MlpMixer,
        variant,
        pretrained,
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
        **kwargs,
    )
    return model


def _cfg(url='', **kwargs) -> Dict[str, Any]:
    """Return a default pretrained-weight config dict, overridable via kwargs."""
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': None,
        'crop_pct': 0.875,
        'interpolation': 'bicubic',
        'fixed_input_size': True,
        'mean': (0.5, 0.5, 0.5),
        'std': (0.5, 0.5, 0.5),
        'first_conv': 'stem.proj',
        'classifier': 'head',
        **kwargs
    }


# Pretrained weight configurations for all registered variants.
default_cfgs = generate_default_cfgs({
    'mixer_s32_224.untrained': _cfg(),
    'mixer_s16_224.untrained': _cfg(),
    'mixer_b32_224.untrained': _cfg(),
    'mixer_b16_224.goog_in21k_ft_in1k': _cfg(
        hf_hub_id='timm/',
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth',
    ),
    'mixer_b16_224.goog_in21k': _cfg(
        hf_hub_id='timm/',
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth',
        num_classes=21843
    ),
    'mixer_l32_224.untrained': _cfg(),
    'mixer_l16_224.goog_in21k_ft_in1k': _cfg(
        hf_hub_id='timm/',
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth',
    ),
    'mixer_l16_224.goog_in21k': _cfg(
        hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', num_classes=21843 ), # Mixer ImageNet-21K-P pretraining 'mixer_b16_224.miil_in21k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil_in21k-2a558a71.pth', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221, ), 'mixer_b16_224.miil_in21k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil-9229a591.pth', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', ), 'gmixer_12_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'gmixer_24_224.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', mean=IMAGENET_DEFAULT_MEAN, 
std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_dino': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_dino': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'gmlp_ti16_224.untrained': _cfg(), 'gmlp_s16_224.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', ), 'gmlp_b16_224.untrained': _cfg(), }) @register_model def mixer_s32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-S/32 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_s16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-S/16 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s16_224', pretrained=pretrained, 
**model_args) return model @register_model def mixer_b32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-B/32 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_b16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-L/32 224x224. Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-L/16 224x224. ImageNet-1k pretrained weights. 
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_12_224(pretrained=False, **kwargs) -> MlpMixer: """ Glu-Mixer-12 224x224 Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer """ model_args = dict( patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_24_224(pretrained=False, **kwargs) -> MlpMixer: """ Glu-Mixer-24 224x224 Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer """ model_args = dict( patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_12_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-12 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_24_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-24 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_36_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-36 Paper: `ResMLP: Feedforward networks for 
image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_big_24_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-B-24 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_ti16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Tiny Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_s16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Small Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_b16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Base Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) return model register_model_deprecations(__name__, { 'mixer_b16_224_in21k': 
'mixer_b16_224.goog_in21k_ft_in1k', 'mixer_l16_224_in21k': 'mixer_l16_224.goog_in21k_ft_in1k', 'mixer_b16_224_miil': 'mixer_b16_224.miil_in21k_ft_in1k', 'mixer_b16_224_miil_in21k': 'mixer_b16_224.miil_in21k', 'resmlp_12_distilled_224': 'resmlp_12_224.fb_distilled_in1k', 'resmlp_24_distilled_224': 'resmlp_24_224.fb_distilled_in1k', 'resmlp_36_distilled_224': 'resmlp_36_224.fb_distilled_in1k', 'resmlp_big_24_distilled_224': 'resmlp_big_24_224.fb_distilled_in1k', 'resmlp_big_24_224_in22ft1k': 'resmlp_big_24_224.fb_in22k_ft_in1k', 'resmlp_12_224_dino': 'resmlp_12_224', 'resmlp_24_224_dino': 'resmlp_24_224', })
pytorch-image-models/timm/models/mlp_mixer.py/0
{ "file_path": "pytorch-image-models/timm/models/mlp_mixer.py", "repo_id": "pytorch-image-models", "token_count": 15333 }
263
"""
An implementation of RepGhostNet Model as defined in:
RepGhost: A Hardware-Efficient Ghost Module via Re-parameterization. https://arxiv.org/abs/2211.06088

Original implementation: https://github.com/ChengpengChen/RepGhost
"""
import copy
from functools import partial
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, Linear, make_divisible
from ._builder import build_model_with_cfg
from ._efficientnet_blocks import SqueezeExcite, ConvBnAct
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs

__all__ = ['RepGhostNet']


# Squeeze-Excite layer used throughout, with channel rounding to a multiple of 4.
_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4))


class RepGhostModule(nn.Module):
    """Re-parameterizable ghost module.

    A point-wise "primary" conv produces intrinsic features, followed by a
    cheap depthwise conv producing the output. When ``reparam`` is True,
    parallel Identity+BN "fusion" branches are trained alongside and later
    folded into the depthwise conv via ``switch_to_deploy``.
    """

    def __init__(
            self,
            in_chs,
            out_chs,
            kernel_size=1,
            dw_size=3,
            stride=1,
            relu=True,
            reparam=True,
    ):
        super(RepGhostModule, self).__init__()
        self.out_chs = out_chs
        init_chs = out_chs
        new_chs = out_chs

        # Point-wise primary conv (conv -> BN -> optional ReLU).
        self.primary_conv = nn.Sequential(
            nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
            nn.BatchNorm2d(init_chs),
            nn.ReLU(inplace=True) if relu else nn.Identity(),
        )

        fusion_conv = []
        fusion_bn = []
        if reparam:
            # Training-time parallel branch: identity "conv" + BN, fused into
            # cheap_operation at deploy time.
            fusion_conv.append(nn.Identity())
            fusion_bn.append(nn.BatchNorm2d(init_chs))

        self.fusion_conv = nn.Sequential(*fusion_conv)
        self.fusion_bn = nn.Sequential(*fusion_bn)

        # Depthwise "cheap" conv generating the ghost features (no activation;
        # activation applied after the fusion branches are summed in forward).
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False),
            nn.BatchNorm2d(new_chs),
            # nn.ReLU(inplace=True) if relu else nn.Identity(),
        )
        self.relu = nn.ReLU(inplace=False) if relu else nn.Identity()

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        # Sum the re-param fusion branch outputs before the final activation.
        for conv, bn in zip(self.fusion_conv, self.fusion_bn):
            x2 = x2 + bn(conv(x1))
        return self.relu(x2)

    def get_equivalent_kernel_bias(self):
        """Fold BN stats and fusion branches into one depthwise kernel + bias."""
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.cheap_operation[0], self.cheap_operation[1])
        for conv, bn in zip(self.fusion_conv, self.fusion_bn):
            # Identity branch fuses to a 1x1 depthwise kernel; pad to 3x3 to add.
            kernel, bias = self._fuse_bn_tensor(conv, bn, kernel3x3.shape[0], kernel3x3.device)
            kernel3x3 += self._pad_1x1_to_3x3_tensor(kernel)
            bias3x3 += bias
        return kernel3x3, bias3x3

    @staticmethod
    def _pad_1x1_to_3x3_tensor(kernel1x1):
        # Zero-pad a 1x1 kernel to 3x3 so it can be summed with the 3x3 kernel.
        if kernel1x1 is None:
            return 0
        else:
            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    @staticmethod
    def _fuse_bn_tensor(conv, bn, in_channels=None, device=None):
        """Fuse a conv (or identity) followed by BN into an equivalent kernel/bias.

        NOTE(review): when both conv and bn are Identity, in_channels/device
        must be supplied by the caller (no running stats to read) — confirm
        callers always do so.
        """
        in_channels = in_channels if in_channels else bn.running_mean.shape[0]
        device = device if device else bn.weight.device
        if isinstance(conv, nn.Conv2d):
            kernel = conv.weight
            assert conv.bias is None
        else:
            assert isinstance(conv, nn.Identity)
            # Identity acts as a 1x1 depthwise conv with all-ones weights.
            kernel = torch.ones(in_channels, 1, 1, 1, device=device)

        if isinstance(bn, nn.BatchNorm2d):
            # Standard conv+BN folding: scale kernel by gamma/std, shift bias.
            running_mean = bn.running_mean
            running_var = bn.running_var
            gamma = bn.weight
            beta = bn.bias
            eps = bn.eps
            std = (running_var + eps).sqrt()
            t = (gamma / std).reshape(-1, 1, 1, 1)
            return kernel * t, beta - running_mean * gamma / std
        assert isinstance(bn, nn.Identity)
        return kernel, torch.zeros(in_channels).to(kernel.device)

    def switch_to_deploy(self):
        """Collapse fusion branches into cheap_operation for inference."""
        if len(self.fusion_conv) == 0 and len(self.fusion_bn) == 0:
            return  # already in deploy form
        kernel, bias = self.get_equivalent_kernel_bias()
        # Replace the (conv, BN) Sequential with a single biased depthwise conv.
        self.cheap_operation = nn.Conv2d(
            in_channels=self.cheap_operation[0].in_channels,
            out_channels=self.cheap_operation[0].out_channels,
            kernel_size=self.cheap_operation[0].kernel_size,
            padding=self.cheap_operation[0].padding,
            dilation=self.cheap_operation[0].dilation,
            groups=self.cheap_operation[0].groups,
            bias=True)
        self.cheap_operation.weight.data = kernel
        self.cheap_operation.bias.data = bias
        self.__delattr__('fusion_conv')
        self.__delattr__('fusion_bn')
        self.fusion_conv = []
        self.fusion_bn = []

    def reparameterize(self):
        # Alias for switch_to_deploy(), matching the original repo's API.
        self.switch_to_deploy()


class RepGhostBottleneck(nn.Module):
    """ RepGhost bottleneck w/
optional SE""" def __init__( self, in_chs, mid_chs, out_chs, dw_kernel_size=3, stride=1, act_layer=nn.ReLU, se_ratio=0., reparam=True, ): super(RepGhostBottleneck, self).__init__() has_se = se_ratio is not None and se_ratio > 0. self.stride = stride # Point-wise expansion self.ghost1 = RepGhostModule(in_chs, mid_chs, relu=True, reparam=reparam) # Depth-wise convolution if self.stride > 1: self.conv_dw = nn.Conv2d( mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) self.bn_dw = nn.BatchNorm2d(mid_chs) else: self.conv_dw = None self.bn_dw = None # Squeeze-and-excitation self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None # Point-wise linear projection self.ghost2 = RepGhostModule(mid_chs, out_chs, relu=False, reparam=reparam) # shortcut if in_chs == out_chs and self.stride == 1: self.shortcut = nn.Sequential() else: self.shortcut = nn.Sequential( nn.Conv2d( in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), nn.BatchNorm2d(in_chs), nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), nn.BatchNorm2d(out_chs), ) def forward(self, x): shortcut = x # 1st ghost bottleneck x = self.ghost1(x) # Depth-wise convolution if self.conv_dw is not None: x = self.conv_dw(x) x = self.bn_dw(x) # Squeeze-and-excitation if self.se is not None: x = self.se(x) # 2nd ghost bottleneck x = self.ghost2(x) x += self.shortcut(shortcut) return x class RepGhostNet(nn.Module): def __init__( self, cfgs, num_classes=1000, width=1.0, in_chans=3, output_stride=32, global_pool='avg', drop_rate=0.2, reparam=True, ): super(RepGhostNet, self).__init__() # setting of inverted residual blocks assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' self.cfgs = cfgs self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] # building first layer stem_chs = make_divisible(16 * width, 4) 
self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) self.bn1 = nn.BatchNorm2d(stem_chs) self.act1 = nn.ReLU(inplace=True) prev_chs = stem_chs # building inverted residual blocks stages = nn.ModuleList([]) block = RepGhostBottleneck stage_idx = 0 net_stride = 2 for cfg in self.cfgs: layers = [] s = 1 for k, exp_size, c, se_ratio, s in cfg: out_chs = make_divisible(c * width, 4) mid_chs = make_divisible(exp_size * width, 4) layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, reparam=reparam)) prev_chs = out_chs if s > 1: net_stride *= 2 self.feature_info.append(dict( num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) stages.append(nn.Sequential(*layers)) stage_idx += 1 out_chs = make_divisible(exp_size * width * 2, 4) stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) self.pool_dim = prev_chs = out_chs self.blocks = nn.Sequential(*stages) # building last several layers self.num_features = prev_chs self.head_hidden_size = out_chs = 1280 self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) self.act2 = nn.ReLU(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: # NOTE: cannot 
meaningfully change pooling of efficient head after creation self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
intermediates = [] stage_ends = [-1] + [int(info['module'].split('.')[-1]) for info in self.feature_info[1:]] take_indices, max_index = feature_take_indices(len(stage_ends), indices) take_indices = [stage_ends[i]+1 for i in take_indices] max_index = stage_ends[max_index] # forward pass feat_idx = 0 x = self.conv_stem(x) if feat_idx in take_indices: intermediates.append(x) x = self.bn1(x) x = self.act1(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.blocks else: stages = self.blocks[:max_index + 1] for feat_idx, stage in enumerate(stages, start=1): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(stage, x) else: x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" stage_ends = [-1] + [int(info['module'].split('.')[-1]) for info in self.feature_info[1:]] take_indices, max_index = feature_take_indices(len(stage_ends), indices) max_index = stage_ends[max_index] self.blocks = self.blocks[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def convert_to_deploy(self): repghost_model_convert(self, do_copy=False) def repghost_model_convert(model: torch.nn.Module, save_path=None, do_copy=True): """ taken from from https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py """ if do_copy: model = copy.deepcopy(model) for module in model.modules(): if hasattr(module, 'switch_to_deploy'): module.switch_to_deploy() if save_path is not None: torch.save(model.state_dict(), save_path) return model def _create_repghostnet(variant, width=1.0, pretrained=False, **kwargs): """ Constructs a RepGhostNet model """ cfgs = [ # k, t, c, SE, s # stage1 [[3, 8, 16, 0, 1]], # stage2 [[3, 24, 24, 0, 2]], [[3, 36, 24, 0, 1]], # stage3 [[5, 36, 40, 0.25, 2]], [[5, 60, 40, 0.25, 1]], # stage4 [[3, 120, 80, 0, 2]], [[3, 100, 80, 0, 1], [3, 120, 80, 0, 1], [3, 120, 80, 0, 1], [3, 240, 112, 0.25, 1], [3, 336, 112, 0.25, 1] ], # stage5 [[5, 336, 160, 0.25, 2]], [[5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1], [5, 480, 160, 0, 1], [5, 480, 160, 0.25, 1] ] ] model_kwargs = dict( cfgs=cfgs, width=width, **kwargs, ) return 
build_model_with_cfg( RepGhostNet, variant, pretrained, feature_cfg=dict(flatten_sequential=True), **model_kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'repghostnet_050.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_5x_43M_66.95.pth.tar' ), 'repghostnet_058.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_58x_60M_68.94.pth.tar' ), 'repghostnet_080.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_0_8x_96M_72.24.pth.tar' ), 'repghostnet_100.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_0x_142M_74.22.pth.tar' ), 'repghostnet_111.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_11x_170M_75.07.pth.tar' ), 'repghostnet_130.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_3x_231M_76.37.pth.tar' ), 'repghostnet_150.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_1_5x_301M_77.45.pth.tar' ), 'repghostnet_200.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/ChengpengChen/RepGhost/releases/download/RepGhost/repghostnet_2_0x_516M_78.81.pth.tar' ), }) @register_model def repghostnet_050(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-0.5x """ model = _create_repghostnet('repghostnet_050', width=0.5, pretrained=pretrained, **kwargs) return model @register_model def 
repghostnet_058(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-0.58x """ model = _create_repghostnet('repghostnet_058', width=0.58, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_080(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-0.8x """ model = _create_repghostnet('repghostnet_080', width=0.8, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_100(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.0x """ model = _create_repghostnet('repghostnet_100', width=1.0, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_111(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.11x """ model = _create_repghostnet('repghostnet_111', width=1.11, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_130(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.3x """ model = _create_repghostnet('repghostnet_130', width=1.3, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_150(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-1.5x """ model = _create_repghostnet('repghostnet_150', width=1.5, pretrained=pretrained, **kwargs) return model @register_model def repghostnet_200(pretrained=False, **kwargs) -> RepGhostNet: """ RepGhostNet-2.0x """ model = _create_repghostnet('repghostnet_200', width=2.0, pretrained=pretrained, **kwargs) return model
pytorch-image-models/timm/models/repghost.py/0
{ "file_path": "pytorch-image-models/timm/models/repghost.py", "repo_id": "pytorch-image-models", "token_count": 9456 }
264
""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/pdf/2111.09883 Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below This implementation is experimental and subject to change in manners that will break weight compat: * Size of the pos embed MLP are not spelled out in paper in terms of dim, fixed for all models? vary with num_heads? * currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head) * The specifics of the memory saving 'sequential attention' are not detailed, Christoph Reich has an impl at GitHub link above. It needs further investigation as throughput vs mem tradeoff doesn't appear beneficial. * num_heads per stage is not detailed for Huge and Giant model variants * 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts * experiments are ongoing wrt to 'main branch' norm layer use and weight init scheme Noteworthy additions over official Swin v1: * MLP relative position embedding is looking promising and adapts to different image/window sizes * This impl has been designed to allow easy change of image size with matching window size changes * Non-square image size and window size are supported Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # -------------------------------------------------------- # Swin Transformer V2 reimplementation # Copyright (c) 2021 Christoph Reich # Licensed under The MIT License [see LICENSE for details] # Written by Christoph Reich # -------------------------------------------------------- import logging import math from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert, 
ndgrid from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import named_apply, checkpoint from ._registry import generate_default_cfgs, register_model __all__ = ['SwinTransformerV2Cr'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor: """Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C).""" return x.permute(0, 2, 3, 1) def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor: """Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W).""" return x.permute(0, 3, 1, 2) def window_partition(x: torch.Tensor, window_size: Tuple[int, int]) -> torch.Tensor: """Partition into non-overlapping windows. Args: x: Input tensor of shape (B, H, W, C). window_size: Window size (height, width). Returns: Windows tensor of shape (num_windows*B, window_size[0], window_size[1], C). """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows: torch.Tensor, window_size: Tuple[int, int], img_size: Tuple[int, int]) -> torch.Tensor: """Merge windows back to feature map. Args: windows: Windows tensor of shape (num_windows * B, window_size[0], window_size[1], C). window_size: Window size (height, width). img_size: Image size (height, width). Returns: Feature map tensor of shape (B, H, W, C). 
""" H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowMultiHeadAttention(nn.Module): r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias. Args: dim (int): Number of input features window_size (int): Window size num_heads (int): Number of attention heads drop_attn (float): Dropout rate of attention map drop_proj (float): Dropout rate after projection meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network sequential_attn (bool): If true sequential self-attention is performed """ def __init__( self, dim: int, num_heads: int, window_size: Tuple[int, int], drop_attn: float = 0.0, drop_proj: float = 0.0, meta_hidden_dim: int = 384, # FIXME what's the optimal value? sequential_attn: bool = False, ) -> None: super(WindowMultiHeadAttention, self).__init__() assert dim % num_heads == 0, \ "The number of input features (in_features) are not divisible by the number of heads (num_heads)." self.in_features: int = dim self.window_size: Tuple[int, int] = to_2tuple(window_size) self.num_heads: int = num_heads self.sequential_attn: bool = sequential_attn self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True) self.attn_drop = nn.Dropout(drop_attn) self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True) self.proj_drop = nn.Dropout(drop_proj) # meta network for positional encodings self.meta_mlp = Mlp( 2, # x, y hidden_features=meta_hidden_dim, out_features=num_heads, act_layer=nn.ReLU, drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without? 
) # NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads))) self._make_pair_wise_relative_positions() def _make_pair_wise_relative_positions(self) -> None: """Initialize the pair-wise relative positions to compute the positional biases.""" device = self.logit_scale.device coordinates = torch.stack(ndgrid( torch.arange(self.window_size[0], device=device), torch.arange(self.window_size[1], device=device) ), dim=0).flatten(1) relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :] relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float() relative_coordinates_log = torch.sign(relative_coordinates) * torch.log( 1.0 + relative_coordinates.abs()) self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False) def set_window_size(self, window_size: Tuple[int, int]) -> None: """Update window size and regenerate relative position coordinates. Args: window_size: New window size. """ window_size = to_2tuple(window_size) if window_size != self.window_size: self.window_size = window_size self._make_pair_wise_relative_positions() def _relative_positional_encodings(self) -> torch.Tensor: """Compute the relative positional encodings. Returns: Relative positional encodings of shape (1, num_heads, window_size**2, window_size**2). """ window_area = self.window_size[0] * self.window_size[1] relative_position_bias = self.meta_mlp(self.relative_coordinates_log) relative_position_bias = relative_position_bias.transpose(1, 0).reshape( self.num_heads, window_area, window_area ) relative_position_bias = relative_position_bias.unsqueeze(0) return relative_position_bias def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: """Forward pass of window multi-head self-attention. Args: x: Input tensor of shape (B * windows, N, C). mask: Attention mask for the shift case. 
Returns: Output tensor of shape (B * windows, N, C). """ Bw, L, C = x.shape qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) query, key, value = qkv.unbind(0) # compute attention map with scaled cosine attention attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1)) logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp() attn = attn * logit_scale attn = attn + self._relative_positional_encodings() if mask is not None: # Apply mask if utilized num_win: int = mask.shape[0] attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L) attn = attn + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, L, L) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerV2CrBlock(nn.Module): r"""This class implements the Swin transformer block. 
Args: dim (int): Number of input channels num_heads (int): Number of attention heads to be utilized feat_size (Tuple[int, int]): Input resolution window_size (Tuple[int, int]): Window size to be utilized shift_size (int): Shifting size to be used mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels proj_drop (float): Dropout in input mapping drop_attn (float): Dropout rate of attention map drop_path (float): Dropout in main path extra_norm (bool): Insert extra norm on 'main' branch if True sequential_attn (bool): If true sequential self-attention is performed norm_layer (Type[nn.Module]): Type of normalization layer to be utilized """ def __init__( self, dim: int, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], shift_size: Tuple[int, int] = (0, 0), always_partition: bool = False, dynamic_mask: bool = False, mlp_ratio: float = 4.0, init_values: Optional[float] = 0, proj_drop: float = 0.0, drop_attn: float = 0.0, drop_path: float = 0.0, extra_norm: bool = False, sequential_attn: bool = False, norm_layer: Type[nn.Module] = nn.LayerNorm, ): super(SwinTransformerV2CrBlock, self).__init__() self.dim: int = dim self.feat_size: Tuple[int, int] = feat_size self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size) self.always_partition = always_partition self.dynamic_mask = dynamic_mask self.window_size, self.shift_size = self._calc_window_shift(window_size) self.window_area = self.window_size[0] * self.window_size[1] self.init_values: Optional[float] = init_values # attn branch self.attn = WindowMultiHeadAttention( dim=dim, num_heads=num_heads, window_size=self.window_size, drop_attn=drop_attn, drop_proj=proj_drop, sequential_attn=sequential_attn, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() # mlp branch self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), drop=proj_drop, out_features=dim, ) self.norm2 = norm_layer(dim) 
self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() # Extra main branch norm layer mentioned for Huge/Giant models in V2 paper. # Also being used as final network norm and optional stage ending norm while still in a C-last format. self.norm3 = norm_layer(dim) if extra_norm else nn.Identity() self.register_buffer( "attn_mask", None if self.dynamic_mask else self.get_attn_mask(), persistent=False, ) self.init_weights() def _calc_window_shift( self, target_window_size: Tuple[int, int], ) -> Tuple[Tuple[int, int], Tuple[int, int]]: target_window_size = to_2tuple(target_window_size) target_shift_size = self.target_shift_size if any(target_shift_size): # if non-zero, recalculate shift from current window size in case window size has changed target_shift_size = (target_window_size[0] // 2, target_window_size[1] // 2) if self.always_partition: return target_window_size, target_shift_size window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)] shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, target_shift_size)] return tuple(window_size), tuple(shift_size) def get_attn_mask(self, x: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]: """Method generates the attention mask used in shift case.""" # Make masks for shift case if any(self.shift_size): # calculate attention mask for SW-MSA if x is None: img_mask = torch.zeros((1, *self.feat_size, 1)) # 1 H W 1 else: img_mask = torch.zeros((1, x.shape[1], x.shape[2], 1), dtype=x.dtype, device=x.device) # 1 H W 1 cnt = 0 for h in ( (0, -self.window_size[0]), (-self.window_size[0], -self.shift_size[0]), (-self.shift_size[0], None), ): for w in ( (0, -self.window_size[1]), (-self.window_size[1], -self.shift_size[1]), (-self.shift_size[1], None), ): img_mask[:, h[0]:h[1], w[0]:w[1], :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1 mask_windows = 
mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def init_weights(self): # extra, module specific weight init if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def set_input_size(self, feat_size: Tuple[int, int], window_size: Tuple[int, int]) -> None: """Method updates the image resolution to be processed and window size and so the pair-wise relative positions. Args: feat_size (Tuple[int, int]): New input resolution window_size (int): New window size """ # Update input resolution self.feat_size: Tuple[int, int] = feat_size self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size)) self.window_area = self.window_size[0] * self.window_size[1] self.attn.set_window_size(self.window_size) self.register_buffer( "attn_mask", None if self.dynamic_mask else self.get_attn_mask(), persistent=False, ) def _shifted_window_attn(self, x): B, H, W, C = x.shape # cyclic shift sh, sw = self.shift_size do_shift: bool = any(self.shift_size) if do_shift: # FIXME PyTorch XLA needs cat impl, roll not lowered # x = torch.cat([x[:, sh:], x[:, :sh]], dim=1) # x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2) x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2)) pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] x = torch.nn.functional.pad(x, (0, 0, 0, pad_w, 0, pad_h)) _, Hp, Wp, _ = x.shape # partition windows x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) # W-MSA/SW-MSA if getattr(self, 'dynamic_mask', False): attn_mask = self.get_attn_mask(x) else: 
attn_mask = self.attn_mask attn_windows = self.attn(x_windows, mask=attn_mask) # num_windows * B, window_size * window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) x = window_reverse(attn_windows, self.window_size, (Hp, Wp)) # B H' W' C x = x[:, :H, :W, :].contiguous() # reverse cyclic shift if do_shift: # FIXME PyTorch XLA needs cat impl, roll not lowered # x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1) # x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2) x = torch.roll(x, shifts=(sh, sw), dims=(1, 2)) return x def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass of Swin Transformer V2 block. Args: x: Input tensor of shape [B, C, H, W]. Returns: Output tensor of shape [B, C, H, W]. """ # post-norm branches (op -> norm -> drop) x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x))) B, H, W, C = x.shape x = x.reshape(B, -1, C) x = x + self.drop_path2(self.norm2(self.mlp(x))) x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): """Patch merging layer. This class implements the patch merging as a strided convolution with a normalization before. """ def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None: """Initialize patch merging layer. Args: dim: Number of input channels. norm_layer: Type of normalization layer to be utilized. """ super(PatchMerging, self).__init__() self.norm = norm_layer(4 * dim) self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass of patch merging. Args: x: Input tensor of shape [B, C, H, W]. Returns: Output tensor of shape [B, 2 * C, H // 2, W // 2]. 
""" B, H, W, C = x.shape pad_values = (0, 0, 0, W % 2, 0, H % 2) x = nn.functional.pad(x, pad_values) _, H, W, _ = x.shape x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.norm(x) x = self.reduction(x) return x class PatchEmbed(nn.Module): """2D Image to Patch Embedding.""" def __init__( self, img_size: Union[int, Tuple[int, int]] = 224, patch_size: Union[int, Tuple[int, int]] = 16, in_chans: int = 3, embed_dim: int = 768, norm_layer: Optional[Callable] = None, strict_img_size: bool = True, ) -> None: """Initialize patch embedding. Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of input channels. embed_dim: Embedding dimension. norm_layer: Normalization layer. strict_img_size: Enforce strict image size. """ super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.strict_img_size = strict_img_size self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def set_input_size(self, img_size: Tuple[int, int]) -> None: """Update input image size. Args: img_size: New image size. """ img_size = to_2tuple(img_size) if img_size != self.img_size: self.img_size = img_size self.grid_size = (img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass of patch embedding. Args: x: Input tensor of shape [B, C, H, W]. Returns: Output tensor of shape [B, C', H', W']. 
""" B, C, H, W = x.shape if self.strict_img_size: _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") x = self.proj(x) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x class SwinTransformerV2CrStage(nn.Module): r"""This class implements a stage of the Swin transformer including multiple layers. Args: embed_dim (int): Number of input channels depth (int): Depth of the stage (number of layers) downscale (bool): If true input is downsampled (see Fig. 3 or V1 paper) feat_size (Tuple[int, int]): input feature map size (H, W) num_heads (int): Number of attention heads to be utilized window_size (int): Window size to be utilized mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels proj_drop (float): Dropout in input mapping drop_attn (float): Dropout rate of attention map drop_path (float): Dropout in main path norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. 
Default: nn.LayerNorm extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks extra_norm_stage (bool): End each stage with an extra norm layer in main branch sequential_attn (bool): If true sequential self-attention is performed """ def __init__( self, embed_dim: int, depth: int, downscale: bool, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], always_partition: bool = False, dynamic_mask: bool = False, mlp_ratio: float = 4.0, init_values: Optional[float] = 0.0, proj_drop: float = 0.0, drop_attn: float = 0.0, drop_path: Union[List[float], float] = 0.0, norm_layer: Type[nn.Module] = nn.LayerNorm, extra_norm_period: int = 0, extra_norm_stage: bool = False, sequential_attn: bool = False, ): super(SwinTransformerV2CrStage, self).__init__() self.downscale: bool = downscale self.grad_checkpointing: bool = False self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size if downscale: self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer) embed_dim = embed_dim * 2 else: self.downsample = nn.Identity() def _extra_norm(index): i = index + 1 if extra_norm_period and i % extra_norm_period == 0: return True return i == depth if extra_norm_stage else False self.blocks = nn.Sequential(*[ SwinTransformerV2CrBlock( dim=embed_dim, num_heads=num_heads, feat_size=self.feat_size, window_size=window_size, always_partition=always_partition, dynamic_mask=dynamic_mask, shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]), mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop, drop_attn=drop_attn, drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path, extra_norm=_extra_norm(index), sequential_attn=sequential_attn, norm_layer=norm_layer, ) for index in range(depth)] ) def set_input_size( self, feat_size: Tuple[int, int], window_size: int, always_partition: Optional[bool] = None, ): """ Updates the resolution to utilize and the 
window size and so the pair-wise relative positions. Args: window_size (int): New window size feat_size (Tuple[int, int]): New input resolution """ self.feat_size = (feat_size[0] // 2, feat_size[1] // 2) if self.downscale else feat_size for block in self.blocks: block.set_input_size( feat_size=self.feat_size, window_size=window_size, ) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x (torch.Tensor): Input tensor of the shape [B, C, H, W] or [B, L, C] Returns: output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] """ x = bchw_to_bhwc(x) x = self.downsample(x) for block in self.blocks: # Perform checkpointing if utilized if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(block, x) else: x = block(x) x = bhwc_to_bchw(x) return x class SwinTransformerV2Cr(nn.Module): r""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/pdf/2111.09883 Args: img_size: Input resolution. window_size: Window size. If None, grid_size // window_div window_ratio: Window size to patch grid ratio. patch_size: Patch size. in_chans: Number of input channels. depths: Depth of the stage (number of layers). num_heads: Number of attention heads to be utilized. embed_dim: Patch embedding dimension. num_classes: Number of output classes. mlp_ratio: Ratio of the hidden dimension in the FFN to the input channels. drop_rate: Dropout rate. proj_drop_rate: Projection dropout rate. attn_drop_rate: Dropout rate of attention map. drop_path_rate: Stochastic depth rate. norm_layer: Type of normalization layer to be utilized. extra_norm_period: Insert extra norm layer on main branch every N (period) blocks in stage extra_norm_stage: End each stage with an extra norm layer in main branch sequential_attn: If true sequential self-attention is performed. 
""" def __init__( self, img_size: Tuple[int, int] = (224, 224), patch_size: int = 4, window_size: Optional[int] = None, window_ratio: int = 8, always_partition: bool = False, strict_img_size: bool = True, in_chans: int = 3, num_classes: int = 1000, embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), num_heads: Tuple[int, ...] = (3, 6, 12, 24), mlp_ratio: float = 4.0, init_values: Optional[float] = 0., drop_rate: float = 0.0, proj_drop_rate: float = 0.0, attn_drop_rate: float = 0.0, drop_path_rate: float = 0.0, norm_layer: Type[nn.Module] = nn.LayerNorm, extra_norm_period: int = 0, extra_norm_stage: bool = False, sequential_attn: bool = False, global_pool: str = 'avg', weight_init='skip', **kwargs: Any ) -> None: super(SwinTransformerV2Cr, self).__init__() img_size = to_2tuple(img_size) self.num_classes: int = num_classes self.patch_size: int = patch_size self.img_size: Tuple[int, int] = img_size self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.feature_info = [] self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer, strict_img_size=strict_img_size, ) grid_size = self.patch_embed.grid_size if window_size is None: self.window_size = tuple([s // window_ratio for s in grid_size]) else: self.window_size = to_2tuple(window_size) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] in_dim = embed_dim in_scale = 1 for stage_idx, (depth, num_heads) in enumerate(zip(depths, num_heads)): stages += [SwinTransformerV2CrStage( embed_dim=in_dim, depth=depth, downscale=stage_idx != 0, feat_size=(grid_size[0] // in_scale, grid_size[1] // in_scale), num_heads=num_heads, window_size=self.window_size, always_partition=always_partition, dynamic_mask=not strict_img_size, mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop_rate, drop_attn=attn_drop_rate, drop_path=dpr[stage_idx], 
extra_norm_period=extra_norm_period, extra_norm_stage=extra_norm_stage or (stage_idx + 1) == len(depths), # last stage ends w/ norm sequential_attn=sequential_attn, norm_layer=norm_layer, )] if stage_idx != 0: in_dim *= 2 in_scale *= 2 self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')] self.stages = nn.Sequential(*stages) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) # current weight init skips custom init and uses pytorch layer defaults, seems to work well # FIXME more experiments needed if weight_init != 'skip': named_apply(init_weights, self) def set_input_size( self, img_size: Optional[Tuple[int, int]] = None, window_size: Optional[Tuple[int, int]] = None, window_ratio: int = 8, always_partition: Optional[bool] = None, ) -> None: """Updates the image resolution, window size and so the pair-wise relative positions. Args: img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used window_size (Optional[int]): New window size, if None based on new_img_size // window_div window_ratio (int): divisor for calculating window size from patch grid size always_partition: always partition / shift windows even if feat size is < window """ if img_size is not None: self.patch_embed.set_input_size(img_size=img_size) grid_size = self.patch_embed.grid_size if window_size is None and window_ratio is not None: window_size = tuple([s // window_ratio for s in grid_size]) for index, stage in enumerate(self.stages): stage_scale = 2 ** max(index - 1, 0) stage.set_input_size( feat_size=(grid_size[0] // stage_scale, grid_size[1] // stage_scale), window_size=window_size, always_partition=always_partition, ) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^patch_embed', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', None), ] ) 
    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # Toggle gradient checkpointing on every stage.
        for s in self.stages:
            s.grad_checkpointing = enable

    @torch.jit.ignore()
    def get_classifier(self) -> nn.Module:
        """Method returns the classification head of the model.
        Returns:
            head (nn.Module): Current classification head
        """
        return self.head.fc

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
        """Method resets the classification head.

        Args:
            num_classes (int): Number of classes to be predicted
            global_pool (str): New pooling type, kept unchanged if None
        """
        self.num_classes = num_classes
        self.head.reset(num_classes, global_pool)

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            norm: bool = False,
            stop_early: bool = False,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        """ Forward features that returns intermediates.

        Args:
            x: Input image tensor
            indices: Take last n blocks if int, all if None, select matching indices if sequence
            norm: Apply norm layer to compatible intermediates (unused here)
            stop_early: Stop iterating over blocks when last desired intermediate hit
            output_fmt: Shape of intermediate feature outputs
            intermediates_only: Only return intermediate features
        Returns:

        """
        assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
        intermediates = []
        take_indices, max_index = feature_take_indices(len(self.stages), indices)

        # forward pass
        x = self.patch_embed(x)

        if torch.jit.is_scripting() or not stop_early:  # can't slice blocks in torchscript
            stages = self.stages
        else:
            stages = self.stages[:max_index + 1]
        for i, stage in enumerate(stages):
            x = stage(x)
            if i in take_indices:
                intermediates.append(x)

        if intermediates_only:
            return intermediates

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
    ):
        """ Prune layers not required for specified intermediates.
        """
        take_indices, max_index = feature_take_indices(len(self.stages), indices)
        self.stages = self.stages[:max_index + 1]  # truncate blocks
        if prune_head:
            self.reset_classifier(0, '')
        return take_indices

    def forward_features(self, x: torch.Tensor) -> torch.Tensor:
        # Stem then all stages; returns final feature map.
        x = self.patch_embed(x)
        x = self.stages(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        # pre_logits=True returns pooled features without the final Linear.
        return self.head(x, pre_logits=True) if pre_logits else self.head(x)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def init_weights(module: nn.Module, name: str = ''):
    """Custom weight init applied via named_apply when weight_init != 'skip'."""
    # FIXME WIP determining if there's a better weight init
    if isinstance(module, nn.Linear):
        if 'qkv' in name:
            # treat the weights of Q, K, V separately
            val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
            nn.init.uniform_(module.weight, -val, val)
        elif 'head' in name:
            nn.init.zeros_(module.weight)
        else:
            nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif hasattr(module, 'init_weights'):
        module.init_weights()


def checkpoint_filter_fn(state_dict, model):
    """ convert patch embedding weight from manual patchify + linear proj to conv"""
    state_dict = state_dict.get('model', state_dict)
    state_dict = state_dict.get('state_dict', state_dict)
    if 'head.fc.weight' in state_dict:
        # Already in current timm format; nothing to remap.
        return state_dict
    out_dict = {}
    for k, v in state_dict.items():
        if 'tau' in k:
            # convert old tau based checkpoints -> logit_scale (inverse)
            v = torch.log(1 / v)
            k = k.replace('tau', 'logit_scale')
        k = k.replace('head.', 'head.fc.')
        out_dict[k] = v
    return out_dict


def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs):
    """Factory helper shared by all swinv2_cr entrypoints below."""
    # Default to one feature output per stage.
    default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1))))
    out_indices = kwargs.pop('out_indices', default_out_indices)

    model = build_model_with_cfg(
        SwinTransformerV2Cr, variant, pretrained,
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
        **kwargs
    )
    return model


def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Create a default configuration dictionary.

    Args:
        url: Model weights URL.
        **kwargs: Additional configuration parameters.

    Returns:
        Configuration dictionary.
    """
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.9,
        'interpolation': 'bicubic',
        'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj',
        'classifier': 'head.fc',
        **kwargs,
    }


default_cfgs = generate_default_cfgs({
    'swinv2_cr_tiny_384.untrained': _cfg(
        url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
    'swinv2_cr_tiny_224.untrained': _cfg(
        url="", input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_tiny_ns_224.sw_in1k': _cfg(
        hf_hub_id='timm/',
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth",
        input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_small_384.untrained': _cfg(
        url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
    'swinv2_cr_small_224.sw_in1k': _cfg(
        hf_hub_id='timm/',
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth",
        input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_small_ns_224.sw_in1k': _cfg(
        hf_hub_id='timm/',
        url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth",
        input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_small_ns_256.untrained': _cfg(
        url="", input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)),
    'swinv2_cr_base_384.untrained': _cfg(
        url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
    'swinv2_cr_base_224.untrained': _cfg(
        url="", input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_base_ns_224.untrained': _cfg(
        url="", input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_large_384.untrained': _cfg(
        url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
    'swinv2_cr_large_224.untrained': _cfg(
        url="", input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_huge_384.untrained': _cfg(
        url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
    'swinv2_cr_huge_224.untrained': _cfg(
        url="", input_size=(3, 224, 224), crop_pct=0.9),
    'swinv2_cr_giant_384.untrained': _cfg(
        url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)),
    'swinv2_cr_giant_224.untrained': _cfg(
        url="", input_size=(3, 224, 224), crop_pct=0.9),
})


@register_model
def swinv2_cr_tiny_384(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-T V2 CR @ 384x384, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=96,
        depths=(2, 2, 6, 2),
        num_heads=(3, 6, 12, 24),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_tiny_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-T V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=96,
        depths=(2, 2, 6, 2),
        num_heads=(3, 6, 12, 24),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_tiny_ns_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms.
    ** Experimental, may make default if results are improved. **
    """
    model_args = dict(
        embed_dim=96,
        depths=(2, 2, 6, 2),
        num_heads=(3, 6, 12, 24),
        extra_norm_stage=True,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_small_384(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-S V2 CR @ 384x384, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=96,
        depths=(2, 2, 18, 2),
        num_heads=(3, 6, 12, 24),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_small_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-S V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=96,
        depths=(2, 2, 18, 2),
        num_heads=(3, 6, 12, 24),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_small_ns_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-S V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=96,
        depths=(2, 2, 18, 2),
        num_heads=(3, 6, 12, 24),
        extra_norm_stage=True,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_small_ns_256(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-S V2 CR @ 256x256, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=96,
        depths=(2, 2, 18, 2),
        num_heads=(3, 6, 12, 24),
        extra_norm_stage=True,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_base_384(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-B V2 CR @ 384x384, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=128,
        depths=(2, 2, 18, 2),
        num_heads=(4, 8, 16, 32),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_base_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-B V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=128,
        depths=(2, 2, 18, 2),
        num_heads=(4, 8, 16, 32),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_base_ns_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-B V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=128,
        depths=(2, 2, 18, 2),
        num_heads=(4, 8, 16, 32),
        extra_norm_stage=True,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_large_384(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-L V2 CR @ 384x384, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_large_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-L V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_huge_384(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-H V2 CR @ 384x384, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=352,
        depths=(2, 2, 18, 2),
        num_heads=(11, 22, 44, 88),  # head count not certain for Huge, 384 & 224 trying diff values
        extra_norm_period=6,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_huge_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-H V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=352,
        depths=(2, 2, 18, 2),
        num_heads=(8, 16, 32, 64),  # head count not certain for Huge, 384 & 224 trying diff values
        extra_norm_period=6,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_giant_384(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-G V2 CR @ 384x384, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=512,
        depths=(2, 2, 42, 2),
        num_heads=(16, 32, 64, 128),
        extra_norm_period=6,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def swinv2_cr_giant_224(pretrained: bool = False, **kwargs) -> SwinTransformerV2Cr:
    """Swin-G V2 CR @ 224x224, trained ImageNet-1k."""
    model_args = dict(
        embed_dim=512,
        depths=(2, 2, 42, 2),
        num_heads=(16, 32, 64, 128),
        extra_norm_period=6,
    )
    return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/swin_transformer_v2_cr.py/0
{ "file_path": "pytorch-image-models/timm/models/swin_transformer_v2_cr.py", "repo_id": "pytorch-image-models", "token_count": 21802 }
265
""" Cross-Covariance Image Transformer (XCiT) in PyTorch Paper: - https://arxiv.org/abs/2106.09681 Same as the official implementation, with some minor adaptations, original copyright below - https://github.com/facebookresearch/xcit/blob/master/xcit.py Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. import math from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, to_2tuple, use_fused_attn, Mlp from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_module from ._manipulate import checkpoint from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .cait import ClassAttn __all__ = ['Xcit'] # model_registry will add each entrypoint fn to this @register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method class PositionalEncodingFourier(nn.Module): """ Positional encoding relying on a fourier kernel matching the one used in the "Attention is all you Need" paper. 
Based on the official XCiT code - https://github.com/facebookresearch/xcit/blob/master/xcit.py """ def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim self.eps = 1e-6 def forward(self, B: int, H: int, W: int): device = self.token_projection.weight.device dtype = self.token_projection.weight.dtype y_embed = torch.arange(1, H + 1, device=device).to(torch.float32).unsqueeze(1).repeat(1, 1, W) x_embed = torch.arange(1, W + 1, device=device).to(torch.float32).repeat(1, H, 1) y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale dim_t = torch.arange(self.hidden_dim, device=device).to(torch.float32) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos.to(dtype)) return pos.repeat(B, 1, 1, 1) # (B, C, H, W) def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution + batch norm""" return torch.nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), nn.BatchNorm2d(out_planes) ) class ConvPatchEmbed(nn.Module): """Image to Patch Embedding using multiple convolutional layers""" def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): super().__init__() img_size = to_2tuple(img_size) num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) self.img_size = img_size self.patch_size = patch_size 
self.num_patches = num_patches if patch_size == 16: self.proj = torch.nn.Sequential( conv3x3(in_chans, embed_dim // 8, 2), act_layer(), conv3x3(embed_dim // 8, embed_dim // 4, 2), act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2), ) elif patch_size == 8: self.proj = torch.nn.Sequential( conv3x3(in_chans, embed_dim // 4, 2), act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2), ) else: raise('For convolutional projection, patch size has to be in [8, 16]') def forward(self, x): x = self.proj(x) Hp, Wp = x.shape[2], x.shape[3] x = x.flatten(2).transpose(1, 2) # (B, N, C) return x, (Hp, Wp) class LPI(nn.Module): """ Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the implicit communication performed by the block diagonal scatter attention. Implemented using 2 layers of separable 3x3 convolutions with GeLU and BatchNorm2d """ def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): super().__init__() out_features = out_features or in_features padding = kernel_size // 2 self.conv1 = torch.nn.Conv2d( in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) self.act = act_layer() self.bn = nn.BatchNorm2d(in_features) self.conv2 = torch.nn.Conv2d( in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) def forward(self, x, H: int, W: int): B, N, C = x.shape x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.conv1(x) x = self.act(x) x = self.bn(x) x = self.conv2(x) x = x.reshape(B, C, N).permute(0, 2, 1) return x class ClassAttentionBlock(nn.Module): """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False, ): 
super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttn( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) if eta is not None: # LayerScale Initialization (no layerscale when None) self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) else: self.gamma1, self.gamma2 = 1.0, 1.0 # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 self.tokens_norm = tokens_norm def forward(self, x): x_norm1 = self.norm1(x) x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) x = x + self.drop_path(self.gamma1 * x_attn) if self.tokens_norm: x = self.norm2(x) else: x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) x_res = x cls_token = x[:, 0:1] cls_token = self.gamma2 * self.mlp(cls_token) x = torch.cat([cls_token, x[:, 1:]], dim=1) x = x_res + self.drop_path(x) return x class XCA(nn.Module): fused_attn: torch.jit.Final[bool] """ Cross-Covariance Attention (XCA) Operation where the channels are updated using a weighted sum. 
The weights are obtained from the (softmax normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) """ def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads self.fused_attn = use_fused_attn(experimental=True) self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) if self.fused_attn: q = torch.nn.functional.normalize(q, dim=-1) * self.temperature k = torch.nn.functional.normalize(k, dim=-1) x = torch.nn.functional.scaled_dot_product_attention(q, k, v, scale=1.0) else: # Paper section 3.2 l2-Normalization and temperature scaling q = torch.nn.functional.normalize(q, dim=-1) k = torch.nn.functional.normalize(k, dim=-1) attn = (q @ k.transpose(-2, -1)) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class XCABlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., ): super().__init__() self.norm1 = norm_layer(dim) self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm3 = norm_layer(dim) self.local_mp = LPI(in_features=dim, act_layer=act_layer) self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma3 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) def forward(self, x, H: int, W: int): x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) return x class Xcit(nn.Module): """ Based on timm and DeiT code bases https://github.com/rwightman/pytorch-image-models/tree/master/timm https://github.com/facebookresearch/deit/ """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False, ): """ Args: img_size (int, tuple): input image size patch_size (int): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP pos_drop_rate: position embedding dropout rate proj_drop_rate (float): projection dropout rate attn_drop_rate (float): attention dropout rate drop_path_rate 
(float): stochastic depth rate (constant across all layers) norm_layer: (nn.Module): normalization layer cls_attn_layers: (int) Depth of Class attention layers use_pos_embed: (bool) whether to use positional encoding eta: (float) layerscale initialization value tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA Notes: - Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch interaction (class LPI) and the patch embedding (class ConvPatchEmbed) """ super().__init__() assert global_pool in ('', 'avg', 'token') img_size = to_2tuple(img_size) assert (img_size[0] % patch_size == 0) and (img_size[0] % patch_size == 0), \ '`patch_size` should divide image dimensions evenly' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.num_features = self.head_hidden_size = self.embed_dim = embed_dim self.global_pool = global_pool self.grad_checkpointing = False self.patch_embed = ConvPatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer, ) r = patch_size self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_pos_embed: self.pos_embed = PositionalEncodingFourier(dim=embed_dim) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) self.blocks = nn.ModuleList([ XCABlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, ) for _ in range(depth)]) self.feature_info = [dict(num_chs=embed_dim, reduction=r, module=f'blocks.{i}') for i in range(depth)] self.cls_attn_blocks = nn.ModuleList([ ClassAttentionBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=drop_rate, attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, 
tokens_norm=tokens_norm, ) for _ in range(cls_attn_layers)]) # Classifier head self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # Init weights trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=r'^blocks\.(\d+)', cls_attn_blocks=[(r'^cls_attn_blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. 
Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' reshape = output_fmt == 'NCHW' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # forward pass B, _, height, width = x.shape x, (Hp, Wp) = self.patch_embed(x) if self.pos_embed is not None: # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = self.pos_drop(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, Hp, Wp) else: x = blk(x, Hp, Wp) if i in take_indices: # normalize intermediates with final norm layer if enabled intermediates.append(self.norm(x) if norm else x) # process intermediates if reshape: # reshape to BCHW output format intermediates = [y.reshape(B, Hp, Wp, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if intermediates_only: return intermediates # NOTE not supporting return of class tokens x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) for blk in self.cls_attn_blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.cls_attn_blocks = nn.ModuleList() # prune token blocks with head self.reset_classifier(0, '') return take_indices def forward_features(self, x): B = x.shape[0] # x is (B, N, C). (Hp, Hw) is (height in units of patches, width in units of patches) x, (Hp, Wp) = self.patch_embed(x) if self.pos_embed is not None: # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = self.pos_drop(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, Hp, Wp) else: x = blk(x, Hp, Wp) x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) for blk in self.cls_attn_blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] # For consistency with timm's transformer models while being compatible with official weights source we rename # pos_embeder to pos_embed. 
Also account for use_pos_embed == False use_pos_embed = getattr(model, 'pos_embed', None) is not None pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] for k in pos_embed_keys: if use_pos_embed: state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) else: del state_dict[k] # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): num_ca_blocks = len(model.cls_attn_blocks) for i in range(num_ca_blocks): qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) for j, subscript in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) if qkv_bias is not None: qkv_bias = qkv_bias.reshape(3, -1) for j, subscript in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] return state_dict def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg( Xcit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # Patch size 16 'xcit_nano_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', 
url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), 'xcit_nano_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), 'xcit_nano_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), 'xcit_tiny_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), 'xcit_tiny_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), 'xcit_tiny_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), 'xcit_tiny_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), 'xcit_small_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), 'xcit_small_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), 'xcit_small_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), 'xcit_small_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', 
url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), 'xcit_medium_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), 'xcit_medium_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), 'xcit_large_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), 'xcit_large_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), # Patch size 8 'xcit_nano_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), 'xcit_nano_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), 'xcit_nano_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), 'xcit_tiny_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), 'xcit_tiny_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), 'xcit_tiny_24_p8_224.fb_dist_in1k': _cfg( 
hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), 'xcit_tiny_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), 'xcit_small_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), 'xcit_small_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), 'xcit_small_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), 'xcit_small_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), 'xcit_medium_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), 'xcit_medium_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), 'xcit_large_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), 'xcit_large_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)), }) @register_model def 
xcit_nano_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384) model = _create_xcit('xcit_nano_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **dict(model_args, 
**kwargs)) return model @register_model def xcit_tiny_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_384', 
pretrained=pretrained, **dict(model_args, **kwargs)) return model # Patch size 8x8 models @register_model def xcit_nano_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = 
_create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, 
tokens_norm=True) model = _create_xcit('xcit_large_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { # Patch size 16 'xcit_nano_12_p16_224_dist': 'xcit_nano_12_p16_224.fb_dist_in1k', 'xcit_nano_12_p16_384_dist': 'xcit_nano_12_p16_384.fb_dist_in1k', 'xcit_tiny_12_p16_224_dist': 'xcit_tiny_12_p16_224.fb_dist_in1k', 'xcit_tiny_12_p16_384_dist': 'xcit_tiny_12_p16_384.fb_dist_in1k', 'xcit_tiny_24_p16_224_dist': 'xcit_tiny_24_p16_224.fb_dist_in1k', 'xcit_tiny_24_p16_384_dist': 'xcit_tiny_24_p16_384.fb_dist_in1k', 'xcit_small_12_p16_224_dist': 'xcit_small_12_p16_224.fb_dist_in1k', 'xcit_small_12_p16_384_dist': 'xcit_small_12_p16_384.fb_dist_in1k', 'xcit_small_24_p16_224_dist': 'xcit_small_24_p16_224.fb_dist_in1k', 'xcit_small_24_p16_384_dist': 'xcit_small_24_p16_384.fb_dist_in1k', 'xcit_medium_24_p16_224_dist': 'xcit_medium_24_p16_224.fb_dist_in1k', 'xcit_medium_24_p16_384_dist': 'xcit_medium_24_p16_384.fb_dist_in1k', 'xcit_large_24_p16_224_dist': 'xcit_large_24_p16_224.fb_dist_in1k', 'xcit_large_24_p16_384_dist': 'xcit_large_24_p16_384.fb_dist_in1k', # Patch size 8 'xcit_nano_12_p8_224_dist': 'xcit_nano_12_p8_224.fb_dist_in1k', 'xcit_nano_12_p8_384_dist': 'xcit_nano_12_p8_384.fb_dist_in1k', 'xcit_tiny_12_p8_224_dist': 'xcit_tiny_12_p8_224.fb_dist_in1k', 'xcit_tiny_12_p8_384_dist': 'xcit_tiny_12_p8_384.fb_dist_in1k', 'xcit_tiny_24_p8_224_dist': 'xcit_tiny_24_p8_224.fb_dist_in1k', 'xcit_tiny_24_p8_384_dist': 'xcit_tiny_24_p8_384.fb_dist_in1k', 'xcit_small_12_p8_224_dist': 'xcit_small_12_p8_224.fb_dist_in1k', 'xcit_small_12_p8_384_dist': 'xcit_small_12_p8_384.fb_dist_in1k', 'xcit_small_24_p8_224_dist': 'xcit_small_24_p8_224.fb_dist_in1k', 'xcit_small_24_p8_384_dist': 'xcit_small_24_p8_384.fb_dist_in1k', 'xcit_medium_24_p8_224_dist': 'xcit_medium_24_p8_224.fb_dist_in1k', 'xcit_medium_24_p8_384_dist': 'xcit_medium_24_p8_384.fb_dist_in1k', 'xcit_large_24_p8_224_dist': 'xcit_large_24_p8_224.fb_dist_in1k', 
'xcit_large_24_p8_384_dist': 'xcit_large_24_p8_384.fb_dist_in1k', })
pytorch-image-models/timm/models/xcit.py/0
{ "file_path": "pytorch-image-models/timm/models/xcit.py", "repo_id": "pytorch-image-models", "token_count": 20592 }
266
""" PyTorch LARS / LARC Optimizer An implementation of LARS (SGD) + LARC in PyTorch Based on: * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py Additional cleanup and modifications to properly support PyTorch XLA. Copyright 2021 Ross Wightman """ import torch from torch.optim.optimizer import Optimizer class Lars(Optimizer): """ LARS for PyTorch Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate (default: 1.0). momentum (float, optional): momentum factor (default: 0) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) dampening (float, optional): dampening for momentum (default: 0) nesterov (bool, optional): enables Nesterov momentum (default: False) trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) eps (float): eps for division denominator (default: 1e-8) trust_clip (bool): enable LARC trust ratio clipping (default: False) always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) """ def __init__( self, params, lr=1.0, momentum=0, dampening=0, weight_decay=0, nesterov=False, trust_coeff=0.001, eps=1e-8, trust_clip=False, always_adapt=False, ): if lr < 0.0: raise ValueError(f"Invalid learning rate: {lr}") if momentum < 0.0: raise ValueError(f"Invalid momentum value: {momentum}") if weight_decay < 0.0: raise ValueError(f"Invalid weight_decay value: {weight_decay}") if nesterov and (momentum <= 0 or dampening != 0): raise ValueError("Nesterov momentum requires a momentum and zero dampening") defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, trust_coeff=trust_coeff, eps=eps, trust_clip=trust_clip, 
always_adapt=always_adapt, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad # apply LARS LR adaptation, LARC clipping, weight decay # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) # FIXME nested where required since logical and/or not working in PT XLA # Set the ratio to 1.0 (no change) if either weight norm or grad norm is zero trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, trust_ratio, 1.0), 1.0, ) if group['trust_clip']: trust_ratio = torch.clamp(trust_ratio / group['lr'], max=1.0) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss
pytorch-image-models/timm/optim/lars.py/0
{ "file_path": "pytorch-image-models/timm/optim/lars.py", "repo_id": "pytorch-image-models", "token_count": 2549 }
267
""" MultiStep LR Scheduler Basic multi step LR schedule with warmup, noise. """ import torch import bisect from timm.scheduler.scheduler import Scheduler from typing import List class MultiStepLRScheduler(Scheduler): """ """ def __init__( self, optimizer: torch.optim.Optimizer, decay_t: List[int], decay_rate: float = 1., warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def get_curr_decay_steps(self, t): # find where in the array t goes, # assumes self.decay_t is sorted return bisect.bisect_right(self.decay_t, t + 1) def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] return lrs
pytorch-image-models/timm/scheduler/multistep_lr.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/multistep_lr.py", "repo_id": "pytorch-image-models", "token_count": 1036 }
268
""" Logging helpers Hacked together by / Copyright 2020 Ross Wightman """ import logging import logging.handlers class FormatterNoInfo(logging.Formatter): def __init__(self, fmt='%(levelname)s: %(message)s'): logging.Formatter.__init__(self, fmt) def format(self, record): if record.levelno == logging.INFO: return str(record.getMessage()) return logging.Formatter.format(self, record) def setup_default_logging(default_level=logging.INFO, log_path=''): console_handler = logging.StreamHandler() console_handler.setFormatter(FormatterNoInfo()) logging.root.addHandler(console_handler) logging.root.setLevel(default_level) if log_path: file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") file_handler.setFormatter(file_formatter) logging.root.addHandler(file_handler)
pytorch-image-models/timm/utils/log.py/0
{ "file_path": "pytorch-image-models/timm/utils/log.py", "repo_id": "pytorch-image-models", "token_count": 383 }
269
# Human-in-the-Loop: Customize Agent Plan Interactively This page demonstrates advanced usage of the smolagents library, with a special focus on **Human-in-the-Loop (HITL)** approaches for interactive plan creation, user-driven plan modification, and memory preservation in agentic workflows. The example is based on the code in `examples/plan_customization/plan_customization.py`. ## Overview This example teaches you how to implement Human-in-the-Loop strategies to: - Interrupt agent execution after a plan is created (using step callbacks) - Allow users to review and modify the agent's plan before execution (Human-in-the-Loop) - Resume execution while preserving the agent's memory - Dynamically update plans based on user feedback, keeping the human in control ## Key Concepts ### Step Callbacks for Plan Interruption The agent is configured to pause after creating a plan. This is achieved by registering a step callback for the `PlanningStep`: ```python agent = CodeAgent( model=InferenceClientModel(), tools=[DuckDuckGoSearchTool()], planning_interval=5, # Plan every 5 steps step_callbacks={PlanningStep: interrupt_after_plan}, max_steps=10, verbosity_level=1 ) ``` ### Human-in-the-Loop: Interactive Plan Review and Modification When the agent creates a plan, the callback displays it and prompts the human user to: 1. Approve the plan 2. Modify the plan 3. Cancel execution Example interaction: ``` ============================================================ 🤖 AGENT PLAN CREATED ============================================================ 1. Search for recent AI developments 2. Analyze the top results 3. Summarize the 3 most significant breakthroughs 4. Include sources for each breakthrough ============================================================ Choose an option: 1. Approve plan 2. Modify plan 3. 
Cancel Your choice (1-3): ``` This Human-in-the-Loop step enables a human to intervene and review or modify the plan before execution continues, and ensures that the agent's actions align with human intent. If the user chooses to modify, they can edit the plan directly. The updated plan is then used for subsequent execution steps. ### Memory Preservation and Resuming Execution By running the agent with `reset=False`, all previous steps and memory are preserved. This allows you to resume execution after an interruption or plan modification: ```python # First run (may be interrupted) agent.run(task, reset=True) # Resume with preserved memory agent.run(task, reset=False) ``` ### Inspecting Agent Memory You can inspect the agent's memory to see all steps taken so far: ```python print(f"Current memory contains {len(agent.memory.steps)} steps:") for i, step in enumerate(agent.memory.steps): step_type = type(step).__name__ print(f" {i+1}. {step_type}") ``` ## Example Human-in-the-Loop Workflow 1. Agent starts with a complex task 2. Planning step is created and execution pauses for human review 3. Human reviews and optionally modifies the plan (Human-in-the-Loop) 4. Execution resumes with the approved/modified plan 5. 
All steps are preserved for future runs, maintaining transparency and control ## Error Handling The example includes error handling for: - User cancellation - Plan modification errors - Resume execution failures ## Requirements - smolagents library - DuckDuckGoSearchTool (included with smolagents) - InferenceClientModel (requires HuggingFace API token) ## Educational Value This example demonstrates: - Step callback implementation for custom agent behavior - Memory management in multi-step agents - User interaction patterns in agentic systems - Plan modification techniques for dynamic agent control - Error handling in interactive agent systems --- For the full code, see [`examples/plan_customization`](https://github.com/huggingface/smolagents/tree/main/examples/plan_customization).
smolagents/docs/source/en/examples/plan_customization.md/0
{ "file_path": "smolagents/docs/source/en/examples/plan_customization.md", "repo_id": "smolagents", "token_count": 996 }
270
# Tools [[open-in-colab]] Here, we're going to see advanced tool usage. > [!TIP] > If you're new to building agents, make sure to first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour). ### What is a tool, and how to build one? A tool is mostly a function that an LLM can use in an agentic system. But to use it, the LLM will need to be given an API: name, tool description, input types and descriptions, output type. So it cannot be only a function. It should be a class. So at core, the tool is a class that wraps a function with metadata that helps the LLM understand how to use it. Here's how it looks: ```python from smolagents import Tool class HFModelDownloadsTool(Tool): name = "model_download_counter" description = """ This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint.""" inputs = { "task": { "type": "string", "description": "the task category (such as text-classification, depth-estimation, etc)", } } output_type = "string" def forward(self, task: str): from huggingface_hub import list_models model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) return model.id model_downloads_tool = HFModelDownloadsTool() ``` The custom tool subclasses [`Tool`] to inherit useful methods. The child class also defines: - An attribute `name`, which corresponds to the name of the tool itself. The name usually describes what the tool does. Since the code returns the model with the most downloads for a task, let's name it `model_download_counter`. - An attribute `description` is used to populate the agent's system prompt. - An `inputs` attribute, which is a dictionary with keys `"type"` and `"description"`. It contains information that helps the Python interpreter make educated choices about the input. - An `output_type` attribute, which specifies the output type. 
The types for both `inputs` and `output_type` should be [Pydantic formats](https://docs.pydantic.dev/latest/concepts/json_schema/#generating-json-schema), they can be either of these: [`~AUTHORIZED_TYPES`].
- A `forward` method which contains the inference code to be executed.

And that's all it needs to be used in an agent!

There's another way to build a tool. In the [guided_tour](../guided_tour), we implemented a tool using the `@tool` decorator. The [`tool`] decorator is the recommended way to define simple tools, but sometimes you need more than this: using several methods in a class for more clarity, or using additional class attributes. In this case, you can build your tool by subclassing [`Tool`] as described above.

### Share your tool to the Hub

You can share your custom tool to the Hub as a Space repository by calling [`~Tool.push_to_hub`] on the tool. Make sure you've created a repository for it on the Hub and are using a token with write access.

```python
model_downloads_tool.push_to_hub("{your_username}/hf-model-downloads", token="<YOUR_HUGGINGFACEHUB_API_TOKEN>")
```

For the push to Hub to work, your tool will need to respect some rules:
- All methods are self-contained, e.g. they use only variables that come from their args.
- As per the above point, **all imports should be defined directly within the tool's functions**, else you will get an error when trying to call [`~Tool.save`] or [`~Tool.push_to_hub`] with your custom tool.
- If you subclass the `__init__` method, you can give it no other argument than `self`. This is because arguments set during a specific tool instance's initialization are hard to track, which prevents sharing them properly to the hub. And anyway, the idea of making a specific class is that you can already set class attributes for anything you need to hard-code (just set `your_variable=(...)` directly under the `class YourTool(Tool):` line).
And of course you can still create a class attribute anywhere in your code by assigning stuff to `self.your_variable`. Once your tool is pushed to Hub, you can visualize it. [Here](https://huggingface.co/spaces/m-ric/hf-model-downloads) is the `model_downloads_tool` that I've pushed. It has a nice gradio interface. When diving into the tool files, you can find that all the tool's logic is under [tool.py](https://huggingface.co/spaces/m-ric/hf-model-downloads/blob/main/tool.py). That is where you can inspect a tool shared by someone else. Then you can load the tool with [`load_tool`] or create it with [`~Tool.from_hub`] and pass it to the `tools` parameter in your agent. Since running tools means running custom code, you need to make sure you trust the repository, thus we require to pass `trust_remote_code=True` to load a tool from the Hub. ```python from smolagents import load_tool, CodeAgent model_download_tool = load_tool( "{your_username}/hf-model-downloads", trust_remote_code=True ) ``` ### Use tools from an MCP server Our `MCPClient` allows you to load tools from an MCP server, and gives you full control over the connection and tool management: For stdio-based MCP servers: ```python from smolagents import MCPClient, CodeAgent from mcp import StdioServerParameters import os server_parameters = StdioServerParameters( command="uvx", # Using uvx ensures dependencies are available args=["--quiet", "pubmedmcp@0.1.3"], env={"UV_PYTHON": "3.12", **os.environ}, ) with MCPClient(server_parameters) as tools: agent = CodeAgent(tools=tools, model=model, add_base_tools=True) agent.run("Please find the latest research on COVID-19 treatment.") ``` For Streamable HTTP-based MCP servers: ```python from smolagents import MCPClient, CodeAgent with MCPClient({"url": "http://127.0.0.1:8000/mcp", "transport": "streamable-http"}) as tools: agent = CodeAgent(tools=tools, model=model, add_base_tools=True) agent.run("Please find a remedy for hangover.") ``` You can also manually manage 
the connection lifecycle with the try...finally pattern: ```python from smolagents import MCPClient, CodeAgent from mcp import StdioServerParameters import os # Initialize server parameters server_parameters = StdioServerParameters( command="uvx", args=["--quiet", "pubmedmcp@0.1.3"], env={"UV_PYTHON": "3.12", **os.environ}, ) # Manually manage the connection try: mcp_client = MCPClient(server_parameters) tools = mcp_client.get_tools() # Use the tools with your agent agent = CodeAgent(tools=tools, model=model, add_base_tools=True) result = agent.run("What are the recent therapeutic approaches for Alzheimer's disease?") # Process the result as needed print(f"Agent response: {result}") finally: # Always ensure the connection is properly closed mcp_client.disconnect() ``` You can also connect to multiple MCP servers at once by passing a list of server parameters: ```python from smolagents import MCPClient, CodeAgent from mcp import StdioServerParameters import os server_params1 = StdioServerParameters( command="uvx", args=["--quiet", "pubmedmcp@0.1.3"], env={"UV_PYTHON": "3.12", **os.environ}, ) server_params2 = {"url": "http://127.0.0.1:8000/sse"} with MCPClient([server_params1, server_params2]) as tools: agent = CodeAgent(tools=tools, model=model, add_base_tools=True) agent.run("Please analyze the latest research and suggest remedies for headaches.") ``` > [!WARNING] > **Security Warning:** Always verify the source and integrity of any MCP server before connecting to it, especially for production environments. > Using MCP servers comes with security risks: > - **Trust is essential:** Only use MCP servers from trusted sources. Malicious servers can execute harmful code on your machine. > - **Stdio-based MCP servers** will always execute code on your machine (that's their intended functionality). > - **Streamable HTTP-based MCP servers:** While remote MCP servers will not execute code on your machine, still proceed with caution. 
#### Structured Output and Output Schema Support The latest [MCP specifications (2025-06-18+)](https://modelcontextprotocol.io/specification/2025-06-18/server/tools#structured-content) include support for `outputSchema`, which enables tools to return structured data with defined schemas. `smolagents` takes advantage of these structured output capabilities, allowing agents to work with tools that return complex data structures, JSON objects, and other structured formats. With this feature, the agent's LLMs can "see" the structure of the tool output before calling a tool, enabling more intelligent and context-aware interactions. To enable structured output support, pass `structured_output=True` when initializing the `MCPClient`: ```python from smolagents import MCPClient, CodeAgent # Enable structured output support with MCPClient(server_parameters, structured_output=True) as tools: agent = CodeAgent(tools=tools, model=model, add_base_tools=True) agent.run("Get weather information for Paris") ``` When `structured_output=True`, the following features are enabled: - **Output Schema Support**: Tools can define JSON schemas for their outputs - **Structured Content Handling**: Support for `structuredContent` in MCP responses - **JSON Parsing**: Automatic parsing of structured data from tool responses Here's an example using a weather MCP server with structured output: ```python # demo/weather.py - Example MCP server with structured output from pydantic import BaseModel, Field from mcp.server.fastmcp import FastMCP mcp = FastMCP("Weather Service") class WeatherInfo(BaseModel): location: str = Field(description="The location name") temperature: float = Field(description="Temperature in Celsius") conditions: str = Field(description="Weather conditions") humidity: int = Field(description="Humidity percentage", ge=0, le=100) @mcp.tool( name="get_weather_info", description="Get weather information for a location as structured data.", # structured_output=True is enabled by 
default in FastMCP ) def get_weather_info(city: str) -> WeatherInfo: """Get weather information for a city.""" return WeatherInfo( location=city, temperature=22.5, conditions="partly cloudy", humidity=65 ) ``` Agent using output schema and structured output: ```python from smolagents import MCPClient, CodeAgent # Using the weather server with structured output from mcp import StdioServerParameters server_parameters = StdioServerParameters( command="python", args=["demo/weather.py"] ) with MCPClient(server_parameters, structured_output=True) as tools: agent = CodeAgent(tools=tools, model=model) result = agent.run("What is the temperature in Tokyo in Fahrenheit?") print(result) ``` When structured output is enabled, the `CodeAgent` system prompt is enhanced to include JSON schema information for tools, helping the agent understand the expected structure of tool outputs and access the data appropriately. **Backwards Compatibility**: The `structured_output` parameter currently defaults to `False` to maintain backwards compatibility. Existing code will continue to work without changes, receiving simple text outputs as before. **Future Change**: In a future release, the default value of `structured_output` will change from `False` to `True`. It is recommended to explicitly set `structured_output=True` to opt into the enhanced functionality, which provides better tool output handling and improved agent performance. Use `structured_output=False` only if you specifically need to maintain the current text-only behavior. ### Import a Space as a tool You can directly import a Gradio Space from the Hub as a tool using the [`Tool.from_space`] method! You only need to provide the id of the Space on the Hub, its name, and a description that will help your agent understand what the tool does. Under the hood, this will use [`gradio-client`](https://pypi.org/project/gradio-client/) library to call the Space. 
For instance, let's import the [FLUX.1-schnell](https://huggingface.co/black-forest-labs/FLUX.1-schnell) Space from the Hub and use it to generate an image. ```python image_generation_tool = Tool.from_space( "black-forest-labs/FLUX.1-schnell", name="image_generator", description="Generate an image from a prompt" ) image_generation_tool("A sunny beach") ``` And voilà, here's your image! 🏖️ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sunny_beach.webp"> Then you can use this tool just like any other tool. For example, let's improve the prompt `a rabbit wearing a space suit` and generate an image of it. This example also shows how you can pass additional arguments to the agent. ```python from smolagents import CodeAgent, InferenceClientModel model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[image_generation_tool], model=model) agent.run( "Improve this prompt, then generate an image of it.", additional_args={'user_prompt': 'A rabbit wearing a space suit'} ) ``` ```text === Agent thoughts: improved_prompt could be "A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background" Now that I have improved the prompt, I can use the image generator tool to generate an image based on this prompt. >>> Agent is executing the code below: image = image_generator(prompt="A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background") final_answer(image) ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit_spacesuit_flux.webp"> How cool is this? 🤩 ### Use LangChain tools We love Langchain and think it has a very compelling suite of tools. To import a tool from LangChain, use the `from_langchain()` method. 
Here is how you can use it to recreate the intro's search result using a LangChain web search tool. This tool will need `pip install langchain google-search-results -q` to work properly. ```python from langchain.agents import load_tools search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) agent = CodeAgent(tools=[search_tool], model=model) agent.run("How many more blocks (also denoted as layers) are in BERT base encoder compared to the encoder from the architecture proposed in Attention is All You Need?") ``` ### Manage your agent's toolbox You can manage an agent's toolbox by adding or replacing a tool in attribute `agent.tools`, since it is a standard dictionary. Let's add the `model_download_tool` to an existing agent initialized with only the default toolbox. ```python from smolagents import InferenceClientModel model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct") agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.tools[model_download_tool.name] = model_download_tool ``` Now we can leverage the new tool: ```python agent.run( "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub but reverse the letters?" ) ``` > [!TIP] > Beware of not adding too many tools to an agent: this can overwhelm weaker LLM engines. ### Use a collection of tools You can leverage tool collections by using [`ToolCollection`]. It supports loading either a collection from the Hub or an MCP server tools. #### Tool Collection from any MCP server Leverage tools from the hundreds of MCP servers available on [glama.ai](https://glama.ai/mcp/servers) or [smithery.ai](https://smithery.ai/). The MCP servers tools can be loaded with [`ToolCollection.from_mcp`]. > [!WARNING] > **Security Warning:** Always verify the source and integrity of any MCP server before connecting to it, especially for production environments. 
> Using MCP servers comes with security risks: > - **Trust is essential:** Only use MCP servers from trusted sources. Malicious servers can execute harmful code on your machine. > - **Stdio-based MCP servers** will always execute code on your machine (that's their intended functionality). > - **Streamable HTTP-based MCP servers:** While remote MCP servers will not execute code on your machine, still proceed with caution. For stdio-based MCP servers, pass the server parameters as an instance of `mcp.StdioServerParameters`: ```py from smolagents import ToolCollection, CodeAgent from mcp import StdioServerParameters import os server_parameters = StdioServerParameters( command="uvx", args=["--quiet", "pubmedmcp@0.1.3"], env={"UV_PYTHON": "3.12", **os.environ}, ) with ToolCollection.from_mcp(server_parameters, trust_remote_code=True) as tool_collection: agent = CodeAgent(tools=[*tool_collection.tools], model=model, add_base_tools=True) agent.run("Please find a remedy for hangover.") ``` To enable structured output support with ToolCollection, add the `structured_output=True` parameter: ```py with ToolCollection.from_mcp(server_parameters, trust_remote_code=True, structured_output=True) as tool_collection: agent = CodeAgent(tools=[*tool_collection.tools], model=model, add_base_tools=True) agent.run("Please find a remedy for hangover.") ``` For Streamable HTTP-based MCP servers, simply pass a dict with parameters to `mcp.client.streamable_http.streamablehttp_client` and add the key `transport` with the value `"streamable-http"`: ```py from smolagents import ToolCollection, CodeAgent with ToolCollection.from_mcp({"url": "http://127.0.0.1:8000/mcp", "transport": "streamable-http"}, trust_remote_code=True) as tool_collection: agent = CodeAgent(tools=[*tool_collection.tools], add_base_tools=True) agent.run("Please find a remedy for hangover.") ``` #### Tool Collection from a collection in the Hub You can leverage it with the slug of the collection you want to use. 
Then pass them as a list to initialize your agent, and start using them! ```py from smolagents import ToolCollection, CodeAgent image_tool_collection = ToolCollection.from_hub( collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f", token="<YOUR_HUGGINGFACEHUB_API_TOKEN>" ) agent = CodeAgent(tools=[*image_tool_collection.tools], model=model, add_base_tools=True) agent.run("Please draw me a picture of rivers and lakes.") ``` To speed up startup, tools are loaded only when the agent actually calls them.
smolagents/docs/source/en/tutorials/tools.md/0
{ "file_path": "smolagents/docs/source/en/tutorials/tools.md", "repo_id": "smolagents", "token_count": 5412 }
271
# docstyle-ignore
# First cell injected when these docs are converted to runnable notebooks;
# it installs the released package (or, commented out, the source version).
INSTALL_CONTENT = """
# Installation
! pip install smolagents
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/smolagents.git
"""

# Cells prepended to every generated notebook — just the install cell.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Template placeholders mapped to dummy class names, presumably so that code
# snippets containing placeholders can still be auto-formatted — confirm
# against the doc-build pipeline.
black_avoid_patterns = {
    placeholder: stand_in
    for placeholder, stand_in in (
        ("{processor_class}", "FakeProcessorClass"),
        ("{model_class}", "FakeModelClass"),
        ("{object_class}", "FakeObjectClass"),
    )
}
smolagents/docs/source/ko/_config.py/0
{ "file_path": "smolagents/docs/source/ko/_config.py", "repo_id": "smolagents", "token_count": 155 }
272