# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, List, Optional, Union

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPTokenizer

from ...configuration_utils import FrozenDict
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import deprecate, logging
from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionPipelineOutput


logger = logging.get_logger(__name__)


class OnnxStableDiffusionPipeline(DiffusionPipeline):
    vae_encoder: OnnxRuntimeModel
    vae_decoder: OnnxRuntimeModel
    text_encoder: OnnxRuntimeModel
    tokenizer: CLIPTokenizer
    unet: OnnxRuntimeModel
    scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]
    safety_checker: OnnxRuntimeModel
    feature_extractor: CLIPImageProcessor

    _optional_components = ["safety_checker", "feature_extractor"]
    _is_onnx = True

    def __init__(
        self,
        vae_encoder: OnnxRuntimeModel,
        vae_decoder: OnnxRuntimeModel,
        text_encoder: OnnxRuntimeModel,
        tokenizer: CLIPTokenizer,
        unet: OnnxRuntimeModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: OnnxRuntimeModel,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        self.register_modules(
            vae_encoder=vae_encoder,
            vae_decoder=vae_decoder,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def _encode_prompt(
        self,
        prompt: Union[str, List[str]],
        num_images_per_prompt: Optional[int],
        do_classifier_free_guidance: bool,
        negative_prompt: Optional[str],
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # get prompt text embeddings
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids

            if not np.array_equal(text_input_ids, untruncated_ids):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]

        prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt] * batch_size
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="np",
            )
            negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]

        if do_classifier_free_guidance:
            negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    def check_inputs(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int],
        width: Optional[int],
        callback_steps: int,
        negative_prompt: Optional[str] = None,
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[np.random.RandomState] = None,
        latents: Optional[np.ndarray] = None,
        prompt_embeds: Optional[np.ndarray] = None,
        negative_prompt_embeds: Optional[np.ndarray] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: int = 1,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`np.random.RandomState`, *optional*):
                A NumPy random state to make generation deterministic.
            latents (`np.ndarray`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`np.ndarray`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        # define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if generator is None:
            generator = np.random

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(
            prompt,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # get the initial random noise unless the user supplied it
        latents_dtype = prompt_embeds.dtype
        latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8)
        if latents is None:
            latents = generator.randn(*latents_shape).astype(latents_dtype)
        elif latents.shape != latents_shape:
            raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        timestep_dtype = next(
            (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
        )
        timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
            latent_model_input = latent_model_input.cpu().numpy()

            # predict the noise residual
            timestep = np.array([t], dtype=timestep_dtype)
            noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)
            noise_pred = noise_pred[0]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            scheduler_output = self.scheduler.step(
                torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
            )
            latents = scheduler_output.prev_sample.numpy()

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                step_idx = i // getattr(self.scheduler, "order", 1)
                callback(step_idx, t, latents)

        latents = 1 / 0.18215 * latents
        # image = self.vae_decoder(latent_sample=latents)[0]
        # decoding with a half-precision VAE decoder gives wrong results when batch size > 1,
        # so decode one latent at a time
        image = np.concatenate(
            [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
        )

        image = np.clip(image / 2 + 0.5, 0, 1)
        image = image.transpose((0, 2, 3, 1))

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(
                self.numpy_to_pil(image), return_tensors="np"
            ).pixel_values.astype(image.dtype)

            images, has_nsfw_concept = [], []
            for i in range(image.shape[0]):
                image_i, has_nsfw_concept_i = self.safety_checker(
                    clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
                )
                images.append(image_i)
                has_nsfw_concept.append(has_nsfw_concept_i[0])
            image = np.concatenate(images)
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)


class StableDiffusionOnnxPipeline(OnnxStableDiffusionPipeline):
    def __init__(
        self,
        vae_encoder: OnnxRuntimeModel,
        vae_decoder: OnnxRuntimeModel,
        text_encoder: OnnxRuntimeModel,
        tokenizer: CLIPTokenizer,
        unet: OnnxRuntimeModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: OnnxRuntimeModel,
        feature_extractor: CLIPImageProcessor,
    ):
        deprecation_message = "Please use `OnnxStableDiffusionPipeline` instead of `StableDiffusionOnnxPipeline`."
        deprecate("StableDiffusionOnnxPipeline", "1.0.0", deprecation_message)
        super().__init__(
            vae_encoder=vae_encoder,
            vae_decoder=vae_decoder,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
# Source: diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py (repo: diffusers)
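A minimal usage sketch for the ONNX pipeline above. The checkpoint id, the `revision="onnx"` value, and the execution provider are assumptions for illustration; any Stable Diffusion checkpoint with an ONNX export and any ONNX Runtime provider installed locally would do. The pipeline itself is loaded through the standard `from_pretrained` API shown in the diffusers ONNX docs.

# Hedged usage sketch: checkpoint id, revision, and provider below are placeholders.
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed checkpoint that ships an ONNX export
    revision="onnx",                   # assumption: the repo exposes an "onnx" revision
    provider="CPUExecutionProvider",   # any installed ONNX Runtime execution provider
)

image = pipe(
    "a photo of an astronaut riding a horse on mars",
    num_inference_steps=25,
    guidance_scale=7.5,
).images[0]
image.save("astronaut.png")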
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.Array, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
# Source: diffusers/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py (repo: diffusers)
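A small sketch of how the Flax safety checker above might be exercised on its own. The checkpoint id, the `from_pt=True` conversion, and the random input are assumptions for illustration; in real pipelines the checker receives CLIP-preprocessed pixel values, passed channels-first because the `__call__` above transposes to NHWC before applying the module.

# Hedged sketch: standalone scoring of a batch of (fake) preprocessed images.
import jax
import jax.numpy as jnp

from diffusers.pipelines.stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker

# Assumed checkpoint id; any repo holding safety-checker weights loadable into this class would work.
safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
    "CompVis/stable-diffusion-safety-checker", from_pt=True
)

# Stand-in for CLIP-preprocessed pixel values: batch of 2 images, channels-first (N, C, H, W).
clip_input = jax.random.normal(jax.random.PRNGKey(0), (2, 3, 224, 224))

has_nsfw = safety_checker(clip_input, params=safety_checker.params)
print(has_nsfw)  # one boolean flag per image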
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect from typing import List, Optional, Tuple, Union import torch from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras from transformers import ( CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, ) from ...image_processor import VaeImageProcessor from ...loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import ( AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, ) from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionXLKDiffusionPipeline >>> pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> pipe.set_scheduler("sample_dpmpp_2m_sde") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt).images[0] ``` """ # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.ModelWrapper class ModelWrapper: def __init__(self, model, alphas_cumprod): self.model = model self.alphas_cumprod = alphas_cumprod def apply_model(self, *args, **kwargs): if len(args) == 3: encoder_hidden_states = args[-1] args = args[:2] if kwargs.get("cond", None) is not None: encoder_hidden_states = kwargs.pop("cond") return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample class StableDiffusionXLKDiffusionPipeline( DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin, ): _last_supported_version = "0.33.1" r""" Pipeline for text-to-image generation using Stable Diffusion XL and k-diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "feature_extractor", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, ): super().__init__() # get correct sigmas from LMS scheduler = LMSDiscreteScheduler.from_config(scheduler.config) self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) model = ModelWrapper(unet, scheduler.alphas_cumprod) if scheduler.config.prediction_type == "v_prediction": self.k_diffusion_model = CompVisVDenoiser(model) else: self.k_diffusion_model = CompVisDenoiser(model) # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.set_scheduler def set_scheduler(self, scheduler_type: str): library = importlib.import_module("k_diffusion") sampling = getattr(library, "sampling") try: self.sampler = getattr(sampling, scheduler_type) except Exception: valid_samplers = [] for s in dir(sampling): if "sample_" in s: valid_samplers.append(s) raise ValueError(f"Invalid scheduler type {scheduler_type}. Please choose one of {valid_samplers}.") # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = 
text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds def check_inputs( self, prompt, prompt_2, height, width, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." 
) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, use_karras_sigmas: Optional[bool] = False, noise_sampler_seed: Optional[int] = None, clip_skip: Optional[int] = None, ): r""" Function invoked when calling the pipeline for generation. 
Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. 
When returning a tuple, the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) if guidance_scale <= 1.0: raise ValueError("has to use guidance_scale") self._guidance_scale = guidance_scale self._clip_skip = clip_skip # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt lora_scale = None ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) # 5. Prepare sigmas if use_karras_sigmas: sigma_min: float = self.k_diffusion_model.sigmas[0].item() sigma_max: float = self.k_diffusion_model.sigmas[-1].item() sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) else: sigmas = self.scheduler.sigmas sigmas = sigmas.to(dtype=prompt_embeds.dtype, device=device) # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) latents = latents * sigmas[0] self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) # 7. 
Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} # 8. Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 9. Define model function def model_fn(x, t): latent_model_input = torch.cat([x] * 2) t = torch.cat([t] * 2) noise_pred = self.k_diffusion_model( latent_model_input, t, cond=prompt_embeds, timestep_cond=timestep_cond, added_cond_kwargs=added_cond_kwargs, ) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred # 10. Run k-diffusion solver sampler_kwargs = {} if "noise_sampler" in inspect.signature(self.sampler).parameters: min_sigma, max_sigma = sigmas[sigmas > 0].min(), sigmas.max() noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) sampler_kwargs["noise_sampler"] = noise_sampler if "generator" in inspect.signature(self.sampler).parameters: sampler_kwargs["generator"] = generator latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
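# A minimal usage sketch for this pipeline (illustrative only): the checkpoint id, sampler name, prompt, and
# guidance value below are assumptions, not part of this module. Note that the pipeline always applies
# classifier-free guidance, so `guidance_scale` must be greater than 1.
#
#   import torch
#   from diffusers import StableDiffusionXLKDiffusionPipeline
#
#   pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe.set_scheduler("sample_dpmpp_2m")  # select one of the k-diffusion samplers
#   image = pipe("an astronaut riding a horse on mars", guidance_scale=7.0).images[0]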
diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py", "repo_id": "diffusers", "token_count": 20304 }
165
# Copyright 2025 Kakao Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import List, Optional, Tuple, Union import torch from torch.nn import functional as F from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel from ...schedulers import UnCLIPScheduler from ...utils import is_torch_xla_available, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name class UnCLIPPipeline(DeprecatedPipelineMixin, DiffusionPipeline): """ Pipeline for text-to-image generation using unCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: text_encoder ([`~transformers.CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. text_proj ([`UnCLIPTextProjModel`]): Utility class to prepare and combine the embeddings before they are passed to the decoder. decoder ([`UNet2DConditionModel`]): The decoder to invert the image embedding into an image. super_res_first ([`UNet2DModel`]): Super resolution UNet. Used in all but the last step of the super resolution diffusion process. super_res_last ([`UNet2DModel`]): Super resolution UNet. Used in the last step of the super resolution diffusion process. prior_scheduler ([`UnCLIPScheduler`]): Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]). decoder_scheduler ([`UnCLIPScheduler`]): Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). super_res_scheduler ([`UnCLIPScheduler`]): Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). 
""" _last_supported_version = "0.33.1" _exclude_from_cpu_offload = ["prior"] prior: PriorTransformer decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer super_res_first: UNet2DModel super_res_last: UNet2DModel prior_scheduler: UnCLIPScheduler decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler model_cpu_offload_seq = "text_encoder->text_proj->decoder->super_res_first->super_res_last" def __init__( self, prior: PriorTransformer, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, super_res_first: UNet2DModel, super_res_last: UNet2DModel, prior_scheduler: UnCLIPScheduler, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler, ): super().__init__() self.register_modules( prior=prior, decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, super_res_first=super_res_first, super_res_last=super_res_last, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_enc_hid_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) 
negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_enc_hid_states, text_mask @torch.no_grad() def __call__( self, prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, prior_num_inference_steps: int = 25, decoder_num_inference_steps: int = 25, super_res_num_inference_steps: int = 7, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prior_latents: Optional[torch.Tensor] = None, decoder_latents: Optional[torch.Tensor] = None, super_res_latents: Optional[torch.Tensor] = None, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. This can only be left undefined if `text_model_output` and `text_attention_mask` is passed. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the prior. More denoising steps usually lead to a higher quality image at the expense of slower inference. decoder_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality image at the expense of slower inference. super_res_num_inference_steps (`int`, *optional*, defaults to 7): The number of denoising steps for super resolution. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prior_latents (`torch.Tensor` of shape (batch size, embeddings dimension), *optional*): Pre-generated noisy latents to be used as inputs for the prior. decoder_latents (`torch.Tensor` of shape (batch size, channels, height, width), *optional*): Pre-generated noisy latents to be used as inputs for the decoder. 
super_res_latents (`torch.Tensor` of shape (batch size, channels, super res height, super res width), *optional*): Pre-generated noisy latents to be used as inputs for the super resolution UNets. prior_guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. decoder_guidance_scale (`float`, *optional*, defaults to 8.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. text_model_output (`CLIPTextModelOutput`, *optional*): Pre-defined [`CLIPTextModel`] outputs that can be derived from the text encoder. Pre-defined text outputs can be passed for tasks like text embedding interpolations. Make sure to also pass `text_attention_mask` in this case. `prompt` can then be left `None`. text_attention_mask (`torch.Tensor`, *optional*): Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention masks are necessary when passing `text_model_output`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if prompt is not None: if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") else: batch_size = text_model_output[0].shape[0] device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 prompt_embeds, text_enc_hid_states, text_mask = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_enc_hid_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = 
self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents # done prior # decoder text_enc_hid_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_enc_hid_states, do_classifier_free_guidance=do_classifier_free_guidance, ) if device.type == "mps": # HACK: MPS: There is a panic when padding bool tensors, # so cast to int tensor for the pad and back to bool afterwards text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_enc_hid_states.dtype, device, generator, decoder_latents, self.decoder_scheduler, ) for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder( sample=latent_model_input, timestep=t, encoder_hidden_states=text_enc_hid_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 decoder_latents = self.decoder_scheduler.step( noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents # done decoder # super res self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size super_res_latents = self.prepare_latents( (batch_size, channels, height, width), image_small.dtype, device, generator, super_res_latents, self.super_res_scheduler, ) if device.type == "mps": # MPS does not support many interpolations image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if "antialias" in inspect.signature(F.interpolate).parameters: interpolate_antialias["antialias"] = True image_upscaled = F.interpolate( image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias ) for 
i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): # no classifier free guidance if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet( sample=latent_model_input, timestep=t, ).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 super_res_latents = self.super_res_scheduler.step( noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample if XLA_AVAILABLE: xm.mark_step() image = super_res_latents # done super res self.maybe_free_model_hooks() # post processing image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
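# A minimal usage sketch (illustrative only): the checkpoint id and prompt are assumptions, not part of this module.
#
#   import torch
#   from diffusers import UnCLIPPipeline
#
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16).to("cuda")
#   image = pipe("a high-resolution photograph of a red panda", num_images_per_prompt=1).images[0]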
diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py", "repo_id": "diffusers", "token_count": 10083 }
166
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html import inspect from typing import Any, Callable, Dict, List, Optional, Union import regex as re import torch from PIL import Image from transformers import AutoTokenizer, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import WanLoraLoaderMixin from ...models import AutoencoderKLWan, WanTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import WanPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers.utils import export_to_video, load_video >>> from diffusers import AutoencoderKLWan, WanVideoToVideoPipeline >>> from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler >>> # Available models: Wan-AI/Wan2.1-T2V-14B-Diffusers, Wan-AI/Wan2.1-T2V-1.3B-Diffusers >>> model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers" >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32) >>> pipe = WanVideoToVideoPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16) >>> flow_shift = 3.0 # 5.0 for 720P, 3.0 for 480P >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift) >>> pipe.to("cuda") >>> prompt = "A robot standing on a mountain top. The sun is setting in the background" >>> negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" >>> video = load_video( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hiker.mp4" ... ) >>> output = pipe( ... video=video, ... prompt=prompt, ... negative_prompt=negative_prompt, ... height=480, ... width=720, ... guidance_scale=5.0, ... strength=0.7, ... 
).frames[0] >>> export_to_video(output, "output.mp4", fps=16) ``` """ def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class WanVideoToVideoPipeline(DiffusionPipeline, WanLoraLoaderMixin): r""" Pipeline for video-to-video generation using Wan. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: tokenizer ([`T5Tokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. transformer ([`WanTransformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: UMT5EncoderModel, transformer: WanTransformer3DModel, vae: AutoencoderKLWan, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt prompt = [prompt_clean(u) for u in prompt] batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask seq_lens = mask.gt(0).sum(dim=1).long() prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 ) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds def check_inputs( self, prompt, negative_prompt, height, width, video=None, latents=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") if video is not None and latents is not None: raise ValueError("Only one of `video` or `latents` should be provided") def prepare_latents( self, video: Optional[torch.Tensor] = None, batch_size: int = 1, num_channels_latents: int = 16, height: int = 480, width: int = 832, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, timestep: Optional[torch.Tensor] = None, ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) num_latent_frames = ( (video.size(2) - 1) // self.vae_scale_factor_temporal + 1 if latents is None else latents.size(1) ) shape = ( batch_size, num_channels_latents, num_latent_frames, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, ) if latents is None: init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), sample_mode="argmax") for vid in video] init_latents = torch.cat(init_latents, dim=0).to(dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1).to(device, dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( device, dtype ) init_latents = (init_latents - latents_mean) * latents_std noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) if hasattr(self.scheduler, "add_noise"): latents = self.scheduler.add_noise(init_latents, noise, timestep) else: latents = self.scheduler.scale_noise(init_latents, timestep, noise) else: latents = latents.to(device) return latents # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps def get_timesteps(self, num_inference_steps, timesteps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @property def attention_kwargs(self): return self._attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, video: List[Image.Image] = None, prompt: Union[str, List[str]] = None, negative_prompt: Union[str, List[str]] = None, height: int = 480, width: int = 832, num_inference_steps: int = 50, timesteps: Optional[List[int]] = None, guidance_scale: float = 5.0, strength: float = 0.8, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, 
List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "np", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, ): r""" The call function to the pipeline for generation. Args: video (`List[PIL.Image.Image]`): The input video to guide the video generation. prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. height (`int`, defaults to `480`): The height in pixels of the generated image. width (`int`, defaults to `832`): The width in pixels of the generated image. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, defaults to `5.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. strength (`float`, defaults to `0.8`): Higher strength leads to more differences between the original video and the generated video. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated video. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`WanPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during inference, with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`.
`callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, defaults to `512`): The maximum sequence length of the text encoder. If the prompt is longer than this, it will be truncated. If the prompt is shorter, it will be padded to this length. Examples: Returns: [`~WanPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`WanPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial num_videos_per_prompt = 1 # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, negative_prompt, height, width, video, latents, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) transformer_dtype = self.transformer.dtype prompt_embeds = prompt_embeds.to(transformer_dtype) if negative_prompt_embeds is not None: negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) self._num_timesteps = len(timesteps) if latents is None: video = self.video_processor.preprocess_video(video, height=height, width=width).to( device, dtype=torch.float32 ) # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( video, batch_size * num_videos_per_prompt, num_channels_latents, height, width, torch.float32, device, generator, latents, latent_timestep, ) # 6. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = latents.to(transformer_dtype) timestep = t.expand(latents.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_uncond = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=negative_prompt_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents.to(self.vae.dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) latents = latents / latents_std + latents_mean video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return WanPipelineOutput(frames=video)
diffusers/src/diffusers/pipelines/wan/pipeline_wan_video2video.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/wan/pipeline_wan_video2video.py", "repo_id": "diffusers", "token_count": 14672 }
167
from .gguf_quantizer import GGUFQuantizer
diffusers/src/diffusers/quantizers/gguf/__init__.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/gguf/__init__.py", "repo_id": "diffusers", "token_count": 13 }
168
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) @dataclass class ConsistencyDecoderSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. 
""" prev_sample: torch.Tensor class ConsistencyDecoderScheduler(SchedulerMixin, ConfigMixin): order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1024, sigma_data: float = 0.5, ): betas = betas_for_alpha_bar(num_train_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod) self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod) sigmas = torch.sqrt(1.0 / alphas_cumprod - 1) sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / alphas_cumprod) self.c_skip = sqrt_recip_alphas_cumprod * sigma_data**2 / (sigmas**2 + sigma_data**2) self.c_out = sigmas * sigma_data / (sigmas**2 + sigma_data**2) ** 0.5 self.c_in = sqrt_recip_alphas_cumprod / (sigmas**2 + sigma_data**2) ** 0.5 def set_timesteps( self, num_inference_steps: Optional[int] = None, device: Union[str, torch.device] = None, ): if num_inference_steps != 2: raise ValueError("Currently more than 2 inference steps are not supported.") self.timesteps = torch.tensor([1008, 512], dtype=torch.long, device=device) self.sqrt_alphas_cumprod = self.sqrt_alphas_cumprod.to(device) self.sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod.to(device) self.c_skip = self.c_skip.to(device) self.c_out = self.c_out.to(device) self.c_in = self.c_in.to(device) @property def init_noise_sigma(self): return self.sqrt_one_minus_alphas_cumprod[self.timesteps[0]] def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ return sample * self.c_in[timestep] def step( self, model_output: torch.Tensor, timestep: Union[float, torch.Tensor], sample: torch.Tensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[ConsistencyDecoderSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model. timestep (`float`): The current timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" x_0 = self.c_out[timestep] * model_output + self.c_skip[timestep] * sample timestep_idx = torch.where(self.timesteps == timestep)[0] if timestep_idx == len(self.timesteps) - 1: prev_sample = x_0 else: noise = randn_tensor(x_0.shape, generator=generator, dtype=x_0.dtype, device=x_0.device) prev_sample = ( self.sqrt_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * x_0 + self.sqrt_one_minus_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * noise ) if not return_dict: return (prev_sample,) return ConsistencyDecoderSchedulerOutput(prev_sample=prev_sample)
diffusers/src/diffusers/schedulers/scheduling_consistency_decoder.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_consistency_decoder.py", "repo_id": "diffusers", "token_count": 3023 }
169
import contextlib import copy import gc import math import random import re import warnings from contextlib import contextmanager from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np import torch from .models import UNet2DConditionModel from .pipelines import DiffusionPipeline from .schedulers import SchedulerMixin from .utils import ( convert_state_dict_to_diffusers, convert_state_dict_to_peft, deprecate, is_peft_available, is_torch_npu_available, is_torchvision_available, is_transformers_available, ) if is_transformers_available(): import transformers if transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): import deepspeed if is_peft_available(): from peft import set_peft_model_state_dict if is_torchvision_available(): from torchvision import transforms if is_torch_npu_available(): import torch_npu # noqa: F401 def set_seed(seed: int): """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. Args: seed (`int`): The seed to set. Returns: `None` """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_torch_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available def compute_snr(noise_scheduler, timesteps): """ Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 for the given timesteps using the provided noise scheduler. Args: noise_scheduler (`NoiseScheduler`): An object containing the noise schedule parameters, specifically `alphas_cumprod`, which is used to compute the SNR values. timesteps (`torch.Tensor`): A tensor of timesteps for which the SNR is computed. Returns: `torch.Tensor`: A tensor containing the computed SNR values for each timestep. """ alphas_cumprod = noise_scheduler.alphas_cumprod sqrt_alphas_cumprod = alphas_cumprod**0.5 sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 # Expand the tensors. # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026 sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape): sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None] alpha = sqrt_alphas_cumprod.expand(timesteps.shape) sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape): sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None] sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape) # Compute SNR. snr = (alpha / sigma) ** 2 return snr def resolve_interpolation_mode(interpolation_type: str): """ Maps a string describing an interpolation function to the corresponding torchvision `InterpolationMode` enum. The full list of supported enums is documented at https://pytorch.org/vision/0.9/transforms.html#torchvision.transforms.functional.InterpolationMode. Args: interpolation_type (`str`): A string describing an interpolation method. Currently, `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos` are supported, corresponding to the supported interpolation modes in torchvision. Returns: `torchvision.transforms.InterpolationMode`: an `InterpolationMode` enum used by torchvision's `resize` transform. 
""" if not is_torchvision_available(): raise ImportError( "Please make sure to install `torchvision` to be able to use the `resolve_interpolation_mode()` function." ) if interpolation_type == "bilinear": interpolation_mode = transforms.InterpolationMode.BILINEAR elif interpolation_type == "bicubic": interpolation_mode = transforms.InterpolationMode.BICUBIC elif interpolation_type == "box": interpolation_mode = transforms.InterpolationMode.BOX elif interpolation_type == "nearest": interpolation_mode = transforms.InterpolationMode.NEAREST elif interpolation_type == "nearest_exact": interpolation_mode = transforms.InterpolationMode.NEAREST_EXACT elif interpolation_type == "hamming": interpolation_mode = transforms.InterpolationMode.HAMMING elif interpolation_type == "lanczos": interpolation_mode = transforms.InterpolationMode.LANCZOS else: raise ValueError( f"The given interpolation mode {interpolation_type} is not supported. Currently supported interpolation" f" modes are `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ) return interpolation_mode def compute_dream_and_update_latents( unet: UNet2DConditionModel, noise_scheduler: SchedulerMixin, timesteps: torch.Tensor, noise: torch.Tensor, noisy_latents: torch.Tensor, target: torch.Tensor, encoder_hidden_states: torch.Tensor, dream_detail_preservation: float = 1.0, ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: """ Implements "DREAM (Diffusion Rectification and Estimation-Adaptive Models)" from https://huggingface.co/papers/2312.00210. DREAM helps align training with sampling to help training be more efficient and accurate at the cost of an extra forward step without gradients. Args: `unet`: The state unet to use to make a prediction. `noise_scheduler`: The noise scheduler used to add noise for the given timestep. `timesteps`: The timesteps for the noise_scheduler to user. `noise`: A tensor of noise in the shape of noisy_latents. `noisy_latents`: Previously noise latents from the training loop. `target`: The ground-truth tensor to predict after eps is removed. `encoder_hidden_states`: Text embeddings from the text model. `dream_detail_preservation`: A float value that indicates detail preservation level. See reference. Returns: `tuple[torch.Tensor, torch.Tensor]`: Adjusted noisy_latents and target. """ alphas_cumprod = noise_scheduler.alphas_cumprod.to(timesteps.device)[timesteps, None, None, None] sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 # The paper uses lambda = sqrt(1 - alpha) ** p, with p = 1 in their experiments. dream_lambda = sqrt_one_minus_alphas_cumprod**dream_detail_preservation pred = None with torch.no_grad(): pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample _noisy_latents, _target = (None, None) if noise_scheduler.config.prediction_type == "epsilon": predicted_noise = pred delta_noise = (noise - predicted_noise).detach() delta_noise.mul_(dream_lambda) _noisy_latents = noisy_latents.add(sqrt_one_minus_alphas_cumprod * delta_noise) _target = target.add(delta_noise) elif noise_scheduler.config.prediction_type == "v_prediction": raise NotImplementedError("DREAM has not been implemented for v-prediction") else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") return _noisy_latents, _target def unet_lora_state_dict(unet: UNet2DConditionModel) -> Dict[str, torch.Tensor]: r""" Returns: A state dict containing just the LoRA parameters. 
""" lora_state_dict = {} for name, module in unet.named_modules(): if hasattr(module, "set_lora_layer"): lora_layer = getattr(module, "lora_layer") if lora_layer is not None: current_lora_layer_sd = lora_layer.state_dict() for lora_layer_matrix_name, lora_param in current_lora_layer_sd.items(): # The matrix name can either be "down" or "up". lora_state_dict[f"{name}.lora.{lora_layer_matrix_name}"] = lora_param return lora_state_dict def cast_training_params(model: Union[torch.nn.Module, List[torch.nn.Module]], dtype=torch.float32): """ Casts the training parameters of the model to the specified data type. Args: model: The PyTorch model whose parameters will be cast. dtype: The data type to which the model parameters will be cast. """ if not isinstance(model, list): model = [model] for m in model: for param in m.parameters(): # only upcast trainable parameters into fp32 if param.requires_grad: param.data = param.to(dtype) def _set_state_dict_into_text_encoder( lora_state_dict: Dict[str, torch.Tensor], prefix: str, text_encoder: torch.nn.Module ): """ Sets the `lora_state_dict` into `text_encoder` coming from `transformers`. Args: lora_state_dict: The state dictionary to be set. prefix: String identifier to retrieve the portion of the state dict that belongs to `text_encoder`. text_encoder: Where the `lora_state_dict` is to be set. """ text_encoder_state_dict = { f"{k.replace(prefix, '')}": v for k, v in lora_state_dict.items() if k.startswith(prefix) } text_encoder_state_dict = convert_state_dict_to_peft(convert_state_dict_to_diffusers(text_encoder_state_dict)) set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name="default") def _collate_lora_metadata(modules_to_save: Dict[str, torch.nn.Module]) -> Dict[str, Any]: metadatas = {} for module_name, module in modules_to_save.items(): if module is not None: metadatas[f"{module_name}_lora_adapter_metadata"] = module.peft_config["default"].to_dict() return metadatas def compute_density_for_timestep_sampling( weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None, device: Union[torch.device, str] = "cpu", generator: Optional[torch.Generator] = None, ): """ Compute the density for sampling the timesteps when doing SD3 training. Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. SD3 paper reference: https://huggingface.co/papers/2403.03206v1. """ if weighting_scheme == "logit_normal": u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device=device, generator=generator) u = torch.nn.functional.sigmoid(u) elif weighting_scheme == "mode": u = torch.rand(size=(batch_size,), device=device, generator=generator) u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u) else: u = torch.rand(size=(batch_size,), device=device, generator=generator) return u def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None): """ Computes loss weighting scheme for SD3 training. Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. SD3 paper reference: https://huggingface.co/papers/2403.03206v1. """ if weighting_scheme == "sigma_sqrt": weighting = (sigmas**-2.0).float() elif weighting_scheme == "cosmap": bot = 1 - 2 * sigmas + 2 * sigmas**2 weighting = 2 / (math.pi * bot) else: weighting = torch.ones_like(sigmas) return weighting def free_memory(): """ Runs garbage collection. Then clears the cache of the available accelerator. 
""" gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() elif torch.backends.mps.is_available(): torch.mps.empty_cache() elif is_torch_npu_available(): torch_npu.npu.empty_cache() elif hasattr(torch, "xpu") and torch.xpu.is_available(): torch.xpu.empty_cache() @contextmanager def offload_models( *modules: Union[torch.nn.Module, DiffusionPipeline], device: Union[str, torch.device], offload: bool = True ): """ Context manager that, if offload=True, moves each module to `device` on enter, then moves it back to its original device on exit. Args: device (`str` or `torch.Device`): Device to move the `modules` to. offload (`bool`): Flag to enable offloading. """ if offload: is_model = not any(isinstance(m, DiffusionPipeline) for m in modules) # record where each module was if is_model: original_devices = [next(m.parameters()).device for m in modules] else: assert len(modules) == 1 # For DiffusionPipeline, wrap the device in a list to make it iterable original_devices = [modules[0].device] # move to target device for m in modules: m.to(device) try: yield finally: if offload: # move back to original devices for m, orig_dev in zip(modules, original_devices): m.to(orig_dev) def parse_buckets_string(buckets_str): """Parses a string defining buckets into a list of (height, width) tuples.""" if not buckets_str: raise ValueError("Bucket string cannot be empty.") bucket_pairs = buckets_str.strip().split(";") parsed_buckets = [] for pair_str in bucket_pairs: match = re.match(r"^\s*(\d+)\s*,\s*(\d+)\s*$", pair_str) if not match: raise ValueError(f"Invalid bucket format: '{pair_str}'. Expected 'height,width'.") try: height = int(match.group(1)) width = int(match.group(2)) if height <= 0 or width <= 0: raise ValueError("Bucket dimensions must be positive integers.") if height % 8 != 0 or width % 8 != 0: warnings.warn(f"Bucket dimension ({height},{width}) not divisible by 8. This might cause issues.") parsed_buckets.append((height, width)) except ValueError as e: raise ValueError(f"Invalid integer in bucket pair '{pair_str}': {e}") from e if not parsed_buckets: raise ValueError("No valid buckets found in the provided string.") return parsed_buckets def find_nearest_bucket(h, w, bucket_options): """Finds the closes bucket to the given height and width.""" min_metric = float("inf") best_bucket_idx = None for bucket_idx, (bucket_h, bucket_w) in enumerate(bucket_options): metric = abs(h * bucket_w - w * bucket_h) if metric <= min_metric: min_metric = metric best_bucket_idx = bucket_idx return best_bucket_idx # Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 class EMAModel: """ Exponential Moving Average of models weights """ def __init__( self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0, power: Union[float, int] = 2 / 3, foreach: bool = False, model_cls: Optional[Any] = None, model_config: Dict[str, Any] = None, **kwargs, ): """ Args: parameters (Iterable[torch.nn.Parameter]): The parameters to track. decay (float): The decay factor for the exponential moving average. min_decay (float): The minimum decay factor for the exponential moving average. update_after_step (int): The number of steps to wait before starting to update the EMA weights. use_ema_warmup (bool): Whether to use EMA warmup. inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1. Only used if `use_ema_warmup` is True. 
power (float): Exponential factor of EMA warmup. Default: 2/3. Only used if `use_ema_warmup` is True. foreach (bool): Use torch._foreach functions for updating shadow parameters. Should be faster. device (Optional[Union[str, torch.device]]): The device to store the EMA weights on. If None, the EMA weights will be stored on CPU. @crowsonkb's notes on EMA Warmup: If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are good values for models you plan to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at 215.4k steps). """ if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility use_ema_warmup = True if kwargs.get("max_value", None) is not None: deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead." deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False) decay = kwargs["max_value"] if kwargs.get("min_value", None) is not None: deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead." deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False) min_decay = kwargs["min_value"] parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters] if kwargs.get("device", None) is not None: deprecation_message = "The `device` argument is deprecated. Please use `to` instead." deprecate("device", "1.0.0", deprecation_message, standard_warn=False) self.to(device=kwargs["device"]) self.temp_stored_params = None self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_ema_warmup = use_ema_warmup self.inv_gamma = inv_gamma self.power = power self.optimization_step = 0 self.cur_decay_value = None # set in `step()` self.foreach = foreach self.model_cls = model_cls self.model_config = model_config @classmethod def from_pretrained(cls, path, model_cls, foreach=False) -> "EMAModel": _, ema_kwargs = model_cls.from_config(path, return_unused_kwargs=True) model = model_cls.from_pretrained(path) ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config, foreach=foreach) ema_model.load_state_dict(ema_kwargs) return ema_model def save_pretrained(self, path): if self.model_cls is None: raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.") if self.model_config is None: raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.") model = self.model_cls.from_config(self.model_config) state_dict = self.state_dict() state_dict.pop("shadow_params", None) model.register_to_config(**state_dict) self.copy_to(model.parameters()) model.save_pretrained(path) def get_decay(self, optimization_step: int) -> float: """ Compute the decay factor for the exponential moving average. 
""" step = max(0, optimization_step - self.update_after_step - 1) if step <= 0: return 0.0 if self.use_ema_warmup: cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power else: cur_decay_value = (1 + step) / (10 + step) cur_decay_value = min(cur_decay_value, self.decay) # make sure decay is not smaller than min_decay cur_decay_value = max(cur_decay_value, self.min_decay) return cur_decay_value @torch.no_grad() def step(self, parameters: Iterable[torch.nn.Parameter]): if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters() parameters = list(parameters) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. decay = self.get_decay(self.optimization_step) self.cur_decay_value = decay one_minus_decay = 1 - decay context_manager = contextlib.nullcontext() if self.foreach: if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(parameters, modifier_rank=None) with context_manager: params_grad = [param for param in parameters if param.requires_grad] s_params_grad = [ s_param for s_param, param in zip(self.shadow_params, parameters) if param.requires_grad ] if len(params_grad) < len(parameters): torch._foreach_copy_( [s_param for s_param, param in zip(self.shadow_params, parameters) if not param.requires_grad], [param for param in parameters if not param.requires_grad], non_blocking=True, ) torch._foreach_sub_( s_params_grad, torch._foreach_sub(s_params_grad, params_grad), alpha=one_minus_decay ) else: for s_param, param in zip(self.shadow_params, parameters): if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None) with context_manager: if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param)) else: s_param.copy_(param) def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: """ Copy current averaged parameters into given collection of parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored moving averages. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ parameters = list(parameters) if self.foreach: torch._foreach_copy_( [param.data for param in parameters], [s_param.to(param.device).data for s_param, param in zip(self.shadow_params, parameters)], ) else: for s_param, param in zip(self.shadow_params, parameters): param.data.copy_(s_param.to(param.device).data) def pin_memory(self) -> None: r""" Move internal buffers of the ExponentialMovingAverage to pinned memory. Useful for non-blocking transfers for offloading EMA params to the host. """ self.shadow_params = [p.pin_memory() for p in self.shadow_params] def to(self, device=None, dtype=None, non_blocking=False) -> None: r""" Move internal buffers of the ExponentialMovingAverage to `device`. 
Args: device: like `device` argument to `torch.Tensor.to` """ # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype, non_blocking=non_blocking) if p.is_floating_point() else p.to(device=device, non_blocking=non_blocking) for p in self.shadow_params ] def state_dict(self) -> dict: r""" Returns the state of the ExponentialMovingAverage as a dict. This method is used by accelerate during checkpointing to save the ema state dict. """ # Following PyTorch conventions, references to tensors are returned: # "returns a reference to the state and not its copy!" - # https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def store(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Saves the current parameters for restoring later. Args: parameters: Iterable of `torch.nn.Parameter`. The parameters to be temporarily stored. """ self.temp_stored_params = [param.detach().cpu().clone() for param in parameters] def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Restore the parameters stored with the `store` method. Useful to validate the model with EMA parameters without: affecting the original optimization process. Store the parameters before the `copy_to()` method. After validation (or model saving), use this to restore the former parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored parameters. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ if self.temp_stored_params is None: raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`") if self.foreach: torch._foreach_copy_( [param.data for param in parameters], [c_param.data for c_param in self.temp_stored_params] ) else: for c_param, param in zip(self.temp_stored_params, parameters): param.data.copy_(c_param.data) # Better memory-wise. self.temp_stored_params = None def load_state_dict(self, state_dict: dict) -> None: r""" Loads the ExponentialMovingAverage state. This method is used by accelerate during checkpointing to save the ema state dict. Args: state_dict (dict): EMA state. Should be an object returned from a call to :meth:`state_dict`. 
""" # deepcopy, to be consistent with module API state_dict = copy.deepcopy(state_dict) self.decay = state_dict.get("decay", self.decay) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("Decay must be between 0 and 1") self.min_decay = state_dict.get("min_decay", self.min_decay) if not isinstance(self.min_decay, float): raise ValueError("Invalid min_decay") self.optimization_step = state_dict.get("optimization_step", self.optimization_step) if not isinstance(self.optimization_step, int): raise ValueError("Invalid optimization_step") self.update_after_step = state_dict.get("update_after_step", self.update_after_step) if not isinstance(self.update_after_step, int): raise ValueError("Invalid update_after_step") self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup) if not isinstance(self.use_ema_warmup, bool): raise ValueError("Invalid use_ema_warmup") self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma) if not isinstance(self.inv_gamma, (float, int)): raise ValueError("Invalid inv_gamma") self.power = state_dict.get("power", self.power) if not isinstance(self.power, (float, int)): raise ValueError("Invalid power") shadow_params = state_dict.get("shadow_params", None) if shadow_params is not None: self.shadow_params = shadow_params if not isinstance(self.shadow_params, list): raise ValueError("shadow_params must be a list") if not all(isinstance(p, torch.Tensor) for p in self.shadow_params): raise ValueError("shadow_params must all be Tensors")
diffusers/src/diffusers/training_utils.py/0
{ "file_path": "diffusers/src/diffusers/training_utils.py", "repo_id": "diffusers", "token_count": 12459 }
170
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class CosineDPMSolverMultistepScheduler(metaclass=DummyObject): _backends = ["torch", "torchsde"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "torchsde"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "torchsde"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "torchsde"]) class DPMSolverSDEScheduler(metaclass=DummyObject): _backends = ["torch", "torchsde"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "torchsde"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "torchsde"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "torchsde"])
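# ---------------------------------------------------------------------------
# Illustrative sketch (assumes an environment where `torchsde` is NOT installed):
# these autogenerated placeholders let `from diffusers import DPMSolverSDEScheduler`
# succeed even when the optional backend is missing, deferring the failure until
# the class is actually used, at which point `requires_backends` raises an
# ImportError that names the packages to install.
# ---------------------------------------------------------------------------
from diffusers import DPMSolverSDEScheduler

try:
    scheduler = DPMSolverSDEScheduler()  # with torchsde missing, this hits requires_backends(...)
except ImportError as err:
    print(err)  # message explains that `torch` and `torchsde` are required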
diffusers/src/diffusers/utils/dummy_torch_and_torchsde_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_torchsde_objects.py", "repo_id": "diffusers", "token_count": 416 }
171
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PEFT utilities: Utilities related to peft library """ import collections import importlib from typing import Optional from packaging import version from . import logging from .import_utils import is_peft_available, is_peft_version, is_torch_available from .torch_utils import empty_device_cache logger = logging.get_logger(__name__) if is_torch_available(): import torch def recurse_remove_peft_layers(model): r""" Recursively replace all instances of `LoraLayer` with corresponding new layers in `model`. """ from peft.tuners.tuners_utils import BaseTunerLayer has_base_layer_pattern = False for module in model.modules(): if isinstance(module, BaseTunerLayer): has_base_layer_pattern = hasattr(module, "base_layer") break if has_base_layer_pattern: from peft.utils import _get_submodules key_list = [key for key, _ in model.named_modules() if "lora" not in key] for key in key_list: try: parent, target, target_name = _get_submodules(model, key) except AttributeError: continue if hasattr(target, "base_layer"): setattr(parent, target_name, target.get_base_layer()) else: # This is for backwards compatibility with PEFT <= 0.6.2. # TODO can be removed once that PEFT version is no longer supported. from peft.tuners.lora import LoraLayer for name, module in model.named_children(): if len(list(module.children())) > 0: ## compound module, go inside it recurse_remove_peft_layers(module) module_replaced = False if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear): new_module = torch.nn.Linear( module.in_features, module.out_features, bias=module.bias is not None, ).to(module.weight.device) new_module.weight = module.weight if module.bias is not None: new_module.bias = module.bias module_replaced = True elif isinstance(module, LoraLayer) and isinstance(module, torch.nn.Conv2d): new_module = torch.nn.Conv2d( module.in_channels, module.out_channels, module.kernel_size, module.stride, module.padding, module.dilation, module.groups, ).to(module.weight.device) new_module.weight = module.weight if module.bias is not None: new_module.bias = module.bias module_replaced = True if module_replaced: setattr(model, name, new_module) del module empty_device_cache() return model def scale_lora_layers(model, weight): """ Adjust the weightage given to the LoRA layers of the model. Args: model (`torch.nn.Module`): The model to scale. weight (`float`): The weight to be given to the LoRA layers. """ from peft.tuners.tuners_utils import BaseTunerLayer if weight == 1.0: return for module in model.modules(): if isinstance(module, BaseTunerLayer): module.scale_layer(weight) def unscale_lora_layers(model, weight: Optional[float] = None): """ Removes the previously passed weight given to the LoRA layers of the model. Args: model (`torch.nn.Module`): The model to scale. weight (`float`, *optional*): The weight to be given to the LoRA layers. 
If no scale is passed the scale of the lora layer will be re-initialized to the correct value. If 0.0 is passed, we will re-initialize the scale with the correct value. """ from peft.tuners.tuners_utils import BaseTunerLayer if weight is None or weight == 1.0: return for module in model.modules(): if isinstance(module, BaseTunerLayer): if weight != 0: module.unscale_layer(weight) else: for adapter_name in module.active_adapters: # if weight == 0 unscale should re-set the scale to the original value. module.set_scale(adapter_name, 1.0) def get_peft_kwargs( rank_dict, network_alpha_dict, peft_state_dict, is_unet=True, model_state_dict=None, adapter_name=None ): rank_pattern = {} alpha_pattern = {} r = lora_alpha = list(rank_dict.values())[0] if len(set(rank_dict.values())) > 1: # get the rank occurring the most number of times r = collections.Counter(rank_dict.values()).most_common()[0][0] # for modules with rank different from the most occurring rank, add it to the `rank_pattern` rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items())) rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()} if network_alpha_dict is not None and len(network_alpha_dict) > 0: if len(set(network_alpha_dict.values())) > 1: # get the alpha occurring the most number of times lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0] # for modules with alpha different from the most occurring alpha, add it to the `alpha_pattern` alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items())) if is_unet: alpha_pattern = { ".".join(k.split(".lora_A.")[0].split(".")).replace(".alpha", ""): v for k, v in alpha_pattern.items() } else: alpha_pattern = {".".join(k.split(".down.")[0].split(".")[:-1]): v for k, v in alpha_pattern.items()} else: lora_alpha = set(network_alpha_dict.values()).pop() target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()}) use_dora = any("lora_magnitude_vector" in k for k in peft_state_dict) # for now we know that the "bias" keys are only associated with `lora_B`. 
lora_bias = any("lora_B" in k and k.endswith(".bias") for k in peft_state_dict) lora_config_kwargs = { "r": r, "lora_alpha": lora_alpha, "rank_pattern": rank_pattern, "alpha_pattern": alpha_pattern, "target_modules": target_modules, "use_dora": use_dora, "lora_bias": lora_bias, } return lora_config_kwargs def get_adapter_name(model): from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): return f"default_{len(module.r)}" return "default_0" def set_adapter_layers(model, enabled=True): from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): # The recent version of PEFT needs to call `enable_adapters` instead if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=enabled) else: module.disable_adapters = not enabled def delete_adapter_layers(model, adapter_name): from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, "delete_adapter"): module.delete_adapter(adapter_name) else: raise ValueError( "The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1" ) # For transformers integration - we need to pop the adapter from the config if getattr(model, "_hf_peft_config_loaded", False) and hasattr(model, "peft_config"): model.peft_config.pop(adapter_name, None) # In case all adapters are deleted, we need to delete the config # and make sure to set the flag to False if len(model.peft_config) == 0: del model.peft_config model._hf_peft_config_loaded = None def set_weights_and_activate_adapters(model, adapter_names, weights): from peft.tuners.tuners_utils import BaseTunerLayer def get_module_weight(weight_for_adapter, module_name): if not isinstance(weight_for_adapter, dict): # If weight_for_adapter is a single number, always return it. return weight_for_adapter for layer_name, weight_ in weight_for_adapter.items(): if layer_name in module_name: return weight_ parts = module_name.split(".") # e.g. key = "down_blocks.1.attentions.0" key = f"{parts[0]}.{parts[1]}.attentions.{parts[3]}" block_weight = weight_for_adapter.get(key, 1.0) return block_weight for module_name, module in model.named_modules(): if isinstance(module, BaseTunerLayer): # For backward compatibility with previous PEFT versions, set multiple active adapters if hasattr(module, "set_adapter"): module.set_adapter(adapter_names) else: module.active_adapter = adapter_names # Set the scaling weight for each adapter for this module for adapter_name, weight in zip(adapter_names, weights): module.set_scale(adapter_name, get_module_weight(weight, module_name)) def check_peft_version(min_version: str) -> None: r""" Checks if the version of PEFT is compatible. Args: version (`str`): The version of PEFT to check against. """ if not is_peft_available(): raise ValueError("PEFT is not installed. 
Please install it with `pip install peft`") is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) > version.parse(min_version) if not is_peft_version_compatible: raise ValueError( f"The version of PEFT you are using is not compatible, please use a version that is greater" f" than {min_version}" ) def _create_lora_config( state_dict, network_alphas, metadata, rank_pattern_dict, is_unet=True, model_state_dict=None, adapter_name=None ): from peft import LoraConfig if metadata is not None: lora_config_kwargs = metadata else: lora_config_kwargs = get_peft_kwargs( rank_pattern_dict, network_alpha_dict=network_alphas, peft_state_dict=state_dict, is_unet=is_unet, model_state_dict=model_state_dict, adapter_name=adapter_name, ) _maybe_raise_error_for_ambiguous_keys(lora_config_kwargs) # Version checks for DoRA and lora_bias if "use_dora" in lora_config_kwargs and lora_config_kwargs["use_dora"]: if is_peft_version("<", "0.9.0"): raise ValueError("DoRA requires PEFT >= 0.9.0. Please upgrade.") if "lora_bias" in lora_config_kwargs and lora_config_kwargs["lora_bias"]: if is_peft_version("<=", "0.13.2"): raise ValueError("lora_bias requires PEFT >= 0.14.0. Please upgrade.") try: return LoraConfig(**lora_config_kwargs) except TypeError as e: raise TypeError("`LoraConfig` class could not be instantiated.") from e def _maybe_raise_error_for_ambiguous_keys(config): rank_pattern = config["rank_pattern"].copy() target_modules = config["target_modules"] for key in list(rank_pattern.keys()): # try to detect ambiguity # `target_modules` can also be a str, in which case this loop would loop # over the chars of the str. The technically correct way to match LoRA keys # in PEFT is to use LoraModel._check_target_module_exists (lora_config, key). # But this cuts it for now. exact_matches = [mod for mod in target_modules if mod == key] substring_matches = [mod for mod in target_modules if key in mod and mod != key] if exact_matches and substring_matches: if is_peft_version("<", "0.14.1"): raise ValueError( "There are ambiguous keys present in this LoRA. To load it, please update your `peft` installation - `pip install -U peft`." ) def _maybe_warn_for_unhandled_keys(incompatible_keys, adapter_name): warn_msg = "" if incompatible_keys is not None: # Check only for unexpected keys. unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: lora_unexpected_keys = [k for k in unexpected_keys if "lora_" in k and adapter_name in k] if lora_unexpected_keys: warn_msg = ( f"Loading adapter weights from state_dict led to unexpected keys found in the model:" f" {', '.join(lora_unexpected_keys)}. " ) # Filter missing keys specific to the current adapter. missing_keys = getattr(incompatible_keys, "missing_keys", None) if missing_keys: lora_missing_keys = [k for k in missing_keys if "lora_" in k and adapter_name in k] if lora_missing_keys: warn_msg += ( f"Loading adapter weights from state_dict led to missing keys in the model:" f" {', '.join(lora_missing_keys)}." ) if warn_msg: logger.warning(warn_msg)
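# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): temporarily rescaling
# LoRA layers with `scale_lora_layers` / `unscale_lora_layers` defined above.
# The `TinyAttention` module and its `to_q` projection are assumptions made for
# a self-contained demo; in practice the model is a UNet/transformer with LoRA
# weights loaded through a pipeline.
# ---------------------------------------------------------------------------
import torch
from peft import LoraConfig, inject_adapter_in_model

from diffusers.utils.peft_utils import scale_lora_layers, unscale_lora_layers


class TinyAttention(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.to_q = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.to_q(x)


model = TinyAttention()
# `init_lora_weights=False` keeps lora_B non-zero so the adapter visibly changes the output.
inject_adapter_in_model(LoraConfig(r=4, target_modules=["to_q"], init_lora_weights=False), model)

x = torch.randn(1, 8)
scale_lora_layers(model, weight=0.5)    # halve the LoRA contribution for this forward pass
y_half_lora = model(x)
unscale_lora_layers(model, weight=0.5)  # undo the temporary scaling
y_full_lora = model(x)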
diffusers/src/diffusers/utils/peft_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/peft_utils.py", "repo_id": "diffusers", "token_count": 6167 }
172
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from diffusers.hooks import HookRegistry, ModelHook from diffusers.training_utils import free_memory from diffusers.utils.logging import get_logger from diffusers.utils.testing_utils import CaptureLogger, torch_device logger = get_logger(__name__) # pylint: disable=invalid-name class DummyBlock(torch.nn.Module): def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None: super().__init__() self.proj_in = torch.nn.Linear(in_features, hidden_features) self.activation = torch.nn.ReLU() self.proj_out = torch.nn.Linear(hidden_features, out_features) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj_in(x) x = self.activation(x) x = self.proj_out(x) return x class DummyModel(torch.nn.Module): def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None: super().__init__() self.linear_1 = torch.nn.Linear(in_features, hidden_features) self.activation = torch.nn.ReLU() self.blocks = torch.nn.ModuleList( [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] ) self.linear_2 = torch.nn.Linear(hidden_features, out_features) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.linear_1(x) x = self.activation(x) for block in self.blocks: x = block(x) x = self.linear_2(x) return x class AddHook(ModelHook): def __init__(self, value: int): super().__init__() self.value = value def pre_forward(self, module: torch.nn.Module, *args, **kwargs): logger.debug("AddHook pre_forward") args = ((x + self.value) if torch.is_tensor(x) else x for x in args) return args, kwargs def post_forward(self, module, output): logger.debug("AddHook post_forward") return output class MultiplyHook(ModelHook): def __init__(self, value: int): super().__init__() self.value = value def pre_forward(self, module, *args, **kwargs): logger.debug("MultiplyHook pre_forward") args = ((x * self.value) if torch.is_tensor(x) else x for x in args) return args, kwargs def post_forward(self, module, output): logger.debug("MultiplyHook post_forward") return output def __repr__(self): return f"MultiplyHook(value={self.value})" class StatefulAddHook(ModelHook): _is_stateful = True def __init__(self, value: int): super().__init__() self.value = value self.increment = 0 def pre_forward(self, module, *args, **kwargs): logger.debug("StatefulAddHook pre_forward") add_value = self.value + self.increment self.increment += 1 args = ((x + add_value) if torch.is_tensor(x) else x for x in args) return args, kwargs def reset_state(self, module): self.increment = 0 class SkipLayerHook(ModelHook): def __init__(self, skip_layer: bool): super().__init__() self.skip_layer = skip_layer def pre_forward(self, module, *args, **kwargs): logger.debug("SkipLayerHook pre_forward") return args, kwargs def new_forward(self, module, *args, **kwargs): logger.debug("SkipLayerHook new_forward") if self.skip_layer: return args[0] return 
self.fn_ref.original_forward(*args, **kwargs) def post_forward(self, module, output): logger.debug("SkipLayerHook post_forward") return output class HookTests(unittest.TestCase): in_features = 4 hidden_features = 8 out_features = 4 num_layers = 2 def setUp(self): params = self.get_module_parameters() self.model = DummyModel(**params) self.model.to(torch_device) def tearDown(self): super().tearDown() del self.model gc.collect() free_memory() def get_module_parameters(self): return { "in_features": self.in_features, "hidden_features": self.hidden_features, "out_features": self.out_features, "num_layers": self.num_layers, } def get_generator(self): return torch.manual_seed(0) def test_hook_registry(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) registry.register_hook(AddHook(1), "add_hook") registry.register_hook(MultiplyHook(2), "multiply_hook") registry_repr = repr(registry) expected_repr = "HookRegistry(\n (0) add_hook - AddHook\n (1) multiply_hook - MultiplyHook(value=2)\n)" self.assertEqual(len(registry.hooks), 2) self.assertEqual(registry._hook_order, ["add_hook", "multiply_hook"]) self.assertEqual(registry_repr, expected_repr) registry.remove_hook("add_hook") self.assertEqual(len(registry.hooks), 1) self.assertEqual(registry._hook_order, ["multiply_hook"]) def test_stateful_hook(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) registry.register_hook(StatefulAddHook(1), "stateful_add_hook") self.assertEqual(registry.hooks["stateful_add_hook"].increment, 0) input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) num_repeats = 3 for i in range(num_repeats): result = self.model(input) if i == 0: output1 = result self.assertEqual(registry.get_hook("stateful_add_hook").increment, num_repeats) registry.reset_stateful_hooks() output2 = self.model(input) self.assertEqual(registry.get_hook("stateful_add_hook").increment, 1) self.assertTrue(torch.allclose(output1, output2)) def test_inference(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) registry.register_hook(AddHook(1), "add_hook") registry.register_hook(MultiplyHook(2), "multiply_hook") input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) output1 = self.model(input).mean().detach().cpu().item() registry.remove_hook("multiply_hook") new_input = input * 2 output2 = self.model(new_input).mean().detach().cpu().item() registry.remove_hook("add_hook") new_input = input * 2 + 1 output3 = self.model(new_input).mean().detach().cpu().item() self.assertAlmostEqual(output1, output2, places=5) self.assertAlmostEqual(output1, output3, places=5) def test_skip_layer_hook(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook") input = torch.zeros(1, 4, device=torch_device) output = self.model(input).mean().detach().cpu().item() self.assertEqual(output, 0.0) registry.remove_hook("skip_layer_hook") registry.register_hook(SkipLayerHook(skip_layer=False), "skip_layer_hook") output = self.model(input).mean().detach().cpu().item() self.assertNotEqual(output, 0.0) def test_skip_layer_internal_block(self): registry = HookRegistry.check_if_exists_or_initialize(self.model.linear_1) input = torch.zeros(1, 4, device=torch_device) registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook") with self.assertRaises(RuntimeError) as cm: self.model(input).mean().detach().cpu().item() self.assertIn("mat1 and mat2 shapes cannot be multiplied", 
str(cm.exception)) registry.remove_hook("skip_layer_hook") output = self.model(input).mean().detach().cpu().item() self.assertNotEqual(output, 0.0) registry = HookRegistry.check_if_exists_or_initialize(self.model.blocks[1]) registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook") output = self.model(input).mean().detach().cpu().item() self.assertNotEqual(output, 0.0) def test_invocation_order_stateful_first(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) registry.register_hook(StatefulAddHook(1), "add_hook") registry.register_hook(AddHook(2), "add_hook_2") registry.register_hook(MultiplyHook(3), "multiply_hook") input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) logger = get_logger(__name__) logger.setLevel("DEBUG") with CaptureLogger(logger) as cap_logger: self.model(input) output = cap_logger.out.replace(" ", "").replace("\n", "") expected_invocation_order_log = ( ( "MultiplyHook pre_forward\n" "AddHook pre_forward\n" "StatefulAddHook pre_forward\n" "AddHook post_forward\n" "MultiplyHook post_forward\n" ) .replace(" ", "") .replace("\n", "") ) self.assertEqual(output, expected_invocation_order_log) registry.remove_hook("add_hook") with CaptureLogger(logger) as cap_logger: self.model(input) output = cap_logger.out.replace(" ", "").replace("\n", "") expected_invocation_order_log = ( ("MultiplyHook pre_forward\nAddHook pre_forward\nAddHook post_forward\nMultiplyHook post_forward\n") .replace(" ", "") .replace("\n", "") ) self.assertEqual(output, expected_invocation_order_log) def test_invocation_order_stateful_middle(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) registry.register_hook(AddHook(2), "add_hook") registry.register_hook(StatefulAddHook(1), "add_hook_2") registry.register_hook(MultiplyHook(3), "multiply_hook") input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) logger = get_logger(__name__) logger.setLevel("DEBUG") with CaptureLogger(logger) as cap_logger: self.model(input) output = cap_logger.out.replace(" ", "").replace("\n", "") expected_invocation_order_log = ( ( "MultiplyHook pre_forward\n" "StatefulAddHook pre_forward\n" "AddHook pre_forward\n" "AddHook post_forward\n" "MultiplyHook post_forward\n" ) .replace(" ", "") .replace("\n", "") ) self.assertEqual(output, expected_invocation_order_log) registry.remove_hook("add_hook") with CaptureLogger(logger) as cap_logger: self.model(input) output = cap_logger.out.replace(" ", "").replace("\n", "") expected_invocation_order_log = ( ("MultiplyHook pre_forward\nStatefulAddHook pre_forward\nMultiplyHook post_forward\n") .replace(" ", "") .replace("\n", "") ) self.assertEqual(output, expected_invocation_order_log) registry.remove_hook("add_hook_2") with CaptureLogger(logger) as cap_logger: self.model(input) output = cap_logger.out.replace(" ", "").replace("\n", "") expected_invocation_order_log = ( ("MultiplyHook pre_forward\nMultiplyHook post_forward\n").replace(" ", "").replace("\n", "") ) self.assertEqual(output, expected_invocation_order_log) def test_invocation_order_stateful_last(self): registry = HookRegistry.check_if_exists_or_initialize(self.model) registry.register_hook(AddHook(1), "add_hook") registry.register_hook(MultiplyHook(2), "multiply_hook") registry.register_hook(StatefulAddHook(3), "add_hook_2") input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) logger = get_logger(__name__) logger.setLevel("DEBUG") with CaptureLogger(logger) as cap_logger: self.model(input) output 
= cap_logger.out.replace(" ", "").replace("\n", "") expected_invocation_order_log = ( ( "StatefulAddHook pre_forward\n" "MultiplyHook pre_forward\n" "AddHook pre_forward\n" "AddHook post_forward\n" "MultiplyHook post_forward\n" ) .replace(" ", "") .replace("\n", "") ) self.assertEqual(output, expected_invocation_order_log) registry.remove_hook("add_hook") with CaptureLogger(logger) as cap_logger: self.model(input) output = cap_logger.out.replace(" ", "").replace("\n", "") expected_invocation_order_log = ( ("StatefulAddHook pre_forward\nMultiplyHook pre_forward\nMultiplyHook post_forward\n") .replace(" ", "") .replace("\n", "") ) self.assertEqual(output, expected_invocation_order_log)
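# ---------------------------------------------------------------------------
# Illustrative sketch (not one of the test cases above): a minimal custom hook
# that records input shapes, registered through the same `HookRegistry` API the
# tests exercise. `ShapeLoggingHook` and its attribute names are assumptions
# made for demonstration only.
# ---------------------------------------------------------------------------
import torch

from diffusers.hooks import HookRegistry, ModelHook


class ShapeLoggingHook(ModelHook):
    def __init__(self):
        super().__init__()
        self.seen_shapes = []

    def pre_forward(self, module, *args, **kwargs):
        # Record the shape of every tensor positional argument before the wrapped forward runs.
        self.seen_shapes.extend(tuple(x.shape) for x in args if torch.is_tensor(x))
        return args, kwargs


linear = torch.nn.Linear(4, 4)
registry = HookRegistry.check_if_exists_or_initialize(linear)
registry.register_hook(ShapeLoggingHook(), "shape_logging_hook")

linear(torch.randn(2, 4))
print(registry.get_hook("shape_logging_hook").seen_shapes)  # [(2, 4)]
registry.remove_hook("shape_logging_hook")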
diffusers/tests/hooks/test_hooks.py/0
{ "file_path": "diffusers/tests/hooks/test_hooks.py", "repo_id": "diffusers", "token_count": 6091 }
173
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import re import tempfile import unittest from itertools import product import numpy as np import pytest import torch from parameterized import parameterized from diffusers import ( AutoencoderKL, DDIMScheduler, LCMScheduler, UNet2DConditionModel, ) from diffusers.utils import logging from diffusers.utils.import_utils import is_peft_available from diffusers.utils.testing_utils import ( CaptureLogger, check_if_dicts_are_equal, floats_tensor, is_torch_version, require_peft_backend, require_peft_version_greater, require_torch_accelerator, require_transformers_version_greater, skip_mps, torch_device, ) if is_peft_available(): from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import get_peft_model_state_dict def state_dicts_almost_equal(sd1, sd2): sd1 = dict(sorted(sd1.items())) sd2 = dict(sorted(sd2.items())) models_are_equal = True for ten1, ten2 in zip(sd1.values(), sd2.values()): if (ten1 - ten2).abs().max() > 1e-3: models_are_equal = False return models_are_equal def check_if_lora_correctly_set(model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False def check_module_lora_metadata(parsed_metadata: dict, lora_metadatas: dict, module_key: str): extracted = { k.removeprefix(f"{module_key}."): v for k, v in parsed_metadata.items() if k.startswith(f"{module_key}.") } check_if_dicts_are_equal(extracted, lora_metadatas[f"{module_key}_lora_adapter_metadata"]) def initialize_dummy_state_dict(state_dict): if not all(v.device.type == "meta" for _, v in state_dict.items()): raise ValueError("`state_dict` has non-meta values.") return {k: torch.randn(v.shape, device=torch_device, dtype=v.dtype) for k, v in state_dict.items()} POSSIBLE_ATTENTION_KWARGS_NAMES = ["cross_attention_kwargs", "joint_attention_kwargs", "attention_kwargs"] def determine_attention_kwargs_name(pipeline_class): call_signature_keys = inspect.signature(pipeline_class.__call__).parameters.keys() # TODO(diffusers): Discuss a common naming convention across library for 1.0.0 release for possible_attention_kwargs in POSSIBLE_ATTENTION_KWARGS_NAMES: if possible_attention_kwargs in call_signature_keys: attention_kwargs_name = possible_attention_kwargs break assert attention_kwargs_name is not None return attention_kwargs_name @require_peft_backend class PeftLoraLoaderMixinTests: pipeline_class = None scheduler_cls = None scheduler_kwargs = None scheduler_classes = [DDIMScheduler, LCMScheduler] has_two_text_encoders = False has_three_text_encoders = False text_encoder_cls, text_encoder_id, text_encoder_subfolder = None, None, "" text_encoder_2_cls, text_encoder_2_id, text_encoder_2_subfolder = None, None, "" text_encoder_3_cls, text_encoder_3_id, text_encoder_3_subfolder = None, None, "" tokenizer_cls, tokenizer_id, 
tokenizer_subfolder = None, None, "" tokenizer_2_cls, tokenizer_2_id, tokenizer_2_subfolder = None, None, "" tokenizer_3_cls, tokenizer_3_id, tokenizer_3_subfolder = None, None, "" unet_kwargs = None transformer_cls = None transformer_kwargs = None vae_cls = AutoencoderKL vae_kwargs = None text_encoder_target_modules = ["q_proj", "k_proj", "v_proj", "out_proj"] denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0"] def get_dummy_components(self, scheduler_cls=None, use_dora=False, lora_alpha=None): if self.unet_kwargs and self.transformer_kwargs: raise ValueError("Both `unet_kwargs` and `transformer_kwargs` cannot be specified.") if self.has_two_text_encoders and self.has_three_text_encoders: raise ValueError("Both `has_two_text_encoders` and `has_three_text_encoders` cannot be True.") scheduler_cls = self.scheduler_cls if scheduler_cls is None else scheduler_cls rank = 4 lora_alpha = rank if lora_alpha is None else lora_alpha torch.manual_seed(0) if self.unet_kwargs is not None: unet = UNet2DConditionModel(**self.unet_kwargs) else: transformer = self.transformer_cls(**self.transformer_kwargs) scheduler = scheduler_cls(**self.scheduler_kwargs) torch.manual_seed(0) vae = self.vae_cls(**self.vae_kwargs) text_encoder = self.text_encoder_cls.from_pretrained( self.text_encoder_id, subfolder=self.text_encoder_subfolder ) tokenizer = self.tokenizer_cls.from_pretrained(self.tokenizer_id, subfolder=self.tokenizer_subfolder) if self.text_encoder_2_cls is not None: text_encoder_2 = self.text_encoder_2_cls.from_pretrained( self.text_encoder_2_id, subfolder=self.text_encoder_2_subfolder ) tokenizer_2 = self.tokenizer_2_cls.from_pretrained( self.tokenizer_2_id, subfolder=self.tokenizer_2_subfolder ) if self.text_encoder_3_cls is not None: text_encoder_3 = self.text_encoder_3_cls.from_pretrained( self.text_encoder_3_id, subfolder=self.text_encoder_3_subfolder ) tokenizer_3 = self.tokenizer_3_cls.from_pretrained( self.tokenizer_3_id, subfolder=self.tokenizer_3_subfolder ) text_lora_config = LoraConfig( r=rank, lora_alpha=lora_alpha, target_modules=self.text_encoder_target_modules, init_lora_weights=False, use_dora=use_dora, ) denoiser_lora_config = LoraConfig( r=rank, lora_alpha=lora_alpha, target_modules=self.denoiser_target_modules, init_lora_weights=False, use_dora=use_dora, ) pipeline_components = { "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } # Denoiser if self.unet_kwargs is not None: pipeline_components.update({"unet": unet}) elif self.transformer_kwargs is not None: pipeline_components.update({"transformer": transformer}) # Remaining text encoders. 
if self.text_encoder_2_cls is not None: pipeline_components.update({"tokenizer_2": tokenizer_2, "text_encoder_2": text_encoder_2}) if self.text_encoder_3_cls is not None: pipeline_components.update({"tokenizer_3": tokenizer_3, "text_encoder_3": text_encoder_3}) # Remaining stuff init_params = inspect.signature(self.pipeline_class.__init__).parameters if "safety_checker" in init_params: pipeline_components.update({"safety_checker": None}) if "feature_extractor" in init_params: pipeline_components.update({"feature_extractor": None}) if "image_encoder" in init_params: pipeline_components.update({"image_encoder": None}) return pipeline_components, text_lora_config, denoiser_lora_config @property def output_shape(self): raise NotImplementedError def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 10 num_channels = 4 sizes = (32, 32) generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_channels) + sizes) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "A painting of a squirrel eating a burger", "num_inference_steps": 5, "guidance_scale": 6.0, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs # Copied from: https://colab.research.google.com/gist/sayakpaul/df2ef6e1ae6d8c10a49d859883b10860/scratchpad.ipynb def get_dummy_tokens(self): max_seq_length = 77 inputs = torch.randint(2, 56, size=(1, max_seq_length), generator=torch.manual_seed(0)) prepared_inputs = {} prepared_inputs["input_ids"] = inputs return prepared_inputs def _get_lora_state_dicts(self, modules_to_save): state_dicts = {} for module_name, module in modules_to_save.items(): if module is not None: state_dicts[f"{module_name}_lora_layers"] = get_peft_model_state_dict(module) return state_dicts def _get_lora_adapter_metadata(self, modules_to_save): metadatas = {} for module_name, module in modules_to_save.items(): if module is not None: metadatas[f"{module_name}_lora_adapter_metadata"] = module.peft_config["default"].to_dict() return metadatas def _get_modules_to_save(self, pipe, has_denoiser=False): modules_to_save = {} lora_loadable_modules = self.pipeline_class._lora_loadable_modules if ( "text_encoder" in lora_loadable_modules and hasattr(pipe, "text_encoder") and getattr(pipe.text_encoder, "peft_config", None) is not None ): modules_to_save["text_encoder"] = pipe.text_encoder if ( "text_encoder_2" in lora_loadable_modules and hasattr(pipe, "text_encoder_2") and getattr(pipe.text_encoder_2, "peft_config", None) is not None ): modules_to_save["text_encoder_2"] = pipe.text_encoder_2 if has_denoiser: if "unet" in lora_loadable_modules and hasattr(pipe, "unet"): modules_to_save["unet"] = pipe.unet if "transformer" in lora_loadable_modules and hasattr(pipe, "transformer"): modules_to_save["transformer"] = pipe.transformer return modules_to_save def add_adapters_to_pipeline(self, pipe, text_lora_config=None, denoiser_lora_config=None, adapter_name="default"): if text_lora_config is not None: if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, adapter_name=adapter_name) self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) if denoiser_lora_config is not None: denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, adapter_name=adapter_name) 
self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") else: denoiser = None if text_lora_config is not None and self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder_2.add_adapter(text_lora_config, adapter_name=adapter_name) self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) return pipe, denoiser def test_simple_inference(self): """ Tests a simple inference and makes sure it works as expected """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs() output_no_lora = pipe(**inputs)[0] self.assertTrue(output_no_lora.shape == self.output_shape) def test_simple_inference_with_text_lora(self): """ Tests a simple inference with lora attached on the text encoder and makes sure it works as expected """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" ) @require_peft_version_greater("0.13.1") def test_low_cpu_mem_usage_with_injection(self): """Tests if we can inject LoRA state dict with low_cpu_mem_usage.""" for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) if "text_encoder" in self.pipeline_class._lora_loadable_modules: inject_adapter_in_model(text_lora_config, pipe.text_encoder, low_cpu_mem_usage=True) self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder." ) self.assertTrue( "meta" in {p.device.type for p in pipe.text_encoder.parameters()}, "The LoRA params should be on 'meta' device.", ) te_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(pipe.text_encoder)) set_peft_model_state_dict(pipe.text_encoder, te_state_dict, low_cpu_mem_usage=True) self.assertTrue( "meta" not in {p.device.type for p in pipe.text_encoder.parameters()}, "No param should be on 'meta' device.", ) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet inject_adapter_in_model(denoiser_lora_config, denoiser, low_cpu_mem_usage=True) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") self.assertTrue( "meta" in {p.device.type for p in denoiser.parameters()}, "The LoRA params should be on 'meta' device." 
) denoiser_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(denoiser)) set_peft_model_state_dict(denoiser, denoiser_state_dict, low_cpu_mem_usage=True) self.assertTrue( "meta" not in {p.device.type for p in denoiser.parameters()}, "No param should be on 'meta' device." ) if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: inject_adapter_in_model(text_lora_config, pipe.text_encoder_2, low_cpu_mem_usage=True) self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) self.assertTrue( "meta" in {p.device.type for p in pipe.text_encoder_2.parameters()}, "The LoRA params should be on 'meta' device.", ) te2_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(pipe.text_encoder_2)) set_peft_model_state_dict(pipe.text_encoder_2, te2_state_dict, low_cpu_mem_usage=True) self.assertTrue( "meta" not in {p.device.type for p in pipe.text_encoder_2.parameters()}, "No param should be on 'meta' device.", ) _, _, inputs = self.get_dummy_inputs() output_lora = pipe(**inputs)[0] self.assertTrue(output_lora.shape == self.output_shape) @require_peft_version_greater("0.13.1") @require_transformers_version_greater("4.45.2") def test_low_cpu_mem_usage_with_loading(self): """Tests if we can load LoRA state dict with low_cpu_mem_usage.""" for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) pipe.unload_lora_weights() pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), low_cpu_mem_usage=False) for module_name, module in modules_to_save.items(): self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3), "Loading from saved checkpoints should give same results.", ) # Now, check for `low_cpu_mem_usage.` pipe.unload_lora_weights() pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), low_cpu_mem_usage=True) for module_name, module in modules_to_save.items(): self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") images_lora_from_pretrained_low_cpu = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose( images_lora_from_pretrained_low_cpu, images_lora_from_pretrained, atol=1e-3, rtol=1e-3 ), "Loading from saved checkpoints with `low_cpu_mem_usage` should give same results.", ) def test_simple_inference_with_text_lora_and_scale(self): """ Tests 
a simple inference with lora attached on the text encoder + scale argument and makes sure it works as expected """ attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" ) attention_kwargs = {attention_kwargs_name: {"scale": 0.5}} output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] self.assertTrue( not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3), "Lora + scale should change the output", ) attention_kwargs = {attention_kwargs_name: {"scale": 0.0}} output_lora_0_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] self.assertTrue( np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3), "Lora + 0 scale should lead to same result as no LoRA", ) def test_simple_inference_with_text_lora_fused(self): """ Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model and makes sure it works as expected """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) pipe.fuse_lora() # Fusing should still keep the LoRA layers self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) ouput_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(ouput_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output" ) def test_simple_inference_with_text_lora_unloaded(self): """ Tests a simple inference with lora attached to text encoder, then unloads the lora weights and makes sure it works as expected """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) pipe.unload_lora_weights() # unloading should remove the LoRA layers self.assertFalse( 
                check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder"
            )

            if self.has_two_text_encoders or self.has_three_text_encoders:
                if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                    self.assertFalse(
                        check_if_lora_correctly_set(pipe.text_encoder_2),
                        "Lora not correctly unloaded in text encoder 2",
                    )

            output_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(
                np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3),
                "Unloading LoRA should give the same result as no LoRA",
            )

    def test_simple_inference_with_text_lora_save_load(self):
        """
        Tests a simple usecase where users could use saving utilities for LoRA.
        """
        for scheduler_cls in self.scheduler_classes:
            components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(output_no_lora.shape == self.output_shape)

            pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

            images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

            with tempfile.TemporaryDirectory() as tmpdirname:
                modules_to_save = self._get_modules_to_save(pipe)
                lora_state_dicts = self._get_lora_state_dicts(modules_to_save)

                self.pipeline_class.save_lora_weights(
                    save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts
                )

                self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
                pipe.unload_lora_weights()
                pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))

            for module_name, module in modules_to_save.items():
                self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}")

            images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0]

            self.assertTrue(
                np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
                "Loading from saved checkpoints should give same results.",
            )

    def test_simple_inference_with_partial_text_lora(self):
        """
        Tests a simple inference with lora attached on the text encoder
        with different ranks and some adapters removed
        and makes sure it works as expected
        """
        for scheduler_cls in self.scheduler_classes:
            components, _, _ = self.get_dummy_components(scheduler_cls)
            # Verify `StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder` handles different ranks per module (PR#8324).
            text_lora_config = LoraConfig(
                r=4,
                rank_pattern={self.text_encoder_target_modules[i]: i + 1 for i in range(3)},
                lora_alpha=4,
                target_modules=self.text_encoder_target_modules,
                init_lora_weights=False,
                use_dora=False,
            )
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(output_no_lora.shape == self.output_shape)

            pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

            state_dict = {}
            if "text_encoder" in self.pipeline_class._lora_loadable_modules:
                # Gather the state dict for the PEFT model, excluding `layers.4`, to ensure `load_lora_into_text_encoder`
                # supports missing layers (PR#8324).
state_dict = { f"text_encoder.{module_name}": param for module_name, param in get_peft_model_state_dict(pipe.text_encoder).items() if "text_model.encoder.layers.4" not in module_name } if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: state_dict.update( { f"text_encoder_2.{module_name}": param for module_name, param in get_peft_model_state_dict(pipe.text_encoder_2).items() if "text_model.encoder.layers.4" not in module_name } ) output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" ) # Unload lora and load it back using the pipe.load_lora_weights machinery pipe.unload_lora_weights() pipe.load_lora_weights(state_dict) output_partial_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( not np.allclose(output_partial_lora, output_lora, atol=1e-3, rtol=1e-3), "Removing adapters should change the output", ) def test_simple_inference_save_pretrained_with_text_lora(self): """ Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname) pipe_from_pretrained.to(torch_device) if "text_encoder" in self.pipeline_class._lora_loadable_modules: self.assertTrue( check_if_lora_correctly_set(pipe_from_pretrained.text_encoder), "Lora not correctly set in text encoder", ) if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: self.assertTrue( check_if_lora_correctly_set(pipe_from_pretrained.text_encoder_2), "Lora not correctly set in text encoder 2", ) images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3), "Loading from saved checkpoints should give same results.", ) def test_simple_inference_with_text_denoiser_lora_save_load(self): """ Tests a simple usecase where users could use saving utilities for LoRA for Unet + text encoder """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) 
lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) pipe.unload_lora_weights() pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin")) for module_name, module in modules_to_save.items(): self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3), "Loading from saved checkpoints should give same results.", ) def test_simple_inference_with_text_denoiser_lora_and_scale(self): """ Tests a simple inference with lora attached on the text encoder + Unet + scale argument and makes sure it works as expected """ attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" ) attention_kwargs = {attention_kwargs_name: {"scale": 0.5}} output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] self.assertTrue( not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3), "Lora + scale should change the output", ) attention_kwargs = {attention_kwargs_name: {"scale": 0.0}} output_lora_0_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] self.assertTrue( np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3), "Lora + 0 scale should lead to same result as no LoRA", ) if "text_encoder" in self.pipeline_class._lora_loadable_modules: self.assertTrue( pipe.text_encoder.text_model.encoder.layers[0].self_attn.q_proj.scaling["default"] == 1.0, "The scaling parameter has not been correctly restored!", ) def test_simple_inference_with_text_lora_denoiser_fused(self): """ Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model and makes sure it works as expected - with unet """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules) # Fusing should still keep the LoRA layers if "text_encoder" in self.pipeline_class._lora_loadable_modules: self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not 
correctly set in text encoder"
                )

            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser")

            if self.has_two_text_encoders or self.has_three_text_encoders:
                if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                    self.assertTrue(
                        check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                    )

            output_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertFalse(
                np.allclose(output_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output"
            )

    def test_simple_inference_with_text_denoiser_lora_unloaded(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights
        and makes sure it works as expected
        """
        for scheduler_cls in self.scheduler_classes:
            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(output_no_lora.shape == self.output_shape)

            pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

            pipe.unload_lora_weights()
            # unloading should remove the LoRA layers
            self.assertFalse(
                check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder"
            )
            self.assertFalse(check_if_lora_correctly_set(denoiser), "Lora not correctly unloaded in denoiser")

            if self.has_two_text_encoders or self.has_three_text_encoders:
                if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                    self.assertFalse(
                        check_if_lora_correctly_set(pipe.text_encoder_2),
                        "Lora not correctly unloaded in text encoder 2",
                    )

            output_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(
                np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3),
                "Unloading LoRA should give the same result as no LoRA",
            )

    def test_simple_inference_with_text_denoiser_lora_unfused(
        self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3
    ):
        """
        Tests a simple inference with lora attached to text encoder and unet, then fuses and unfuses the lora
        weights and makes sure the fused and unfused outputs match
        """
        for scheduler_cls in self.scheduler_classes:
            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules)
            self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")
            output_fused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

            pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules)
            self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")
            output_unfused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

            # unfusing should keep the LoRA layers attached
            if "text_encoder" in self.pipeline_class._lora_loadable_modules:
                self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers")

            self.assertTrue(check_if_lora_correctly_set(denoiser), "Unfuse should still keep LoRA layers")

            if self.has_two_text_encoders or self.has_three_text_encoders:
                if "text_encoder_2" in
self.pipeline_class._lora_loadable_modules: self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Unfuse should still keep LoRA layers" ) # Fuse and unfuse should lead to the same results self.assertTrue( np.allclose(output_fused_lora, output_unfused_lora, atol=expected_atol, rtol=expected_rtol), "Fused lora should not change the output", ) def test_simple_inference_with_text_denoiser_multi_adapter(self): """ Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set them """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") denoiser.add_adapter(denoiser_lora_config, "adapter-2") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) pipe.set_adapters("adapter-1") output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(output_no_lora, output_adapter_1, atol=1e-3, rtol=1e-3), "Adapter outputs should be different.", ) pipe.set_adapters("adapter-2") output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(output_no_lora, output_adapter_2, atol=1e-3, rtol=1e-3), "Adapter outputs should be different.", ) pipe.set_adapters(["adapter-1", "adapter-2"]) output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(output_no_lora, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter outputs should be different.", ) # Fuse and unfuse should lead to the same results self.assertFalse( np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), "Adapter 1 and 2 should give different results", ) self.assertFalse( np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 1 and mixed adapters should give different results", ) self.assertFalse( np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 2 and mixed adapters should give different results", ) pipe.disable_lora() output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), "output with no lora and output with lora disabled should give same results", ) def test_wrong_adapter_name_raises_error(self): adapter_name = "adapter-1" scheduler_cls = self.scheduler_classes[0] components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = 
self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe, _ = self.add_adapters_to_pipeline( pipe, text_lora_config, denoiser_lora_config, adapter_name=adapter_name ) with self.assertRaises(ValueError) as err_context: pipe.set_adapters("test") self.assertTrue("not in the list of present adapters" in str(err_context.exception)) # test this works. pipe.set_adapters(adapter_name) _ = pipe(**inputs, generator=torch.manual_seed(0))[0] def test_multiple_wrong_adapter_name_raises_error(self): adapter_name = "adapter-1" scheduler_cls = self.scheduler_classes[0] components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe, _ = self.add_adapters_to_pipeline( pipe, text_lora_config, denoiser_lora_config, adapter_name=adapter_name ) scale_with_wrong_components = {"foo": 0.0, "bar": 0.0, "tik": 0.0} logger = logging.get_logger("diffusers.loaders.lora_base") logger.setLevel(30) with CaptureLogger(logger) as cap_logger: pipe.set_adapters(adapter_name, adapter_weights=scale_with_wrong_components) wrong_components = sorted(set(scale_with_wrong_components.keys())) msg = f"The following components in `adapter_weights` are not part of the pipeline: {wrong_components}. " self.assertTrue(msg in str(cap_logger.out)) # test this works. pipe.set_adapters(adapter_name) _ = pipe(**inputs, generator=torch.manual_seed(0))[0] def test_simple_inference_with_text_denoiser_block_scale(self): """ Tests a simple inference with lora attached to text encoder and unet, attaches one adapter and set different weights for different blocks (i.e. 
block lora) """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) weights_1 = {"text_encoder": 2, "unet": {"down": 5}} pipe.set_adapters("adapter-1", weights_1) output_weights_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] weights_2 = {"unet": {"up": 5}} pipe.set_adapters("adapter-1", weights_2) output_weights_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(output_weights_1, output_weights_2, atol=1e-3, rtol=1e-3), "LoRA weights 1 and 2 should give different results", ) self.assertFalse( np.allclose(output_no_lora, output_weights_1, atol=1e-3, rtol=1e-3), "No adapter and LoRA weights 1 should give different results", ) self.assertFalse( np.allclose(output_no_lora, output_weights_2, atol=1e-3, rtol=1e-3), "No adapter and LoRA weights 2 should give different results", ) pipe.disable_lora() output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), "output with no lora and output with lora disabled should give same results", ) def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): """ Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set different weights for different blocks (i.e. 
block lora) """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") denoiser.add_adapter(denoiser_lora_config, "adapter-2") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) scales_1 = {"text_encoder": 2, "unet": {"down": 5}} scales_2 = {"unet": {"down": 5, "mid": 5}} pipe.set_adapters("adapter-1", scales_1) output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.set_adapters("adapter-2", scales_2) output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1, scales_2]) output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] # Fuse and unfuse should lead to the same results self.assertFalse( np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), "Adapter 1 and 2 should give different results", ) self.assertFalse( np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 1 and mixed adapters should give different results", ) self.assertFalse( np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 2 and mixed adapters should give different results", ) pipe.disable_lora() output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), "output with no lora and output with lora disabled should give same results", ) # a mismatching number of adapter_names and adapter_weights should raise an error with self.assertRaises(ValueError): pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1]) def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): """Tests that any valid combination of lora block scales can be used in pipe.set_adapter""" def updown_options(blocks_with_tf, layers_per_block, value): """ Generate every possible combination for how a lora weight dict for the up/down part can be. E.g. 2, {"block_1": 2}, {"block_1": [2,2,2]}, {"block_1": 2, "block_2": [2,2,2]}, ... 
""" num_val = value list_val = [value] * layers_per_block node_opts = [None, num_val, list_val] node_opts_foreach_block = [node_opts] * len(blocks_with_tf) updown_opts = [num_val] for nodes in product(*node_opts_foreach_block): if all(n is None for n in nodes): continue opt = {} for b, n in zip(blocks_with_tf, nodes): if n is not None: opt["block_" + str(b)] = n updown_opts.append(opt) return updown_opts def all_possible_dict_opts(unet, value): """ Generate every possible combination for how a lora weight dict can be. E.g. 2, {"unet: {"down": 2}}, {"unet: {"down": [2,2,2]}}, {"unet: {"mid": 2, "up": [2,2,2]}}, ... """ down_blocks_with_tf = [i for i, d in enumerate(unet.down_blocks) if hasattr(d, "attentions")] up_blocks_with_tf = [i for i, u in enumerate(unet.up_blocks) if hasattr(u, "attentions")] layers_per_block = unet.config.layers_per_block text_encoder_opts = [None, value] text_encoder_2_opts = [None, value] mid_opts = [None, value] down_opts = [None] + updown_options(down_blocks_with_tf, layers_per_block, value) up_opts = [None] + updown_options(up_blocks_with_tf, layers_per_block + 1, value) opts = [] for t1, t2, d, m, u in product(text_encoder_opts, text_encoder_2_opts, down_opts, mid_opts, up_opts): if all(o is None for o in (t1, t2, d, m, u)): continue opt = {} if t1 is not None: opt["text_encoder"] = t1 if t2 is not None: opt["text_encoder_2"] = t2 if all(o is None for o in (d, m, u)): # no unet scaling continue opt["unet"] = {} if d is not None: opt["unet"]["down"] = d if m is not None: opt["unet"]["mid"] = m if u is not None: opt["unet"]["up"] = u opts.append(opt) return opts components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules if "text_encoder_2" in lora_loadable_components: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") for scale_dict in all_possible_dict_opts(pipe.unet, value=1234): # test if lora block scales can be set with this scale_dict if not self.has_two_text_encoders and "text_encoder_2" in scale_dict: del scale_dict["text_encoder_2"] pipe.set_adapters("adapter-1", scale_dict) # test will fail if this line throws an error def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self): """ Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set/delete them """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) denoiser = pipe.transformer if 
self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") denoiser.add_adapter(denoiser_lora_config, "adapter-2") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules if "text_encoder_2" in lora_loadable_components: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) pipe.set_adapters("adapter-1") output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.set_adapters("adapter-2") output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.set_adapters(["adapter-1", "adapter-2"]) output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), "Adapter 1 and 2 should give different results", ) self.assertFalse( np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 1 and mixed adapters should give different results", ) self.assertFalse( np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 2 and mixed adapters should give different results", ) pipe.delete_adapters("adapter-1") output_deleted_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_deleted_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), "Adapter 1 and 2 should give different results", ) pipe.delete_adapters("adapter-2") output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3), "output with no lora and output with lora disabled should give same results", ) if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") denoiser.add_adapter(denoiser_lora_config, "adapter-2") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") pipe.set_adapters(["adapter-1", "adapter-2"]) pipe.delete_adapters(["adapter-1", "adapter-2"]) output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3), "output with no lora and output with lora disabled should give same results", ) def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self): """ Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set them """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") 
self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") denoiser.add_adapter(denoiser_lora_config, "adapter-2") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules if "text_encoder_2" in lora_loadable_components: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) pipe.set_adapters("adapter-1") output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.set_adapters("adapter-2") output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.set_adapters(["adapter-1", "adapter-2"]) output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] # Fuse and unfuse should lead to the same results self.assertFalse( np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), "Adapter 1 and 2 should give different results", ) self.assertFalse( np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 1 and mixed adapters should give different results", ) self.assertFalse( np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Adapter 2 and mixed adapters should give different results", ) pipe.set_adapters(["adapter-1", "adapter-2"], [0.5, 0.6]) output_adapter_mixed_weighted = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(output_adapter_mixed_weighted, output_adapter_mixed, atol=1e-3, rtol=1e-3), "Weighted adapter and mixed adapter should give different results", ) pipe.disable_lora() output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), "output with no lora and output with lora disabled should give same results", ) @skip_mps @pytest.mark.xfail( condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"), reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.", strict=False, ) def test_lora_fuse_nan(self): for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") # corrupt one LoRA weight with `inf` values with torch.no_grad(): if self.unet_kwargs: pipe.unet.mid_block.attentions[0].transformer_blocks[0].attn1.to_q.lora_A[ "adapter-1" ].weight += float("inf") else: named_modules = [name for name, _ in pipe.transformer.named_modules()] possible_tower_names = [ "transformer_blocks", "blocks", 
"joint_transformer_blocks", "single_transformer_blocks", ] filtered_tower_names = [ tower_name for tower_name in possible_tower_names if hasattr(pipe.transformer, tower_name) ] if len(filtered_tower_names) == 0: reason = ( f"`pipe.transformer` didn't have any of the following attributes: {possible_tower_names}." ) raise ValueError(reason) for tower_name in filtered_tower_names: transformer_tower = getattr(pipe.transformer, tower_name) has_attn1 = any("attn1" in name for name in named_modules) if has_attn1: transformer_tower[0].attn1.to_q.lora_A["adapter-1"].weight += float("inf") else: transformer_tower[0].attn.to_q.lora_A["adapter-1"].weight += float("inf") # with `safe_fusing=True` we should see an Error with self.assertRaises(ValueError): pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True) # without we should not see an error, but every image will be black pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) out = pipe(**inputs)[0] self.assertTrue(np.isnan(out).all()) def test_get_adapters(self): """ Tests a simple usecase where we attach multiple adapters and check if the results are the expected results """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") adapter_names = pipe.get_active_adapters() self.assertListEqual(adapter_names, ["adapter-1"]) pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") denoiser.add_adapter(denoiser_lora_config, "adapter-2") adapter_names = pipe.get_active_adapters() self.assertListEqual(adapter_names, ["adapter-2"]) pipe.set_adapters(["adapter-1", "adapter-2"]) self.assertListEqual(pipe.get_active_adapters(), ["adapter-1", "adapter-2"]) def test_get_list_adapters(self): """ Tests a simple usecase where we attach multiple adapters and check if the results are the expected results """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # 1. dicts_to_be_checked = {} if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") dicts_to_be_checked = {"text_encoder": ["adapter-1"]} if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") dicts_to_be_checked.update({"unet": ["adapter-1"]}) else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") dicts_to_be_checked.update({"transformer": ["adapter-1"]}) self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) # 2. 
dicts_to_be_checked = {} if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]} if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]}) else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]}) self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) # 3. pipe.set_adapters(["adapter-1", "adapter-2"]) dicts_to_be_checked = {} if "text_encoder" in self.pipeline_class._lora_loadable_modules: dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]} if self.unet_kwargs is not None: dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]}) else: dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]}) self.assertDictEqual( pipe.get_list_adapters(), dicts_to_be_checked, ) # 4. dicts_to_be_checked = {} if "text_encoder" in self.pipeline_class._lora_loadable_modules: dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]} if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-3") dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2", "adapter-3"]}) else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-3") dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2", "adapter-3"]}) self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) @require_peft_version_greater(peft_version="0.6.2") def test_simple_inference_with_text_lora_denoiser_fused_multi( self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3 ): """ Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model and makes sure it works as expected - with unet and multi-adapter case """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") denoiser.add_adapter(denoiser_lora_config, "adapter-2") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules if "text_encoder_2" in lora_loadable_components: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") # set them to multi-adapter inference mode pipe.set_adapters(["adapter-1", "adapter-2"]) outputs_all_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] 
pipe.set_adapters(["adapter-1"]) outputs_lora_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, adapter_names=["adapter-1"]) self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") # Fusing should still keep the LoRA layers so output should remain the same outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=expected_atol, rtol=expected_rtol), "Fused lora should not change the output", ) pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules) self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") if "text_encoder" in self.pipeline_class._lora_loadable_modules: self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers") self.assertTrue(check_if_lora_correctly_set(denoiser), "Unfuse should still keep LoRA layers") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Unfuse should still keep LoRA layers" ) pipe.fuse_lora( components=self.pipeline_class._lora_loadable_modules, adapter_names=["adapter-2", "adapter-1"] ) self.assertTrue(pipe.num_fused_loras == 2, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") # Fusing should still keep the LoRA layers output_all_lora_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_all_lora_fused, outputs_all_lora, atol=expected_atol, rtol=expected_rtol), "Fused lora should not change the output", ) pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules) self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") def test_lora_scale_kwargs_match_fusion(self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3): attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) for lora_scale in [1.0, 0.8]: for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules if "text_encoder_2" in lora_loadable_components: pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2", ) pipe.set_adapters(["adapter-1"]) attention_kwargs = {attention_kwargs_name: {"scale": lora_scale}} outputs_lora_1 = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] 
pipe.fuse_lora( components=self.pipeline_class._lora_loadable_modules, adapter_names=["adapter-1"], lora_scale=lora_scale, ) self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=expected_atol, rtol=expected_rtol), "Fused lora should not change the output", ) self.assertFalse( np.allclose(output_no_lora, outputs_lora_1, atol=expected_atol, rtol=expected_rtol), "LoRA should change the output", ) @require_peft_version_greater(peft_version="0.9.0") def test_simple_inference_with_dora(self): for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components( scheduler_cls, use_dora=True ) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_dora_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) output_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse( np.allclose(output_dora_lora, output_no_dora_lora, atol=1e-3, rtol=1e-3), "DoRA lora should change the output", ) def test_missing_keys_warning(self): scheduler_cls = self.scheduler_classes[0] # Skip text encoder check for now as that is handled with `transformers`. components, _, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts ) pipe.unload_lora_weights() self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) state_dict = torch.load(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), weights_only=True) # To make things dynamic since we cannot settle with a single key for all the models where we # offer PEFT support. missing_key = [k for k in state_dict if "lora_A" in k][0] del state_dict[missing_key] logger = logging.get_logger("diffusers.utils.peft_utils") logger.setLevel(30) with CaptureLogger(logger) as cap_logger: pipe.load_lora_weights(state_dict) # Since the missing key won't contain the adapter name ("default_0"). # Also strip out the component prefix (such as "unet." from `missing_key`). component = list({k.split(".")[0] for k in state_dict})[0] self.assertTrue(missing_key.replace(f"{component}.", "") in cap_logger.out.replace("default_0.", "")) def test_unexpected_keys_warning(self): scheduler_cls = self.scheduler_classes[0] # Skip text encoder check for now as that is handled with `transformers`. 
components, _, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts ) pipe.unload_lora_weights() self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) state_dict = torch.load(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), weights_only=True) unexpected_key = [k for k in state_dict if "lora_A" in k][0] + ".diffusers_cat" state_dict[unexpected_key] = torch.tensor(1.0, device=torch_device) logger = logging.get_logger("diffusers.utils.peft_utils") logger.setLevel(30) with CaptureLogger(logger) as cap_logger: pipe.load_lora_weights(state_dict) self.assertTrue(".diffusers_cat" in cap_logger.out) @unittest.skip("This is failing for now - need to investigate") def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self): """ Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights and makes sure it works as expected """ for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.text_encoder = torch.compile(pipe.text_encoder, mode="reduce-overhead", fullgraph=True) if self.has_two_text_encoders or self.has_three_text_encoders: pipe.text_encoder_2 = torch.compile(pipe.text_encoder_2, mode="reduce-overhead", fullgraph=True) # Just makes sure it works.. _ = pipe(**inputs, generator=torch.manual_seed(0))[0] def test_modify_padding_mode(self): def set_pad_mode(network, mode="circular"): for _, module in network.named_modules(): if isinstance(module, torch.nn.Conv2d): module.padding_mode = mode for scheduler_cls in self.scheduler_classes: components, _, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _pad_mode = "circular" set_pad_mode(pipe.vae, _pad_mode) set_pad_mode(pipe.unet, _pad_mode) _, _, inputs = self.get_dummy_inputs() _ = pipe(**inputs)[0] def test_logs_info_when_no_lora_keys_found(self): scheduler_cls = self.scheduler_classes[0] # Skip text encoder check for now as that is handled with `transformers`. 
components, _, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) original_out = pipe(**inputs, generator=torch.manual_seed(0))[0] no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)} logger = logging.get_logger("diffusers.loaders.peft") logger.setLevel(logging.WARNING) with CaptureLogger(logger) as cap_logger: pipe.load_lora_weights(no_op_state_dict) out_after_lora_attempt = pipe(**inputs, generator=torch.manual_seed(0))[0] denoiser = getattr(pipe, "unet") if self.unet_kwargs is not None else getattr(pipe, "transformer") self.assertTrue(cap_logger.out.startswith(f"No LoRA keys associated to {denoiser.__class__.__name__}")) self.assertTrue(np.allclose(original_out, out_after_lora_attempt, atol=1e-5, rtol=1e-5)) # test only for text encoder for lora_module in self.pipeline_class._lora_loadable_modules: if "text_encoder" in lora_module: text_encoder = getattr(pipe, lora_module) if lora_module == "text_encoder": prefix = "text_encoder" elif lora_module == "text_encoder_2": prefix = "text_encoder_2" logger = logging.get_logger("diffusers.loaders.lora_base") logger.setLevel(logging.WARNING) with CaptureLogger(logger) as cap_logger: self.pipeline_class.load_lora_into_text_encoder( no_op_state_dict, network_alphas=None, text_encoder=text_encoder, prefix=prefix ) self.assertTrue( cap_logger.out.startswith(f"No LoRA keys associated to {text_encoder.__class__.__name__}") ) def test_set_adapters_match_attention_kwargs(self): """Test to check if outputs after `set_adapters()` and attention kwargs match.""" attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) lora_scale = 0.5 attention_kwargs = {attention_kwargs_name: {"scale": lora_scale}} output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] self.assertFalse( np.allclose(output_no_lora, output_lora_scale, atol=1e-3, rtol=1e-3), "Lora + scale should change the output", ) pipe.set_adapters("default", lora_scale) output_lora_scale_wo_kwargs = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( not np.allclose(output_no_lora, output_lora_scale_wo_kwargs, atol=1e-3, rtol=1e-3), "Lora + scale should change the output", ) self.assertTrue( np.allclose(output_lora_scale, output_lora_scale_wo_kwargs, atol=1e-3, rtol=1e-3), "Lora + scale should match the output of `set_adapters()`.", ) with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) 
pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) for module_name, module in modules_to_save.items(): self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") output_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] self.assertTrue( not np.allclose(output_no_lora, output_lora_from_pretrained, atol=1e-3, rtol=1e-3), "Lora + scale should change the output", ) self.assertTrue( np.allclose(output_lora_scale, output_lora_from_pretrained, atol=1e-3, rtol=1e-3), "Loading from saved checkpoints should give same results as attention_kwargs.", ) self.assertTrue( np.allclose(output_lora_scale_wo_kwargs, output_lora_from_pretrained, atol=1e-3, rtol=1e-3), "Loading from saved checkpoints should give same results as set_adapters().", ) @require_peft_version_greater("0.13.2") def test_lora_B_bias(self): # Currently, this test is only relevant for Flux Control LoRA as we are not # aware of any other LoRA checkpoint that has its `lora_B` biases trained. components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # keep track of the bias values of the base layers to perform checks later. bias_values = {} denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer for name, module in denoiser.named_modules(): if any(k in name for k in self.denoiser_target_modules): if module.bias is not None: bias_values[name] = module.bias.data.clone() _, _, inputs = self.get_dummy_inputs(with_generator=False) original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] denoiser_lora_config.lora_bias = False if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") lora_bias_false_output = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.delete_adapters("adapter-1") denoiser_lora_config.lora_bias = True if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") lora_bias_true_output = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse(np.allclose(original_output, lora_bias_false_output, atol=1e-3, rtol=1e-3)) self.assertFalse(np.allclose(original_output, lora_bias_true_output, atol=1e-3, rtol=1e-3)) self.assertFalse(np.allclose(lora_bias_false_output, lora_bias_true_output, atol=1e-3, rtol=1e-3)) def test_correct_lora_configs_with_different_ranks(self): components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") lora_output_same_rank = pipe(**inputs, generator=torch.manual_seed(0))[0] if self.unet_kwargs is not None: pipe.unet.delete_adapters("adapter-1") else: pipe.transformer.delete_adapters("adapter-1") denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer for name, _ in denoiser.named_modules(): if "to_k" in name and "attn" in name and "lora" not in name: 
module_name_to_rank_update = name.replace(".base_layer.", ".") break # change the rank_pattern updated_rank = denoiser_lora_config.r * 2 denoiser_lora_config.rank_pattern = {module_name_to_rank_update: updated_rank} if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") updated_rank_pattern = pipe.unet.peft_config["adapter-1"].rank_pattern else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") updated_rank_pattern = pipe.transformer.peft_config["adapter-1"].rank_pattern self.assertTrue(updated_rank_pattern == {module_name_to_rank_update: updated_rank}) lora_output_diff_rank = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(not np.allclose(original_output, lora_output_same_rank, atol=1e-3, rtol=1e-3)) self.assertTrue(not np.allclose(lora_output_diff_rank, lora_output_same_rank, atol=1e-3, rtol=1e-3)) if self.unet_kwargs is not None: pipe.unet.delete_adapters("adapter-1") else: pipe.transformer.delete_adapters("adapter-1") # similarly change the alpha_pattern updated_alpha = denoiser_lora_config.lora_alpha * 2 denoiser_lora_config.alpha_pattern = {module_name_to_rank_update: updated_alpha} if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue( pipe.unet.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha} ) else: pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue( pipe.transformer.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha} ) lora_output_diff_alpha = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3)) self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3)) def test_layerwise_casting_inference_denoiser(self): from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN def check_linear_dtype(module, storage_dtype, compute_dtype): patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN if getattr(module, "_skip_layerwise_casting_patterns", None) is not None: patterns_to_check += tuple(module._skip_layerwise_casting_patterns) for name, submodule in module.named_modules(): if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): continue dtype_to_check = storage_dtype if "lora" in name or any(re.search(pattern, name) for pattern in patterns_to_check): dtype_to_check = compute_dtype if getattr(submodule, "weight", None) is not None: self.assertEqual(submodule.weight.dtype, dtype_to_check) if getattr(submodule, "bias", None) is not None: self.assertEqual(submodule.bias.dtype, dtype_to_check) def initialize_pipeline(storage_dtype=None, compute_dtype=torch.float32): components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device, dtype=compute_dtype) pipe.set_progress_bar_config(disable=None) pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) if storage_dtype is not None: denoiser.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) check_linear_dtype(denoiser, storage_dtype, compute_dtype) return pipe _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe_fp32 = initialize_pipeline(storage_dtype=None) pipe_fp32(**inputs, generator=torch.manual_seed(0))[0] 
pipe_float8_e4m3_fp32 = initialize_pipeline(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.float32) pipe_float8_e4m3_fp32(**inputs, generator=torch.manual_seed(0))[0] pipe_float8_e4m3_bf16 = initialize_pipeline(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16) pipe_float8_e4m3_bf16(**inputs, generator=torch.manual_seed(0))[0] @require_peft_version_greater("0.14.0") def test_layerwise_casting_peft_input_autocast_denoiser(self): r""" A test that checks if layerwise casting works correctly with PEFT layers and forward pass does not fail. This is different from `test_layerwise_casting_inference_denoiser` as that disables the application of layerwise cast hooks on the PEFT layers (relevant logic in `models.modeling_utils.ModelMixin.enable_layerwise_casting`). In this test, we enable the layerwise casting on the PEFT layers as well. If run with PEFT version <= 0.14.0, this test will fail with the following error: ``` RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Float8_e4m3fn != float ``` See the docstring of [`hooks.layerwise_casting.PeftInputAutocastDisableHook`] for more details. """ from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS from diffusers.hooks.layerwise_casting import ( _PEFT_AUTOCAST_DISABLE_HOOK, DEFAULT_SKIP_MODULES_PATTERN, apply_layerwise_casting, ) storage_dtype = torch.float8_e4m3fn compute_dtype = torch.float32 def check_module(denoiser): # This will also check if the peft layers are in torch.float8_e4m3fn dtype (unlike test_layerwise_casting_inference_denoiser) for name, module in denoiser.named_modules(): if not isinstance(module, _GO_LC_SUPPORTED_PYTORCH_LAYERS): continue dtype_to_check = storage_dtype if any(re.search(pattern, name) for pattern in patterns_to_check): dtype_to_check = compute_dtype if getattr(module, "weight", None) is not None: self.assertEqual(module.weight.dtype, dtype_to_check) if getattr(module, "bias", None) is not None: self.assertEqual(module.bias.dtype, dtype_to_check) if isinstance(module, BaseTunerLayer): self.assertTrue(getattr(module, "_diffusers_hook", None) is not None) self.assertTrue(module._diffusers_hook.get_hook(_PEFT_AUTOCAST_DISABLE_HOOK) is not None) # 1. Test forward with add_adapter components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device, dtype=compute_dtype) pipe.set_progress_bar_config(disable=None) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN if getattr(denoiser, "_skip_layerwise_casting_patterns", None) is not None: patterns_to_check += tuple(denoiser._skip_layerwise_casting_patterns) apply_layerwise_casting( denoiser, storage_dtype=storage_dtype, compute_dtype=compute_dtype, skip_modules_pattern=patterns_to_check ) check_module(denoiser) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe(**inputs, generator=torch.manual_seed(0))[0] # 2. 
Test forward with load_lora_weights with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) components, _, _ = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device, dtype=compute_dtype) pipe.set_progress_bar_config(disable=None) pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet apply_layerwise_casting( denoiser, storage_dtype=storage_dtype, compute_dtype=compute_dtype, skip_modules_pattern=patterns_to_check, ) check_module(denoiser) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe(**inputs, generator=torch.manual_seed(0))[0] @parameterized.expand([4, 8, 16]) def test_lora_adapter_metadata_is_loaded_correctly(self, lora_alpha): scheduler_cls = self.scheduler_classes[0] components, text_lora_config, denoiser_lora_config = self.get_dummy_components( scheduler_cls, lora_alpha=lora_alpha ) pipe = self.pipeline_class(**components) pipe, _ = self.add_adapters_to_pipeline( pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config ) with tempfile.TemporaryDirectory() as tmpdir: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) lora_metadatas = self._get_lora_adapter_metadata(modules_to_save) self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas) pipe.unload_lora_weights() out = pipe.lora_state_dict(tmpdir, return_lora_metadata=True) if len(out) == 3: _, _, parsed_metadata = out elif len(out) == 2: _, parsed_metadata = out denoiser_key = ( f"{self.pipeline_class.transformer_name}" if self.transformer_kwargs is not None else f"{self.pipeline_class.unet_name}" ) self.assertTrue(any(k.startswith(f"{denoiser_key}.") for k in parsed_metadata)) check_module_lora_metadata( parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=denoiser_key ) if "text_encoder" in self.pipeline_class._lora_loadable_modules: text_encoder_key = self.pipeline_class.text_encoder_name self.assertTrue(any(k.startswith(f"{text_encoder_key}.") for k in parsed_metadata)) check_module_lora_metadata( parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=text_encoder_key ) if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: text_encoder_2_key = "text_encoder_2" self.assertTrue(any(k.startswith(f"{text_encoder_2_key}.") for k in parsed_metadata)) check_module_lora_metadata( parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=text_encoder_2_key ) @parameterized.expand([4, 8, 16]) def test_lora_adapter_metadata_save_load_inference(self, lora_alpha): scheduler_cls = self.scheduler_classes[0] components, text_lora_config, denoiser_lora_config = self.get_dummy_components( scheduler_cls, lora_alpha=lora_alpha ) pipe = self.pipeline_class(**components).to(torch_device) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) pipe, _ = self.add_adapters_to_pipeline( pipe, 
text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config ) output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] with tempfile.TemporaryDirectory() as tmpdir: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) lora_metadatas = self._get_lora_adapter_metadata(modules_to_save) self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas) pipe.unload_lora_weights() pipe.load_lora_weights(tmpdir) output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( np.allclose(output_lora, output_lora_pretrained, atol=1e-3, rtol=1e-3), "Lora outputs should match." ) def test_lora_unload_add_adapter(self): """Tests if `unload_lora_weights()` -> `add_adapter()` works.""" scheduler_cls = self.scheduler_classes[0] components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components).to(torch_device) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe, _ = self.add_adapters_to_pipeline( pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config ) _ = pipe(**inputs, generator=torch.manual_seed(0))[0] # unload and then add. pipe.unload_lora_weights() pipe, _ = self.add_adapters_to_pipeline( pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config ) _ = pipe(**inputs, generator=torch.manual_seed(0))[0] def test_inference_load_delete_load_adapters(self): "Tests if `load_lora_weights()` -> `delete_adapters()` -> `load_lora_weights()` works." for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config) self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules if "text_encoder_2" in lora_loadable_components: pipe.text_encoder_2.add_adapter(text_lora_config) self.assertTrue( check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" ) output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights(save_directory=tmpdirname, **lora_state_dicts) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) # First, delete adapter and compare. 
pipe.delete_adapters(pipe.get_active_adapters()[0]) output_no_adapter = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertFalse(np.allclose(output_adapter_1, output_no_adapter, atol=1e-3, rtol=1e-3)) self.assertTrue(np.allclose(output_no_lora, output_no_adapter, atol=1e-3, rtol=1e-3)) # Then load adapter and compare. pipe.load_lora_weights(tmpdirname) output_lora_loaded = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(np.allclose(output_adapter_1, output_lora_loaded, atol=1e-3, rtol=1e-3)) def _test_group_offloading_inference_denoiser(self, offload_type, use_stream): from diffusers.hooks.group_offloading import _get_top_level_group_offload_hook onload_device = torch_device offload_device = torch.device("cpu") components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts ) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) components, _, _ = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) check_if_lora_correctly_set(denoiser) _, _, inputs = self.get_dummy_inputs(with_generator=False) # Test group offloading with load_lora_weights denoiser.enable_group_offload( onload_device=onload_device, offload_device=offload_device, offload_type=offload_type, num_blocks_per_group=1, use_stream=use_stream, ) # Place other model-level components on `torch_device`. 
for _, component in pipe.components.items(): if isinstance(component, torch.nn.Module): component.to(torch_device) group_offload_hook_1 = _get_top_level_group_offload_hook(denoiser) self.assertTrue(group_offload_hook_1 is not None) output_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] # Test group offloading after removing the lora pipe.unload_lora_weights() group_offload_hook_2 = _get_top_level_group_offload_hook(denoiser) self.assertTrue(group_offload_hook_2 is not None) output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] # noqa: F841 # Add the lora again and check if group offloading works pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) check_if_lora_correctly_set(denoiser) group_offload_hook_3 = _get_top_level_group_offload_hook(denoiser) self.assertTrue(group_offload_hook_3 is not None) output_3 = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(np.allclose(output_1, output_3, atol=1e-3, rtol=1e-3)) @parameterized.expand([("block_level", True), ("leaf_level", False), ("leaf_level", True)]) @require_torch_accelerator def test_group_offloading_inference_denoiser(self, offload_type, use_stream): for cls in inspect.getmro(self.__class__): if "test_group_offloading_inference_denoiser" in cls.__dict__ and cls is not PeftLoraLoaderMixinTests: # Skip this test if it is overwritten by child class. We need to do this because parameterized # materializes the test methods on invocation which cannot be overridden. return self._test_group_offloading_inference_denoiser(offload_type, use_stream) @require_torch_accelerator def test_lora_loading_model_cpu_offload(self): components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config) self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] with tempfile.TemporaryDirectory() as tmpdirname: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights( save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts ) # reinitialize the pipeline to mimic the inference workflow. components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) pipe = self.pipeline_class(**components) pipe.enable_model_cpu_offload(device=torch_device) pipe.load_lora_weights(tmpdirname) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") output_lora_loaded = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(np.allclose(output_lora, output_lora_loaded, atol=1e-3, rtol=1e-3))
diffusers/tests/lora/utils.py/0
{ "file_path": "diffusers/tests/lora/utils.py", "repo_id": "diffusers", "token_count": 59521 }
174
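The LoRA test mixin above exercises the public entry points `load_lora_weights`, `set_adapters`, `fuse_lora`, and `unload_lora_weights`. A minimal end-to-end sketch of that workflow, assuming a standard Stable Diffusion checkpoint and a purely hypothetical LoRA repository id, could look like this:

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Attach a LoRA checkpoint under an explicit adapter name
# ("some-user/some-lora" is a placeholder, not a real repository).
pipe.load_lora_weights("some-user/some-lora", adapter_name="adapter-1")

# Scale the adapter before running inference.
pipe.set_adapters("adapter-1", 0.5)
image = pipe("a prompt", num_inference_steps=20).images[0]

# Fuse the scaled adapter into the base weights, then revert to the plain base model.
pipe.fuse_lora(adapter_names=["adapter-1"], lora_scale=0.5)
image_fused = pipe("a prompt", num_inference_steps=20).images[0]
pipe.unfuse_lora()
pipe.unload_lora_weights()

Fusing bakes the scaled LoRA deltas into the base weights so inference no longer pays the adapter overhead, which is why the tests above assert that fused and unfused outputs stay numerically close.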
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, load_image, slow, torch_all_close, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class ConsistencyDecoderVAETests(ModelTesterMixin, unittest.TestCase): model_class = ConsistencyDecoderVAE main_input_name = "sample" base_precision = 1e-2 forward_requires_fresh_args = True def get_consistency_vae_config(self, block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [2, 4] norm_num_groups = norm_num_groups or 2 return { "encoder_block_out_channels": block_out_channels, "encoder_in_channels": 3, "encoder_out_channels": 4, "encoder_down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "decoder_add_attention": False, "decoder_block_out_channels": block_out_channels, "decoder_down_block_types": ["ResnetDownsampleBlock2D"] * len(block_out_channels), "decoder_downsample_padding": 1, "decoder_in_channels": 7, "decoder_layers_per_block": 1, "decoder_norm_eps": 1e-05, "decoder_norm_num_groups": norm_num_groups, "encoder_norm_num_groups": norm_num_groups, "decoder_num_train_timesteps": 1024, "decoder_out_channels": 6, "decoder_resnet_time_scale_shift": "scale_shift", "decoder_time_embedding_type": "learned", "decoder_up_block_types": ["ResnetUpsampleBlock2D"] * len(block_out_channels), "scaling_factor": 1, "latent_channels": 4, } def inputs_dict(self, seed=None): if seed is None: generator = torch.Generator("cpu").manual_seed(0) else: generator = torch.Generator("cpu").manual_seed(seed) image = randn_tensor((4, 3, 32, 32), generator=generator, device=torch.device(torch_device)) return {"sample": image, "generator": generator} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) @property def init_dict(self): return self.get_consistency_vae_config() def prepare_init_args_and_inputs_for_common(self): return self.init_dict, self.inputs_dict() def test_enable_disable_tiling(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) _ = inputs_dict.pop("generator") torch.manual_seed(0) output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_tiling() output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), 0.5, "VAE tiling should not affect the inference results", ) torch.manual_seed(0) model.disable_tiling() output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] 
self.assertEqual( output_without_tiling.detach().cpu().numpy().all(), output_without_tiling_2.detach().cpu().numpy().all(), "Without tiling outputs should match with the outputs when tiling is manually disabled.", ) def test_enable_disable_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) _ = inputs_dict.pop("generator") torch.manual_seed(0) output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_slicing() output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), 0.5, "VAE slicing should not affect the inference results", ) torch.manual_seed(0) model.disable_slicing() output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_slicing.detach().cpu().numpy().all(), output_without_slicing_2.detach().cpu().numpy().all(), "Without slicing outputs should match with the outputs when slicing is manually disabled.", ) @slow class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) @torch.no_grad() def test_encode_decode(self): vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update vae.to(torch_device) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ).resize((256, 256)) image = torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[None, :, :, :].to( torch_device ) latent = vae.encode(image).latent_dist.mean sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample actual_output = sample[0, :2, :2, :2].flatten().cpu() expected_output = torch.tensor([-0.0141, -0.0014, 0.0115, 0.0086, 0.1051, 0.1053, 0.1031, 0.1024]) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_sd(self): vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, safety_checker=None ) pipe.to(torch_device) out = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] actual_output = out[:2, :2, :2].flatten().cpu() expected_output = torch.tensor([0.7686, 0.8228, 0.6489, 0.7455, 0.8661, 0.8797, 0.8241, 0.8759]) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_encode_decode_f16(self): vae = ConsistencyDecoderVAE.from_pretrained( "openai/consistency-decoder", torch_dtype=torch.float16 ) # TODO - update vae.to(torch_device) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ).resize((256, 256)) image = ( torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[None, :, :, :] .half() .to(torch_device) ) latent = vae.encode(image).latent_dist.mean sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample actual_output = sample[0, 
:2, :2, :2].flatten().cpu() expected_output = torch.tensor( [-0.0111, -0.0125, -0.0017, -0.0007, 0.1257, 0.1465, 0.1450, 0.1471], dtype=torch.float16, ) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_sd_f16(self): vae = ConsistencyDecoderVAE.from_pretrained( "openai/consistency-decoder", torch_dtype=torch.float16 ) # TODO - update pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, vae=vae, safety_checker=None, ) pipe.to(torch_device) out = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] actual_output = out[:2, :2, :2].flatten().cpu() expected_output = torch.tensor( [0.0000, 0.0249, 0.0000, 0.0000, 0.1709, 0.2773, 0.0471, 0.1035], dtype=torch.float16, ) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_vae_tiling(self): vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16) pipe = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, safety_checker=None, torch_dtype=torch.float16 ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out_1 = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] # make sure tiled vae decode yields the same result pipe.enable_vae_tiling() out_2 = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] assert torch_all_close(out_1, out_2, atol=5e-3) # test that tiled decode works with various shapes shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)] with torch.no_grad(): for shape in shapes: image = torch.zeros(shape, device=torch_device, dtype=pipe.vae.dtype) pipe.vae.decode(image)
diffusers/tests/models/autoencoders/test_models_consistency_decoder_vae.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_consistency_decoder_vae.py", "repo_id": "diffusers", "token_count": 5115 }
175
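The integration tests above all follow one usage pattern: swap the default VAE of a Stable Diffusion pipeline for a ConsistencyDecoderVAE. A condensed sketch of that pattern, with repository ids taken from the tests themselves, is:

import torch
from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline

vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
).to("cuda")

# Tiled decoding trades a little speed for a smaller memory footprint; the
# test above asserts it stays numerically close to the untiled result.
pipe.enable_vae_tiling()
image = pipe("a photo of a horse", num_inference_steps=25).images[0]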
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import BriaTransformer2DModel from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0 from diffusers.models.embeddings import ImageProjection from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin enable_full_determinism() def create_bria_ip_adapter_state_dict(model): # "ip_adapter" (cross-attention weights) ip_cross_attn_state_dict = {} key_id = 0 for name in model.attn_processors.keys(): if name.startswith("single_transformer_blocks"): continue joint_attention_dim = model.config["joint_attention_dim"] hidden_size = model.config["num_attention_heads"] * model.config["attention_head_dim"] sd = FluxIPAdapterJointAttnProcessor2_0( hidden_size=hidden_size, cross_attention_dim=joint_attention_dim, scale=1.0 ).state_dict() ip_cross_attn_state_dict.update( { f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], f"{key_id}.to_k_ip.bias": sd["to_k_ip.0.bias"], f"{key_id}.to_v_ip.bias": sd["to_v_ip.0.bias"], } ) key_id += 1 # "image_proj" (ImageProjection layer weights) image_projection = ImageProjection( cross_attention_dim=model.config["joint_attention_dim"], image_embed_dim=model.config["pooled_projection_dim"], num_image_text_embeds=4, ) ip_image_projection_state_dict = {} sd = image_projection.state_dict() ip_image_projection_state_dict.update( { "proj.weight": sd["image_embeds.weight"], "proj.bias": sd["image_embeds.bias"], "norm.weight": sd["norm.weight"], "norm.bias": sd["norm.bias"], } ) del sd ip_state_dict = {} ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) return ip_state_dict class BriaTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = BriaTransformer2DModel main_input_name = "hidden_states" # We override the items here because the transformer under consideration is small. 
model_split_percents = [0.8, 0.7, 0.7] # Skip setting testing with default: AttnProcessor uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 1 num_latent_channels = 4 num_image_channels = 3 height = width = 4 sequence_length = 48 embedding_dim = 32 hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) text_ids = torch.randn((sequence_length, num_image_channels)).to(torch_device) image_ids = torch.randn((height * width, num_image_channels)).to(torch_device) timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "img_ids": image_ids, "txt_ids": text_ids, "timestep": timestep, } @property def input_shape(self): return (16, 4) @property def output_shape(self): return (16, 4) def prepare_init_args_and_inputs_for_common(self): init_dict = { "patch_size": 1, "in_channels": 4, "num_layers": 1, "num_single_layers": 1, "attention_head_dim": 8, "num_attention_heads": 2, "joint_attention_dim": 32, "pooled_projection_dim": None, "axes_dims_rope": [0, 4, 4], } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_deprecated_inputs_img_txt_ids_3d(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output_1 = model(**inputs_dict).to_tuple()[0] # update inputs_dict with txt_ids and img_ids as 3d tensors (deprecated) text_ids_3d = inputs_dict["txt_ids"].unsqueeze(0) image_ids_3d = inputs_dict["img_ids"].unsqueeze(0) assert text_ids_3d.ndim == 3, "text_ids_3d should be a 3d tensor" assert image_ids_3d.ndim == 3, "img_ids_3d should be a 3d tensor" inputs_dict["txt_ids"] = text_ids_3d inputs_dict["img_ids"] = image_ids_3d with torch.no_grad(): output_2 = model(**inputs_dict).to_tuple()[0] self.assertEqual(output_1.shape, output_2.shape) self.assertTrue( torch.allclose(output_1, output_2, atol=1e-5), msg="output with deprecated inputs (img_ids and txt_ids as 3d torch tensors) are not equal as them as 2d inputs", ) def test_gradient_checkpointing_is_applied(self): expected_set = {"BriaTransformer2DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) class BriaTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): model_class = BriaTransformer2DModel def prepare_init_args_and_inputs_for_common(self): return BriaTransformerTests().prepare_init_args_and_inputs_for_common() class BriaTransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): model_class = BriaTransformer2DModel def prepare_init_args_and_inputs_for_common(self): return BriaTransformerTests().prepare_init_args_and_inputs_for_common()
diffusers/tests/models/transformers/test_models_transformer_bria.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_bria.py", "repo_id": "diffusers", "token_count": 2836 }
176
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import Lumina2Transformer2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class Lumina2Transformer2DModelTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = Lumina2Transformer2DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 2 # N num_channels = 4 # C height = width = 16 # H, W embedding_dim = 32 # D sequence_length = 16 # L hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.rand(size=(batch_size,)).to(torch_device) attention_mask = torch.ones(size=(batch_size, sequence_length), dtype=torch.bool).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, "encoder_attention_mask": attention_mask, } @property def input_shape(self): return (4, 16, 16) @property def output_shape(self): return (4, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 16, "patch_size": 2, "in_channels": 4, "hidden_size": 24, "num_layers": 2, "num_refiner_layers": 1, "num_attention_heads": 3, "num_kv_heads": 1, "multiple_of": 2, "ffn_dim_multiplier": None, "norm_eps": 1e-5, "scaling_factor": 1.0, "axes_dim_rope": (4, 2, 2), "axes_lens": (128, 128, 128), "cap_feat_dim": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"Lumina2Transformer2DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
diffusers/tests/models/transformers/test_models_transformer_lumina2.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_lumina2.py", "repo_id": "diffusers", "token_count": 1221 }
177
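For reference, the dummy configuration and inputs above translate into a forward call along these lines. This is a sketch with randomly initialized weights, and it assumes the output object exposes a `sample` field in the way other diffusers transformer outputs do:

import torch
from diffusers import Lumina2Transformer2DModel

model = Lumina2Transformer2DModel(
    sample_size=16, patch_size=2, in_channels=4, hidden_size=24, num_layers=2,
    num_refiner_layers=1, num_attention_heads=3, num_kv_heads=1, multiple_of=2,
    ffn_dim_multiplier=None, norm_eps=1e-5, scaling_factor=1.0,
    axes_dim_rope=(4, 2, 2), axes_lens=(128, 128, 128), cap_feat_dim=32,
)

out = model(
    hidden_states=torch.randn(2, 4, 16, 16),        # (N, C, H, W) latents
    encoder_hidden_states=torch.randn(2, 16, 32),   # (N, L, D) caption features
    timestep=torch.rand(2),
    encoder_attention_mask=torch.ones(2, 16, dtype=torch.bool),
)
print(out.sample.shape)  # expected (2, 4, 16, 16), matching output_shape above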
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import numpy as np import torch from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() class UNetMotionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNetMotionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 num_frames = 4 sizes = (16, 16) noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size * num_frames, 4, 16)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 4, 16, 16) @property def output_shape(self): return (4, 4, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (16, 32), "norm_num_groups": 16, "down_block_types": ("CrossAttnDownBlockMotion", "DownBlockMotion"), "up_block_types": ("UpBlockMotion", "CrossAttnUpBlockMotion"), "cross_attention_dim": 16, "num_attention_heads": 2, "out_channels": 4, "in_channels": 4, "layers_per_block": 1, "sample_size": 16, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_unet2d(self): torch.manual_seed(0) unet2d = UNet2DConditionModel() torch.manual_seed(1) model = self.model_class.from_unet2d(unet2d) model_state_dict = model.state_dict() for param_name, param_value in unet2d.named_parameters(): self.assertTrue(torch.equal(model_state_dict[param_name], param_value)) def test_freeze_unet2d(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.freeze_unet2d_params() for param_name, param_value in model.named_parameters(): if "motion_modules" not in param_name: self.assertFalse(param_value.requires_grad) else: self.assertTrue(param_value.requires_grad) def test_loading_motion_adapter(self): model = self.model_class() adapter = MotionAdapter() model.load_motion_modules(adapter) for idx, down_block in enumerate(model.down_blocks): adapter_state_dict = adapter.down_blocks[idx].motion_modules.state_dict() for param_name, param_value in down_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) for idx, up_block in enumerate(model.up_blocks): adapter_state_dict = adapter.up_blocks[idx].motion_modules.state_dict() for param_name, param_value in up_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) 
mid_block_adapter_state_dict = adapter.mid_block.motion_modules.state_dict() for param_name, param_value in model.mid_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(mid_block_adapter_state_dict[param_name], param_value)) def test_saving_motion_modules(self): torch.manual_seed(0) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: model.save_motion_modules(tmpdirname) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors"))) adapter_loaded = MotionAdapter.from_pretrained(tmpdirname) torch.manual_seed(0) model_loaded = self.model_class(**init_dict) model_loaded.load_motion_modules(adapter_loaded) model_loaded.to(torch_device) with torch.no_grad(): output = model(**inputs_dict)[0] output_loaded = model_loaded(**inputs_dict)[0] max_diff = (output - output_loaded).abs().max().item() self.assertLessEqual(max_diff, 1e-4, "Models give different forward passes") @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" def test_gradient_checkpointing_is_applied(self): expected_set = { "CrossAttnUpBlockMotion", "CrossAttnDownBlockMotion", "UNetMidBlockCrossAttnMotion", "UpBlockMotion", "Transformer2DModel", "DownBlockMotion", } super().test_gradient_checkpointing_is_applied(expected_set=expected_set) def test_feed_forward_chunking(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (32, 64) init_dict["norm_num_groups"] = 32 model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict)[0] model.enable_forward_chunking() with torch.no_grad(): output_2 = model(**inputs_dict)[0] self.assertEqual(output.shape, output_2.shape, "Shape doesn't match") assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2 def test_pickle(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample = model(**inputs_dict).sample sample_copy = copy.copy(sample) assert (sample - sample_copy).abs().max() < 1e-4 def test_from_save_pretrained(self, expected_max_diff=5e-5): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) torch.manual_seed(0) new_model = self.model_class.from_pretrained(tmpdirname) new_model.to(torch_device) with torch.no_grad(): image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") def 
test_from_save_pretrained_variant(self, expected_max_diff=5e-5): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) torch.manual_seed(0) new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") # non-variant cannot be loaded with self.assertRaises(OSError) as error_context: self.model_class.from_pretrained(tmpdirname) # make sure that error message states what keys are missing assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) new_model.to(torch_device) with torch.no_grad(): image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_asymmetric_motion_model(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["layers_per_block"] = (2, 3) init_dict["transformer_layers_per_block"] = ((1, 2), (3, 4, 5)) init_dict["reverse_transformer_layers_per_block"] = ((7, 6, 7, 4), (4, 2, 2)) init_dict["temporal_transformer_layers_per_block"] = ((2, 5), (2, 3, 5)) init_dict["reverse_temporal_transformer_layers_per_block"] = ((5, 4, 3, 4), (3, 2, 2)) init_dict["num_attention_heads"] = (2, 4) init_dict["motion_num_attention_heads"] = (4, 4) init_dict["reverse_motion_num_attention_heads"] = (2, 2) init_dict["use_motion_mid_block"] = True init_dict["mid_block_layers"] = 2 init_dict["transformer_layers_per_mid_block"] = (1, 5) init_dict["temporal_transformer_layers_per_mid_block"] = (2, 4) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
diffusers/tests/models/unets/test_models_unet_motion.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_motion.py", "repo_id": "diffusers", "token_count": 5224 }
178
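The conversion and adapter-loading paths covered by these tests correspond to the following usage sketch, using randomly initialized modules rather than pretrained weights:

from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel

unet2d = UNet2DConditionModel()                    # spatial UNet
motion_unet = UNetMotionModel.from_unet2d(unet2d)  # copies the 2D weights over

adapter = MotionAdapter()                          # temporal (motion) modules
motion_unet.load_motion_modules(adapter)

# Freeze everything except the motion modules, e.g. for motion-only fine-tuning.
motion_unet.freeze_unet2d_params()
trainable = [name for name, p in motion_unet.named_parameters() if p.requires_grad]
assert all("motion_modules" in name for name in trainable)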
import pickle as pkl import unittest from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from diffusers.utils.outputs import BaseOutput from diffusers.utils.testing_utils import require_torch @dataclass class CustomOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] class ConfigTester(unittest.TestCase): def test_outputs_single_attribute(self): outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4)) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_dict_init(self): # test output reinitialization with a `dict` for compatibility with `accelerate` outputs = CustomOutput({"images": np.random.rand(1, 3, 4, 4)}) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput({"images": [PIL.Image.new("RGB", (4, 4))]}) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_serialization(self): outputs_orig = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) serialized = pkl.dumps(outputs_orig) outputs_copy = pkl.loads(serialized) # Check original and copy are equal assert dir(outputs_orig) == dir(outputs_copy) assert dict(outputs_orig) == dict(outputs_copy) assert vars(outputs_orig) == vars(outputs_copy) @require_torch def test_torch_pytree(self): # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves) # this is important for DistributedDataParallel gradient synchronization with static_graph=True import torch import torch.utils._pytree data = np.random.rand(1, 3, 4, 4) x = CustomOutput(images=data) self.assertFalse(torch.utils._pytree._is_leaf(x)) expected_flat_outs = [data] expected_tree_spec = torch.utils._pytree.TreeSpec(CustomOutput, ["images"], [torch.utils._pytree.LeafSpec()]) actual_flat_outs, actual_tree_spec = torch.utils._pytree.tree_flatten(x) self.assertEqual(expected_flat_outs, actual_flat_outs) self.assertEqual(expected_tree_spec, actual_tree_spec) unflattened_x = torch.utils._pytree.tree_unflatten(actual_flat_outs, actual_tree_spec) self.assertEqual(x, unflattened_x)
diffusers/tests/others/test_outputs.py/0
{ "file_path": "diffusers/tests/others/test_outputs.py", "repo_id": "diffusers", "token_count": 1506 }
179
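For reference, a minimal sketch of the BaseOutput behaviour these tests verify: a dataclass subclass whose field can be read by attribute, by key, or by index (MyOutput is a hypothetical name used only here).

from dataclasses import dataclass
from typing import List, Union

import numpy as np
import PIL.Image

from diffusers.utils.outputs import BaseOutput


@dataclass
class MyOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]


out = MyOutput(images=np.zeros((1, 3, 4, 4)))
for view in (out.images, out["images"], out[0]):
    # All three accessors expose the same stored array.
    assert isinstance(view, np.ndarray)
    assert view.shape == (1, 3, 4, 4)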
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from diffusers import ( IFPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, load_numpy, require_accelerator, require_hf_hub_version_greater, require_torch_accelerator, require_transformers_version_greater, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): pipeline_class = IFPipeline params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} def get_dummy_components(self): return self._get_dummy_components() def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", } return inputs @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def test_save_load_local(self): self._test_save_load_local() def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @require_hf_hub_version_greater("0.26.5") @require_transformers_version_greater("4.47.1") def test_save_load_dduf(self): super().test_save_load_dduf(atol=1e-2, rtol=1e-2) @unittest.skip("Functionality is tested elsewhere.") def test_save_load_optional_components(self): pass @slow @require_torch_accelerator class IFPipelineSlowTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_if_text_to_image(self): 
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) pipe.unet.set_attn_processor(AttnAddedKVProcessor()) pipe.enable_model_cpu_offload(device=torch_device) backend_reset_max_memory_allocated(torch_device) backend_empty_cache(torch_device) backend_reset_peak_memory_stats(torch_device) generator = torch.Generator(device="cpu").manual_seed(0) output = pipe( prompt="anime turtle", num_inference_steps=2, generator=generator, output_type="np", ) image = output.images[0] mem_bytes = backend_max_memory_allocated(torch_device) assert mem_bytes < 12 * 10**9 expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(image, expected_image) pipe.remove_all_hooks()
diffusers/tests/pipelines/deepfloyd_if/test_if.py/0
{ "file_path": "diffusers/tests/pipelines/deepfloyd_if/test_if.py", "repo_id": "diffusers", "token_count": 2051 }
180
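A hedged sketch of the memory-conscious stage-1 call that IFPipelineSlowTests.test_if_text_to_image measures; it assumes a CUDA-capable device and access to the DeepFloyd/IF-I-XL-v1.0 checkpoint.

import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()  # keeps peak allocation well under the 12 GB budget asserted above

image = pipe(
    prompt="anime turtle",
    num_inference_steps=2,
    generator=torch.Generator(device="cpu").manual_seed(0),
    output_type="np",
).images[0]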
import random import unittest import numpy as np import torch from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxImg2ImgPipeline, FluxTransformer2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin enable_full_determinism() class FluxImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin, FluxIPAdapterTesterMixin): pipeline_class = FluxImg2ImgPipeline params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) batch_params = frozenset(["prompt"]) test_xformers_attention = False def get_dummy_components(self): torch.manual_seed(0) transformer = FluxTransformer2DModel( patch_size=1, in_channels=4, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModel(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=1, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, "image_encoder": None, "feature_extractor": None, } def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "max_sequence_length": 48, "strength": 0.8, "output_type": "np", } return inputs def test_flux_different_prompts(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_same_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "a different prompt" output_different_prompts = pipe(**inputs).images[0] max_diff = np.abs(output_same_prompt - output_different_prompts).max() # Outputs should be different here # For some reasons, they don't show large differences assert max_diff > 1e-6 def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) height_width_pairs = [(32, 32), (72, 57)] for height, width in height_width_pairs: expected_height = height - height % 
(pipe.vae_scale_factor * 2) expected_width = width - width % (pipe.vae_scale_factor * 2) inputs.update({"height": height, "width": width}) image = pipe(**inputs).images[0] output_height, output_width, _ = image.shape assert (output_height, output_width) == (expected_height, expected_width)
diffusers/tests/pipelines/flux/test_pipeline_flux_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/flux/test_pipeline_flux_img2img.py", "repo_id": "diffusers", "token_count": 2384 }
181
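The shape assertion in test_flux_image_output_shape rounds the requested size down to a multiple of vae_scale_factor * 2; below is a plain restatement of that arithmetic. The default of 8 is an assumption matching the full-size Flux VAE, while the tiny test VAE above has a smaller factor.

def expected_flux_output_size(height, width, vae_scale_factor=8):
    # Flux packs 2x2 patches of latents, hence the extra factor of 2.
    multiple = vae_scale_factor * 2
    return height - height % multiple, width - width % multiple


assert expected_flux_output_size(72, 57) == (64, 48)
assert expected_flux_output_size(32, 32) == (32, 32)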
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXPipeline, LTXVideoTransformer3DModel from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import FirstBlockCacheTesterMixin, PipelineTesterMixin, to_np enable_full_determinism() class LTXPipelineFastTests(PipelineTesterMixin, FirstBlockCacheTesterMixin, unittest.TestCase): pipeline_class = LTXPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self, num_layers: int = 1): torch.manual_seed(0) transformer = LTXVideoTransformer3DModel( in_channels=8, out_channels=8, patch_size=1, patch_size_t=1, num_attention_heads=4, attention_head_dim=8, cross_attention_dim=32, num_layers=num_layers, caption_channels=32, ) torch.manual_seed(0) vae = AutoencoderKLLTXVideo( in_channels=3, out_channels=3, latent_channels=8, block_out_channels=(8, 8, 8, 8), decoder_block_out_channels=(8, 8, 8, 8), layers_per_block=(1, 1, 1, 1, 1), decoder_layers_per_block=(1, 1, 1, 1, 1), spatio_temporal_scaling=(True, True, False, False), decoder_spatio_temporal_scaling=(True, True, False, False), decoder_inject_noise=(False, False, False, False, False), upsample_residual=(False, False, False, False), upsample_factor=(1, 1, 1, 1), timestep_conditioning=False, patch_size=1, patch_size_t=1, encoder_causal=True, decoder_causal=False, ) vae.use_framewise_encoding = False vae.use_framewise_decoding = False torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "dance monkey", "negative_prompt": "", "generator": generator, "num_inference_steps": 2, "guidance_scale": 3.0, "height": 32, "width": 32, # 8 * k + 1 is the recommendation "num_frames": 9, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components 
= self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames generated_video = video[0] self.assertEqual(generated_video.shape, (9, 3, 32, 32)) expected_video = torch.randn(9, 3, 32, 32) max_diff = np.abs(generated_video - expected_video).max() self.assertLessEqual(max_diff, 1e10) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = 
np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_vae_tiling(self, expected_diff_max: float = 0.2): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to("cpu") pipe.set_progress_bar_config(disable=None) # Without tiling inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_without_tiling = pipe(**inputs)[0] # With tiling pipe.vae.enable_tiling( tile_sample_min_height=96, tile_sample_min_width=96, tile_sample_stride_height=64, tile_sample_stride_width=64, ) inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_with_tiling = pipe(**inputs)[0] self.assertLess( (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), expected_diff_max, "VAE tiling should not affect the inference results", )
diffusers/tests/pipelines/ltx/test_ltx.py/0
{ "file_path": "diffusers/tests/pipelines/ltx/test_ltx.py", "repo_id": "diffusers", "token_count": 4626 }
182
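A minimal sketch of the callback_on_step_end contract exercised by test_callback_inputs above: the callback receives the pipeline, step index, timestep, and a dict of tensors, and must return a dict whose keys come from pipe._callback_tensor_inputs.

import torch


def zero_final_latents(pipe, step_index, timestep, callback_kwargs):
    # Overwrite the latents with zeros on the last denoising step,
    # mirroring callback_inputs_change_tensor in the test above.
    if step_index == pipe.num_timesteps - 1:
        callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
    return callback_kwargs


# Usage with any pipeline that supports step-end callbacks, e.g. LTXPipeline:
# frames = pipe(..., callback_on_step_end=zero_final_latents,
#               callback_on_step_end_tensor_inputs=["latents"]).frames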
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, StableCascadeDecoderPipeline from diffusers.models import StableCascadeUNet from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, load_numpy, load_pt, numpy_cosine_similarity_distance, require_torch_accelerator, skip_mps, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class StableCascadeDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableCascadeDecoderPipeline params = ["prompt"] batch_params = ["image_embeddings", "prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, "out_channels": 4, "conditioning_dim": 128, "block_out_channels": [16, 32, 64, 128], "num_attention_heads": [-1, -1, 1, 2], "down_num_layers_per_block": [1, 1, 1, 1], "up_num_layers_per_block": [1, 1, 1, 1], "down_blocks_repeat_mappers": [1, 1, 1, 1], "up_blocks_repeat_mappers": [3, 3, 2, 2], "block_types_per_layer": [ ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ], "switch_level": None, "clip_text_pooled_in_channels": 32, "dropout": [0.1, 0.1, 0.1, 0.1], } model = StableCascadeUNet(**model_kwargs) return model.eval() def get_dummy_components(self): decoder = self.dummy_decoder text_encoder = 
self.dummy_text_encoder tokenizer = self.dummy_tokenizer vqgan = self.dummy_vqgan scheduler = DDPMWuerstchenScheduler() components = { "decoder": decoder, "vqgan": vqgan, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "latent_dim_scale": 4.0, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeddings": torch.ones((1, 4, 4, 4), device=device), "prompt": "horse", "generator": generator, "guidance_scale": 2.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_decoder(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False) image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="fp16 not supported") def test_float16_inference(self): super().test_float16_inference() def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) prior_num_images_per_prompt = 2 decoder_num_images_per_prompt = 2 prompt = ["a cat"] batch_size = len(prompt) generator = torch.Generator(device) image_embeddings = randn_tensor( (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) ) decoder_output = pipe( image_embeddings=image_embeddings, prompt=prompt, num_inference_steps=1, output_type="np", guidance_scale=0.0, generator=generator.manual_seed(0), num_images_per_prompt=decoder_num_images_per_prompt, ) assert decoder_output.images.shape[0] == ( batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt ) def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings_with_guidance(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) prior_num_images_per_prompt = 2 decoder_num_images_per_prompt = 2 prompt = ["a cat"] batch_size = len(prompt) generator = torch.Generator(device) image_embeddings = randn_tensor( (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) ) decoder_output = pipe( image_embeddings=image_embeddings, prompt=prompt, num_inference_steps=1, output_type="np", guidance_scale=2.0, generator=generator.manual_seed(0), num_images_per_prompt=decoder_num_images_per_prompt, ) assert decoder_output.images.shape[0] == ( batch_size * prior_num_images_per_prompt * 
decoder_num_images_per_prompt ) def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "batch_size": 1, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) @slow @require_torch_accelerator class StableCascadeDecoderPipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_stable_cascade_decoder(self): pipe = StableCascadeDecoderPipeline.from_pretrained( "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16 ) pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." generator = torch.Generator(device="cpu").manual_seed(0) image_embedding = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/image_embedding.pt", map_location=torch_device, ) image = pipe( prompt=prompt, image_embeddings=image_embedding, output_type="np", num_inference_steps=2, generator=generator, ).images[0] assert image.shape == (1024, 1024, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/stable_cascade_decoder_image.npy" ) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 2e-4
diffusers/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py/0
{ "file_path": "diffusers/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py", "repo_id": "diffusers", "token_count": 5095 }
183
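A sketch of the two-stage Stable Cascade flow these decoder tests assume: a prior pipeline produces image embeddings, which the decoder turns into pixels. The public stabilityai checkpoints, bfloat16 weights, and an accelerator device are assumptions.

import torch
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline

prior = StableCascadePriorPipeline.from_pretrained(
    "stabilityai/stable-cascade-prior", variant="bf16", torch_dtype=torch.bfloat16
)
decoder = StableCascadeDecoderPipeline.from_pretrained(
    "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16
)
prior.enable_model_cpu_offload()
decoder.enable_model_cpu_offload()

prompt = "a photograph of raccoons reading a newspaper on a subway train"
prior_output = prior(prompt=prompt, num_inference_steps=20, guidance_scale=4.0)
image = decoder(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    num_inference_steps=10,
    guidance_scale=0.0,
    output_type="pil",
).images[0]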
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusion2InpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), 
rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) @slow @require_torch_accelerator class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_stable_diffusion_inpaint_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def test_stable_diffusion_inpaint_pipeline_fp16(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) 
model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, torch_dtype=torch.float16, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): backend_empty_cache(torch_device) backend_reset_max_memory_allocated(torch_device) backend_reset_peak_memory_stats(torch_device) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload(device=torch_device) prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) _ = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", ) mem_bytes = backend_max_memory_allocated(torch_device) # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py", "repo_id": "diffusers", "token_count": 5086 }
184
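A hedged sketch of the inpainting call pattern the integration tests above exercise, assuming network access to the public checkpoint and example images plus a CUDA device.

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    generator=torch.manual_seed(0),
).images[0]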
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLInpaintPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionXLInpaintPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( { "add_text_embeds", "add_time_ids", "mask", "masked_image_latents", } ) supports_dduf = False def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer 
= CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": image_encoder, "feature_extractor": feature_extractor, "requires_aesthetics_score": True, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # create mask image[8:, 8:, :] = 255 mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "strength": 1.0, "output_type": "np", } return inputs def get_dummy_inputs_2images(self, device, seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.8274, 0.5538, 0.6141, 0.5843, 0.6865, 0.7082, 0.5861, 0.6123, 0.5344]) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def test_stable_diffusion_xl_inpaint_euler(self): 
device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8279, 0.5673, 0.6088, 0.6156, 0.6923, 0.7347, 0.6547, 0.6108, 0.5198]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @unittest.skip("Skip for now.") def test_save_load_optional_components(self): pass @require_torch_accelerator def test_stable_diffusion_xl_inpaint_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure 
that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @require_torch_accelerator def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_model_cpu_offload(device=torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_sequential_cpu_offload(device=torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_refiner(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(skip_first_text_encoder=True) sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.7540, 0.5231, 0.5833, 0.6217, 0.6339, 0.7067, 0.6507, 0.5672, 0.5030]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = {**inputs, 
**{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [7, 20]: assert_run_mixture(steps, 0.33, EulerDiscreteScheduler) assert_run_mixture(steps, 0.33, HeunDiscreteScheduler) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [5, 8, 20]: for split in [0.33, 0.49, 0.71]: for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split, scheduler_cls) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = 
scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, ( f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" ) inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert expected_steps == done_steps, ( f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" ) for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # 
manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_stable_diffusion_xl_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = sd_pipe._encode_vae_image(image, generator=generator) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = sd_pipe._encode_vae_image(masked_image, generator=generator) inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] = mask inputs["strength"] = 0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 def test_stable_diffusion_xl_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = torch.Generator(device=device).manual_seed(0) gen2 = 
torch.Generator(device=device).manual_seed(0) for name in ["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 5 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4)
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py", "repo_id": "diffusers", "token_count": 15871 }
185
import gc import tempfile import unittest from diffusers import FluxPipeline, FluxTransformer2DModel, QuantoConfig from diffusers.models.attention_processor import Attention from diffusers.utils import is_optimum_quanto_available, is_torch_available from diffusers.utils.testing_utils import ( backend_empty_cache, backend_reset_peak_memory_stats, enable_full_determinism, nightly, numpy_cosine_similarity_distance, require_accelerate, require_big_accelerator, require_torch_cuda_compatibility, torch_device, ) if is_optimum_quanto_available(): from optimum.quanto import QLinear if is_torch_available(): import torch from ..utils import LoRALayer, get_memory_consumption_stat enable_full_determinism() @nightly @require_big_accelerator @require_accelerate class QuantoBaseTesterMixin: model_id = None pipeline_model_id = None model_cls = None torch_dtype = torch.bfloat16 # the expected reduction in peak memory used compared to an unquantized model expressed as a percentage expected_memory_reduction = 0.0 keep_in_fp32_module = "" modules_to_not_convert = "" _test_torch_compile = False def setUp(self): backend_reset_peak_memory_stats(torch_device) backend_empty_cache(torch_device) gc.collect() def tearDown(self): backend_reset_peak_memory_stats(torch_device) backend_empty_cache(torch_device) gc.collect() def get_dummy_init_kwargs(self): return {"weights_dtype": "float8"} def get_dummy_model_init_kwargs(self): return { "pretrained_model_name_or_path": self.model_id, "torch_dtype": self.torch_dtype, "quantization_config": QuantoConfig(**self.get_dummy_init_kwargs()), } def test_quanto_layers(self): model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): assert isinstance(module, QLinear) def test_quanto_memory_usage(self): inputs = self.get_dummy_inputs() inputs = { k: v.to(device=torch_device, dtype=torch.bfloat16) for k, v in inputs.items() if not isinstance(v, bool) } unquantized_model = self.model_cls.from_pretrained(self.model_id, torch_dtype=self.torch_dtype) unquantized_model.to(torch_device) unquantized_model_memory = get_memory_consumption_stat(unquantized_model, inputs) quantized_model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) quantized_model.to(torch_device) quantized_model_memory = get_memory_consumption_stat(quantized_model, inputs) assert unquantized_model_memory / quantized_model_memory >= self.expected_memory_reduction def test_keep_modules_in_fp32(self): r""" A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. Also ensures if inference works. 
""" _keep_in_fp32_modules = self.model_cls._keep_in_fp32_modules self.model_cls._keep_in_fp32_modules = self.keep_in_fp32_module model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) model.to(torch_device) for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if name in model._keep_in_fp32_modules: assert module.weight.dtype == torch.float32 self.model_cls._keep_in_fp32_modules = _keep_in_fp32_modules def test_modules_to_not_convert(self): init_kwargs = self.get_dummy_model_init_kwargs() quantization_config_kwargs = self.get_dummy_init_kwargs() quantization_config_kwargs.update({"modules_to_not_convert": self.modules_to_not_convert}) quantization_config = QuantoConfig(**quantization_config_kwargs) init_kwargs.update({"quantization_config": quantization_config}) model = self.model_cls.from_pretrained(**init_kwargs) model.to(torch_device) for name, module in model.named_modules(): if name in self.modules_to_not_convert: assert not isinstance(module, QLinear) def test_dtype_assignment(self): model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) with self.assertRaises(ValueError): # Tries with a `dtype` model.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` and `dtype` device_0 = f"{torch_device}:0" model.to(device=device_0, dtype=torch.float16) with self.assertRaises(ValueError): # Tries with a cast model.float() with self.assertRaises(ValueError): # Tries with a cast model.half() # This should work model.to(torch_device) def test_serialization(self): model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) inputs = self.get_dummy_inputs() model.to(torch_device) with torch.no_grad(): model_output = model(**inputs) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) saved_model = self.model_cls.from_pretrained( tmp_dir, torch_dtype=torch.bfloat16, ) saved_model.to(torch_device) with torch.no_grad(): saved_model_output = saved_model(**inputs) assert torch.allclose(model_output.sample, saved_model_output.sample, rtol=1e-5, atol=1e-5) def test_torch_compile(self): if not self._test_torch_compile: return model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) compiled_model = torch.compile(model, mode="max-autotune", fullgraph=True, dynamic=False) model.to(torch_device) with torch.no_grad(): model_output = model(**self.get_dummy_inputs()).sample compiled_model.to(torch_device) with torch.no_grad(): compiled_model_output = compiled_model(**self.get_dummy_inputs()).sample model_output = model_output.detach().float().cpu().numpy() compiled_model_output = compiled_model_output.detach().float().cpu().numpy() max_diff = numpy_cosine_similarity_distance(model_output.flatten(), compiled_model_output.flatten()) assert max_diff < 1e-3 def test_device_map_error(self): with self.assertRaises(ValueError): _ = self.model_cls.from_pretrained( **self.get_dummy_model_init_kwargs(), device_map={0: "8GB", "cpu": "16GB"} ) class FluxTransformerQuantoMixin(QuantoBaseTesterMixin): model_id = "hf-internal-testing/tiny-flux-transformer" model_cls = FluxTransformer2DModel pipeline_cls = FluxPipeline torch_dtype = torch.bfloat16 keep_in_fp32_module = "proj_out" modules_to_not_convert = ["proj_out"] _test_torch_compile = False def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 4096, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states": torch.randn( (1, 512, 4096), 
generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "pooled_projections": torch.randn( (1, 768), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), "img_ids": torch.randn((4096, 3), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "txt_ids": torch.randn((512, 3), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "guidance": torch.tensor([3.5]).to(torch_device, self.torch_dtype), } def get_dummy_training_inputs(self, device=None, seed: int = 0): batch_size = 1 num_latent_channels = 4 num_image_channels = 3 height = width = 4 sequence_length = 48 embedding_dim = 32 torch.manual_seed(seed) hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( device, dtype=torch.bfloat16 ) torch.manual_seed(seed) pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "pooled_projections": pooled_prompt_embeds, "txt_ids": text_ids, "img_ids": image_ids, "timestep": timestep, } def test_model_cpu_offload(self): init_kwargs = self.get_dummy_init_kwargs() transformer = self.model_cls.from_pretrained( "hf-internal-testing/tiny-flux-pipe", quantization_config=QuantoConfig(**init_kwargs), subfolder="transformer", torch_dtype=torch.bfloat16, ) pipe = self.pipeline_cls.from_pretrained( "hf-internal-testing/tiny-flux-pipe", transformer=transformer, torch_dtype=torch.bfloat16 ) pipe.enable_model_cpu_offload(device=torch_device) _ = pipe("a cat holding a sign that says hello", num_inference_steps=2) def test_training(self): quantization_config = QuantoConfig(**self.get_dummy_init_kwargs()) quantized_model = self.model_cls.from_pretrained( "hf-internal-testing/tiny-flux-pipe", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ).to(torch_device) for param in quantized_model.parameters(): # freeze the model as only adapter layers will be trained param.requires_grad = False if param.ndim == 1: param.data = param.data.to(torch.float32) for _, module in quantized_model.named_modules(): if isinstance(module, Attention): module.to_q = LoRALayer(module.to_q, rank=4) module.to_k = LoRALayer(module.to_k, rank=4) module.to_v = LoRALayer(module.to_v, rank=4) with torch.amp.autocast(str(torch_device), dtype=torch.bfloat16): inputs = self.get_dummy_training_inputs(torch_device) output = quantized_model(**inputs)[0] output.norm().backward() for module in quantized_model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) class FluxTransformerFloat8WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): expected_memory_reduction = 0.6 def get_dummy_init_kwargs(self): return {"weights_dtype": "float8"} class FluxTransformerInt8WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): expected_memory_reduction = 0.6 
_test_torch_compile = True def get_dummy_init_kwargs(self): return {"weights_dtype": "int8"} @require_torch_cuda_compatibility(8.0) class FluxTransformerInt4WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): expected_memory_reduction = 0.55 def get_dummy_init_kwargs(self): return {"weights_dtype": "int4"} @require_torch_cuda_compatibility(8.0) class FluxTransformerInt2WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): expected_memory_reduction = 0.65 def get_dummy_init_kwargs(self): return {"weights_dtype": "int2"}
diffusers/tests/quantization/quanto/test_quanto.py/0
{ "file_path": "diffusers/tests/quantization/quanto/test_quanto.py", "repo_id": "diffusers", "token_count": 5633 }
186
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from diffusers import StableCascadeUNet from diffusers.utils import logging from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) logger = logging.get_logger(__name__) enable_full_determinism() @slow @require_torch_accelerator class StableCascadeUNetSingleFileTest(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_single_file_components_stage_b(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade", variant="bf16", subfolder="decoder", use_safetensors=True ) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between single file loading and pretrained loading" ) def test_single_file_components_stage_b_lite(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade", variant="bf16", subfolder="decoder_lite" ) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between single file loading and pretrained loading" ) def test_single_file_components_stage_c(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior" ) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between single file loading and pretrained loading" ) def test_single_file_components_stage_c_lite(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_lite_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior_lite" ) 
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between single file loading and pretrained loading" )
diffusers/tests/single_file/test_model_sd_cascade_unet_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_model_sd_cascade_unet_single_file.py", "repo_id": "diffusers", "token_count": 1959 }
187
import gc import unittest import torch from diffusers import ( StableDiffusionXLPipeline, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) from .single_file_testing_utils import SDXLSingleFileTesterMixin enable_full_determinism() @slow @require_torch_accelerator class StableDiffusionXLPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin): pipeline_class = StableDiffusionXLPipeline ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" repo_id = "stabilityai/stable-diffusion-xl-base-1.0" original_config = ( "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" ) def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "generator": generator, "num_inference_steps": 2, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_single_file_format_inference_is_same_as_pretrained(self): super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
diffusers/tests/single_file/test_stable_diffusion_xl_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_stable_diffusion_xl_single_file.py", "repo_id": "diffusers", "token_count": 738 }
188
# coding=utf-8 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import requests # Configuration GITHUB_REPO = "huggingface/diffusers" GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID") SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL") def main(args): action_url = f"https://github.com/{GITHUB_REPO}/actions/runs/{GITHUB_RUN_ID}" if args.status == "success": hub_path = "https://huggingface.co/datasets/diffusers/benchmarks/blob/main/collated_results.csv" message = ( "✅ New benchmark workflow successfully run.\n" f"🕸️ GitHub Action URL: {action_url}.\n" f"🤗 Check out the benchmarks here: {hub_path}." ) else: message = ( "❌ Something wrong happened in the benchmarking workflow.\n" f"Check out the GitHub Action to know more: {action_url}." ) payload = {"text": message} response = requests.post(SLACK_WEBHOOK_URL, json=payload) if response.status_code == 200: print("Notification sent to Slack successfully.") else: print("Failed to send notification to Slack.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--status", type=str, default="success", choices=["success", "failure"]) args = parser.parse_args() main(args)
diffusers/utils/notify_benchmarking_status.py/0
{ "file_path": "diffusers/utils/notify_benchmarking_status.py", "repo_id": "diffusers", "token_count": 699 }
189
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. default_language_version: python: python3.10 exclude: "tests/artifacts/.*\\.safetensors$" repos: ##### Meta ##### - repo: meta hooks: - id: check-useless-excludes - id: check-hooks-apply ##### General Code Quality & Formatting ##### - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: check-added-large-files args: ['--maxkb=1024'] - id: debug-statements - id: check-merge-conflict - id: check-case-conflict - id: check-yaml - id: check-toml - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.12.4 hooks: - id: ruff-format - id: ruff args: [--fix, --exit-non-zero-on-fix] - repo: https://github.com/adhtruong/mirrors-typos rev: v1.34.0 hooks: - id: typos args: [--force-exclude] - repo: https://github.com/asottile/pyupgrade rev: v3.20.0 hooks: - id: pyupgrade args: [--py310-plus] ##### Markdown Quality ##### - repo: https://github.com/rbubley/mirrors-prettier rev: v3.6.2 hooks: - id: prettier name: Format Markdown with Prettier types_or: [markdown, mdx] args: [--prose-wrap=preserve] ##### Security ##### - repo: https://github.com/gitleaks/gitleaks rev: v8.27.2 hooks: - id: gitleaks - repo: https://github.com/woodruffw/zizmor-pre-commit rev: v1.11.0 hooks: - id: zizmor - repo: https://github.com/PyCQA/bandit rev: 1.8.6 hooks: - id: bandit args: ["-c", "pyproject.toml"] additional_dependencies: ["bandit[toml]"] # TODO(Steven): Uncomment when ready to use ##### Static Analysis & Typing ##### # - repo: https://github.com/pre-commit/mirrors-mypy # rev: v1.16.0 # hooks: # - id: mypy # args: [--python-version=3.10] ##### Docstring Checks ##### # - repo: https://github.com/akaihola/darglint2 # rev: v1.8.2 # hooks: # - id: darglint2 # args: ["--docstring-style", "google", "-v", "2"] # exclude: ^tests/.*$ # - repo: https://github.com/econchick/interrogate # rev: 1.7.0 # hooks: # - id: interrogate # args: ["-vv", "--config=pyproject.toml"]
lerobot/.pre-commit-config.yaml/0
{ "file_path": "lerobot/.pre-commit-config.yaml", "repo_id": "lerobot", "token_count": 1232 }
190
# Backward compatibility ## Hardware API redesign PR [#777](https://github.com/huggingface/lerobot/pull/777) improves the LeRobot calibration but is **not backward-compatible**. Below is an overview of what changed and how you can continue to work with datasets created before this pull request. ### What changed? | | Before PR #777 | After PR #777 | | --------------------------------- | -------------------------------------------------- | ------------------------------------------------------------ | | **Joint range** | Degrees `-180...180°` | **Normalised range** Joints: `–100...100` Gripper: `0...100` | | **Zero position (SO100 / SO101)** | Arm fully extended horizontally | **In the middle of the range for each joint** | | **Boundary handling** | Software safeguards to detect ±180 ° wrap-arounds | No wrap-around logic needed due to mid-range zero | --- ### Impact on existing datasets - Recorded trajectories created **before** PR #777 will replay incorrectly if loaded directly: - Joint angles are offset and incorrectly normalized. - Any models directly finetuned or trained on the old data will need their inputs and outputs converted. ### Using datasets made with the previous calibration system We provide a migration example script for replaying an episode recorded with the previous calibration here: `examples/backward_compatibility/replay.py`. Below we walk through the modifications made in the example script so that datasets recorded with the previous calibration work. ```diff + key = f"{name.removeprefix('main_')}.pos" action[key] = action_array[i].item() + action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90) + action["elbow_flex.pos"] -= 90 ``` Let's break this down. The new codebase uses a `.pos` suffix for the position observations, and the `main_` prefix has been removed: <!-- prettier-ignore-start --> ```python key = f"{name.removeprefix('main_')}.pos" ``` <!-- prettier-ignore-end --> For `"shoulder_lift"` (id = 2), the 0 position is shifted by -90 degrees and the direction is reversed compared to the old calibration/code. <!-- prettier-ignore-start --> ```python action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90) ``` <!-- prettier-ignore-end --> For `"elbow_flex"` (id = 3), the 0 position is shifted by -90 degrees compared to the old calibration/code. <!-- prettier-ignore-start --> ```python action["elbow_flex.pos"] -= 90 ``` <!-- prettier-ignore-end --> To use degrees normalization, we then set the `--robot.use_degrees` option to `true`. ```diff python examples/backward_compatibility/replay.py \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem5A460814411 \ --robot.id=blue \ + --robot.use_degrees=true \ --dataset.repo_id=my_dataset_id \ --dataset.episode=0 ``` ### Using policies trained with the previous calibration system Policies output actions in the same format as the datasets (`torch.Tensors`). Therefore, the same transformations should be applied. To find these transformations, we recommend first replaying an episode of the dataset your policy was trained on using the section above.
Then, add these same transformations in your inference script (shown here in the `record.py` script): ```diff action_values = predict_action( observation_frame, policy, get_safe_torch_device(policy.config.device), policy.config.use_amp, task=single_task, robot_type=robot.robot_type, ) action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)} + action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90) + action["elbow_flex.pos"] -= 90 robot.send_action(action) ``` If you have questions or run into migration issues, feel free to ask them on [Discord](https://discord.gg/s3KuuzsPFb).
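As a final reference, the two joint fix-ups used in the examples above can be collected into one small helper. This is only an illustrative sketch: the function name `convert_old_calibration_action` is not part of LeRobot, and it assumes actions are plain dicts keyed with the new `<motor>.pos` names and that the robot runs with `--robot.use_degrees=true`.

<!-- prettier-ignore-start -->

```python
def convert_old_calibration_action(action: dict[str, float]) -> dict[str, float]:
    """Map an action recorded before PR #777 to the new convention (degrees mode)."""
    converted = dict(action)
    # shoulder_lift (id = 2): zero shifted by -90 degrees and direction reversed
    converted["shoulder_lift.pos"] = -(converted["shoulder_lift.pos"] - 90)
    # elbow_flex (id = 3): zero shifted by -90 degrees
    converted["elbow_flex.pos"] -= 90
    return converted
```

<!-- prettier-ignore-end -->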
lerobot/docs/source/backwardcomp.mdx/0
{ "file_path": "lerobot/docs/source/backwardcomp.mdx", "repo_id": "lerobot", "token_count": 1357 }
191
## Paper https://arxiv.org/abs/2506.01844 ## Citation ```bibtex @article{shukor2025smolvla, title={SmolVLA: A Vision-Language-Action Model for Affordable and Efficient Robotics}, author={Shukor, Mustafa and Aubakirova, Dana and Capuano, Francesco and Kooijmans, Pepijn and Palma, Steven and Zouitine, Adil and Aractingi, Michel and Pascal, Caroline and Russi, Martino and Marafioti, Andres and Alibert, Simon and Cord, Matthieu and Wolf, Thomas and Cadene, Remi}, journal={arXiv preprint arXiv:2506.01844}, year={2025} } ```
lerobot/docs/source/policy_smolvla_README.md/0
{ "file_path": "lerobot/docs/source/policy_smolvla_README.md", "repo_id": "lerobot", "token_count": 190 }
192
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Helper to recalibrate your device (robot or teleoperator). Example: ```shell lerobot-calibrate \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ --teleop.id=blue ``` """ import logging from dataclasses import asdict, dataclass from pprint import pformat import draccus from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401 from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401 from lerobot.robots import ( # noqa: F401 Robot, RobotConfig, hope_jr, koch_follower, lekiwi, make_robot_from_config, so100_follower, so101_follower, ) from lerobot.teleoperators import ( # noqa: F401 Teleoperator, TeleoperatorConfig, homunculus, koch_leader, make_teleoperator_from_config, so100_leader, so101_leader, ) from lerobot.utils.utils import init_logging @dataclass class CalibrateConfig: teleop: TeleoperatorConfig | None = None robot: RobotConfig | None = None def __post_init__(self): if bool(self.teleop) == bool(self.robot): raise ValueError("Choose either a teleop or a robot.") self.device = self.robot if self.robot else self.teleop @draccus.wrap() def calibrate(cfg: CalibrateConfig): init_logging() logging.info(pformat(asdict(cfg))) if isinstance(cfg.device, RobotConfig): device = make_robot_from_config(cfg.device) elif isinstance(cfg.device, TeleoperatorConfig): device = make_teleoperator_from_config(cfg.device) device.connect(calibrate=False) device.calibrate() device.disconnect() def main(): calibrate() if __name__ == "__main__": main()
lerobot/src/lerobot/calibrate.py/0
{ "file_path": "lerobot/src/lerobot/calibrate.py", "repo_id": "lerobot", "token_count": 849 }
193
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Note: We subclass str so that serialization is straightforward # https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json from dataclasses import dataclass from enum import Enum from typing import Any, Protocol class FeatureType(str, Enum): STATE = "STATE" VISUAL = "VISUAL" ENV = "ENV" ACTION = "ACTION" REWARD = "REWARD" class NormalizationMode(str, Enum): MIN_MAX = "MIN_MAX" MEAN_STD = "MEAN_STD" IDENTITY = "IDENTITY" class DictLike(Protocol): def __getitem__(self, key: Any) -> Any: ... @dataclass class PolicyFeature: type: FeatureType shape: tuple
lerobot/src/lerobot/configs/types.py/0
{ "file_path": "lerobot/src/lerobot/configs/types.py", "repo_id": "lerobot", "token_count": 393 }
194
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is for internal use to convert all datasets under the 'lerobot' hub user account to v2.1. """ import traceback from pathlib import Path from huggingface_hub import HfApi from lerobot import available_datasets from lerobot.datasets.v21.convert_dataset_v20_to_v21 import V21, convert_dataset LOCAL_DIR = Path("data/") def batch_convert(): status = {} LOCAL_DIR.mkdir(parents=True, exist_ok=True) logfile = LOCAL_DIR / "conversion_log_v21.txt" hub_api = HfApi() for num, repo_id in enumerate(available_datasets): print(f"\nConverting {repo_id} ({num}/{len(available_datasets)})") print("---------------------------------------------------------") try: if hub_api.revision_exists(repo_id, V21, repo_type="dataset"): status = f"{repo_id}: success (already in {V21})." else: convert_dataset(repo_id) status = f"{repo_id}: success." except Exception: status = f"{repo_id}: failed\n {traceback.format_exc()}" with open(logfile, "a") as file: file.write(status + "\n") if __name__ == "__main__": batch_convert()
lerobot/src/lerobot/datasets/v21/batch_convert_dataset_v20_to_v21.py/0
{ "file_path": "lerobot/src/lerobot/datasets/v21/batch_convert_dataset_v20_to_v21.py", "repo_id": "lerobot", "token_count": 693 }
195
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO(Steven): Consider doing the following: # from enum import Enum # class MyControlTableKey(Enum): # ID = "ID" # GOAL_SPEED = "Goal_Speed" # ... # # MY_CONTROL_TABLE ={ # MyControlTableKey.ID.value: (5,1) # MyControlTableKey.GOAL_SPEED.value: (46, 2) # ... # } # This allows me do to: # bus.write(MyControlTableKey.GOAL_SPEED, ...) # Instead of: # bus.write("Goal_Speed", ...) # This is important for two reasons: # 1. The linter will tell me if I'm trying to use an invalid key, instead of me realizing when I get the RunTimeError # 2. We can change the value of the MyControlTableKey enums without impacting the client code # {data_name: (address, size_byte)} # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table X_SERIES_CONTROL_TABLE = { "Model_Number": (0, 2), "Model_Information": (2, 4), "Firmware_Version": (6, 1), "ID": (7, 1), "Baud_Rate": (8, 1), "Return_Delay_Time": (9, 1), "Drive_Mode": (10, 1), "Operating_Mode": (11, 1), "Secondary_ID": (12, 1), "Protocol_Type": (13, 1), "Homing_Offset": (20, 4), "Moving_Threshold": (24, 4), "Temperature_Limit": (31, 1), "Max_Voltage_Limit": (32, 2), "Min_Voltage_Limit": (34, 2), "PWM_Limit": (36, 2), "Current_Limit": (38, 2), "Acceleration_Limit": (40, 4), "Velocity_Limit": (44, 4), "Max_Position_Limit": (48, 4), "Min_Position_Limit": (52, 4), "Shutdown": (63, 1), "Torque_Enable": (64, 1), "LED": (65, 1), "Status_Return_Level": (68, 1), "Registered_Instruction": (69, 1), "Hardware_Error_Status": (70, 1), "Velocity_I_Gain": (76, 2), "Velocity_P_Gain": (78, 2), "Position_D_Gain": (80, 2), "Position_I_Gain": (82, 2), "Position_P_Gain": (84, 2), "Feedforward_2nd_Gain": (88, 2), "Feedforward_1st_Gain": (90, 2), "Bus_Watchdog": (98, 1), "Goal_PWM": (100, 2), "Goal_Current": (102, 2), "Goal_Velocity": (104, 4), "Profile_Acceleration": (108, 4), "Profile_Velocity": (112, 4), "Goal_Position": (116, 4), "Realtime_Tick": (120, 2), "Moving": (122, 1), "Moving_Status": (123, 1), "Present_PWM": (124, 2), "Present_Current": (126, 2), "Present_Velocity": (128, 4), "Present_Position": (132, 4), "Velocity_Trajectory": (136, 4), "Position_Trajectory": (140, 4), "Present_Input_Voltage": (144, 2), "Present_Temperature": (146, 1), } # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#baud-rate8 X_SERIES_BAUDRATE_TABLE = { 9_600: 0, 57_600: 1, 115_200: 2, 1_000_000: 3, 2_000_000: 4, 3_000_000: 5, 4_000_000: 6, } # {data_name: size_byte} X_SERIES_ENCODINGS_TABLE = { "Homing_Offset": X_SERIES_CONTROL_TABLE["Homing_Offset"][1], "Goal_PWM": X_SERIES_CONTROL_TABLE["Goal_PWM"][1], "Goal_Current": X_SERIES_CONTROL_TABLE["Goal_Current"][1], "Goal_Velocity": X_SERIES_CONTROL_TABLE["Goal_Velocity"][1], "Goal_Position": X_SERIES_CONTROL_TABLE["Goal_Position"][1], "Present_Position": X_SERIES_CONTROL_TABLE["Present_Position"][1], "Present_PWM": X_SERIES_CONTROL_TABLE["Present_PWM"][1], "Present_Current": X_SERIES_CONTROL_TABLE["Present_Current"][1], "Present_Velocity": 
X_SERIES_CONTROL_TABLE["Present_Velocity"][1], } MODEL_ENCODING_TABLE = { "x_series": X_SERIES_ENCODINGS_TABLE, "xl330-m077": X_SERIES_ENCODINGS_TABLE, "xl330-m288": X_SERIES_ENCODINGS_TABLE, "xl430-w250": X_SERIES_ENCODINGS_TABLE, "xm430-w350": X_SERIES_ENCODINGS_TABLE, "xm540-w270": X_SERIES_ENCODINGS_TABLE, "xc430-w150": X_SERIES_ENCODINGS_TABLE, } # {model: model_resolution} # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#specifications MODEL_RESOLUTION = { "x_series": 4096, "xl330-m077": 4096, "xl330-m288": 4096, "xl430-w250": 4096, "xm430-w350": 4096, "xm540-w270": 4096, "xc430-w150": 4096, } # {model: model_number} # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#control-table-of-eeprom-area MODEL_NUMBER_TABLE = { "xl330-m077": 1190, "xl330-m288": 1200, "xl430-w250": 1060, "xm430-w350": 1020, "xm540-w270": 1120, "xc430-w150": 1070, } # {model: available_operating_modes} # https://emanual.robotis.com/docs/en/dxl/x/{MODEL}/#operating-mode11 MODEL_OPERATING_MODES = { "xl330-m077": [0, 1, 3, 4, 5, 16], "xl330-m288": [0, 1, 3, 4, 5, 16], "xl430-w250": [1, 3, 4, 16], "xm430-w350": [0, 1, 3, 4, 5, 16], "xm540-w270": [0, 1, 3, 4, 5, 16], "xc430-w150": [1, 3, 4, 16], } MODEL_CONTROL_TABLE = { "x_series": X_SERIES_CONTROL_TABLE, "xl330-m077": X_SERIES_CONTROL_TABLE, "xl330-m288": X_SERIES_CONTROL_TABLE, "xl430-w250": X_SERIES_CONTROL_TABLE, "xm430-w350": X_SERIES_CONTROL_TABLE, "xm540-w270": X_SERIES_CONTROL_TABLE, "xc430-w150": X_SERIES_CONTROL_TABLE, } MODEL_BAUDRATE_TABLE = { "x_series": X_SERIES_BAUDRATE_TABLE, "xl330-m077": X_SERIES_BAUDRATE_TABLE, "xl330-m288": X_SERIES_BAUDRATE_TABLE, "xl430-w250": X_SERIES_BAUDRATE_TABLE, "xm430-w350": X_SERIES_BAUDRATE_TABLE, "xm540-w270": X_SERIES_BAUDRATE_TABLE, "xc430-w150": X_SERIES_BAUDRATE_TABLE, } AVAILABLE_BAUDRATES = [ 9_600, 19_200, 38_400, 57_600, 115_200, 230_400, 460_800, 500_000, 576_000, 921_600, 1_000_000, 1_152_000, 2_000_000, 2_500_000, 3_000_000, 3_500_000, 4_000_000, ]
lerobot/src/lerobot/motors/dynamixel/tables.py/0
{ "file_path": "lerobot/src/lerobot/motors/dynamixel/tables.py", "repo_id": "lerobot", "token_count": 2837 }
196
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from torch import nn from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import FeatureType from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata from lerobot.datasets.utils import dataset_to_policy_features from lerobot.envs.configs import EnvConfig from lerobot.envs.utils import env_to_policy_features from lerobot.policies.act.configuration_act import ACTConfig from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.policies.pi0.configuration_pi0 import PI0Config from lerobot.policies.pi0fast.configuration_pi0fast import PI0FASTConfig from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.sac.configuration_sac import SACConfig from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig from lerobot.policies.smolvla.configuration_smolvla import SmolVLAConfig from lerobot.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig def get_policy_class(name: str) -> PreTrainedPolicy: """Get the policy's class and config class given a name (matching the policy class' `name` attribute).""" if name == "tdmpc": from lerobot.policies.tdmpc.modeling_tdmpc import TDMPCPolicy return TDMPCPolicy elif name == "diffusion": from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy return DiffusionPolicy elif name == "act": from lerobot.policies.act.modeling_act import ACTPolicy return ACTPolicy elif name == "vqbet": from lerobot.policies.vqbet.modeling_vqbet import VQBeTPolicy return VQBeTPolicy elif name == "pi0": from lerobot.policies.pi0.modeling_pi0 import PI0Policy return PI0Policy elif name == "pi0fast": from lerobot.policies.pi0fast.modeling_pi0fast import PI0FASTPolicy return PI0FASTPolicy elif name == "sac": from lerobot.policies.sac.modeling_sac import SACPolicy return SACPolicy elif name == "reward_classifier": from lerobot.policies.sac.reward_model.modeling_classifier import Classifier return Classifier elif name == "smolvla": from lerobot.policies.smolvla.modeling_smolvla import SmolVLAPolicy return SmolVLAPolicy else: raise NotImplementedError(f"Policy with name {name} is not implemented.") def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig: if policy_type == "tdmpc": return TDMPCConfig(**kwargs) elif policy_type == "diffusion": return DiffusionConfig(**kwargs) elif policy_type == "act": return ACTConfig(**kwargs) elif policy_type == "vqbet": return VQBeTConfig(**kwargs) elif policy_type == "pi0": return PI0Config(**kwargs) elif policy_type == "pi0fast": return PI0FASTConfig(**kwargs) elif policy_type == "sac": return SACConfig(**kwargs) elif policy_type == "smolvla": return SmolVLAConfig(**kwargs) elif policy_type == "reward_classifier": return RewardClassifierConfig(**kwargs) else: raise ValueError(f"Policy type 
'{policy_type}' is not available.") def make_policy( cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata | None = None, env_cfg: EnvConfig | None = None, ) -> PreTrainedPolicy: """Make an instance of a policy class. This function exists because (for now) we need to parse features from either a dataset or an environment in order to properly dimension and instantiate a policy for that dataset or environment. Args: cfg (PreTrainedConfig): The config of the policy to make. If `pretrained_path` is set, the policy will be loaded with the weights from that path. ds_meta (LeRobotDatasetMetadata | None, optional): Dataset metadata to take input/output shapes and statistics to use for (un)normalization of inputs/outputs in the policy. Defaults to None. env_cfg (EnvConfig | None, optional): The config of a gym environment to parse features from. Must be provided if ds_meta is not. Defaults to None. Raises: ValueError: Either ds_meta or env and env_cfg must be provided. NotImplementedError: if the policy.type is 'vqbet' and the policy device 'mps' (due to an incompatibility) Returns: PreTrainedPolicy: _description_ """ if bool(ds_meta) == bool(env_cfg): raise ValueError("Either one of a dataset metadata or a sim env must be provided.") # NOTE: Currently, if you try to run vqbet with mps backend, you'll get this error. # TODO(aliberts, rcadene): Implement a check_backend_compatibility in policies? # NotImplementedError: The operator 'aten::unique_dim' is not currently implemented for the MPS device. If # you want this op to be added in priority during the prototype phase of this feature, please comment on # https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment # variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be # slower than running natively on MPS. if cfg.type == "vqbet" and cfg.device == "mps": raise NotImplementedError( "Current implementation of VQBeT does not support `mps` backend. " "Please use `cpu` or `cuda` backend." ) policy_cls = get_policy_class(cfg.type) kwargs = {} if ds_meta is not None: features = dataset_to_policy_features(ds_meta.features) kwargs["dataset_stats"] = ds_meta.stats else: if not cfg.pretrained_path: logging.warning( "You are instantiating a policy from scratch and its features are parsed from an environment " "rather than a dataset. Normalization modules inside the policy will have infinite values " "by default without stats from a dataset." ) features = env_to_policy_features(env_cfg) cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} cfg.input_features = {key: ft for key, ft in features.items() if key not in cfg.output_features} kwargs["config"] = cfg if cfg.pretrained_path: # Load a pretrained policy and override the config if needed (for example, if there are inference-time # hyperparameters that we want to vary). kwargs["pretrained_name_or_path"] = cfg.pretrained_path policy = policy_cls.from_pretrained(**kwargs) else: # Make a fresh policy. policy = policy_cls(**kwargs) policy.to(cfg.device) assert isinstance(policy, nn.Module) # policy = torch.compile(policy, mode="reduce-overhead") return policy
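# Illustrative usage sketch (not part of the library; the dataset repo id and config values below are
# examples only, chosen to show the expected call pattern for `make_policy`):
#
#     from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata
#     from lerobot.policies.factory import make_policy, make_policy_config
#
#     ds_meta = LeRobotDatasetMetadata("lerobot/pusht")  # input/output features and stats are parsed from the dataset
#     cfg = make_policy_config("act", device="cpu")      # any supported policy type can be used here
#     policy = make_policy(cfg, ds_meta=ds_meta)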
lerobot/src/lerobot/policies/factory.py/0
{ "file_path": "lerobot/src/lerobot/policies/factory.py", "repo_id": "lerobot", "token_count": 2795 }
197
# !/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import torch from torch import Tensor, nn from lerobot.constants import OBS_IMAGE, REWARD from lerobot.policies.normalize import Normalize, Unnormalize from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig class ClassifierOutput: """Wrapper for classifier outputs with additional metadata.""" def __init__( self, logits: Tensor, probabilities: Tensor | None = None, hidden_states: Tensor | None = None, ): self.logits = logits self.probabilities = probabilities self.hidden_states = hidden_states def __repr__(self): return ( f"ClassifierOutput(logits={self.logits}, " f"probabilities={self.probabilities}, " f"hidden_states={self.hidden_states})" ) class SpatialLearnedEmbeddings(nn.Module): def __init__(self, height, width, channel, num_features=8): """ PyTorch implementation of learned spatial embeddings Args: height: Spatial height of input features width: Spatial width of input features channel: Number of input channels num_features: Number of output embedding dimensions """ super().__init__() self.height = height self.width = width self.channel = channel self.num_features = num_features self.kernel = nn.Parameter(torch.empty(channel, height, width, num_features)) nn.init.kaiming_normal_(self.kernel, mode="fan_in", nonlinearity="linear") def forward(self, features): """ Forward pass for spatial embedding Args: features: Input tensor of shape [B, H, W, C] or [H, W, C] if no batch Returns: Output tensor of shape [B, C*F] or [C*F] if no batch """ features = features.last_hidden_state original_shape = features.shape if features.dim() == 3: features = features.unsqueeze(0) # Add batch dim features_expanded = features.unsqueeze(-1) # [B, H, W, C, 1] kernel_expanded = self.kernel.unsqueeze(0) # [1, H, W, C, F] # Element-wise multiplication and spatial reduction output = (features_expanded * kernel_expanded).sum(dim=(2, 3)) # Sum H,W # Reshape to combine channel and feature dimensions output = output.view(output.size(0), -1) # [B, C*F] # Remove batch dim if len(original_shape) == 3: output = output.squeeze(0) return output class Classifier(PreTrainedPolicy): """Image classifier built on top of a pre-trained encoder.""" name = "reward_classifier" config_class = RewardClassifierConfig def __init__( self, config: RewardClassifierConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): from transformers import AutoModel super().__init__(config) self.config = config # Initialize normalization (standardized with the policy framework) self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) self.normalize_targets = Normalize( config.output_features, config.normalization_mapping, dataset_stats ) self.unnormalize_outputs = Unnormalize( config.output_features, config.normalization_mapping, dataset_stats ) # Set up encoder 
encoder = AutoModel.from_pretrained(self.config.model_name, trust_remote_code=True) # Extract vision model if we're given a multimodal model if hasattr(encoder, "vision_model"): logging.info("Multimodal model detected - using vision encoder only") self.encoder = encoder.vision_model self.vision_config = encoder.config.vision_config else: self.encoder = encoder self.vision_config = getattr(encoder, "config", None) # Model type from config self.is_cnn = self.config.model_type == "cnn" # For CNNs, initialize backbone if self.is_cnn: self._setup_cnn_backbone() self._freeze_encoder() # Extract image keys from input_features self.image_keys = [ key.replace(".", "_") for key in config.input_features if key.startswith(OBS_IMAGE) ] if self.is_cnn: self.encoders = nn.ModuleDict() for image_key in self.image_keys: encoder = self._create_single_encoder() self.encoders[image_key] = encoder self._build_classifier_head() def _setup_cnn_backbone(self): """Set up CNN encoder""" if hasattr(self.encoder, "fc"): self.feature_dim = self.encoder.fc.in_features self.encoder = nn.Sequential(*list(self.encoder.children())[:-1]) elif hasattr(self.encoder.config, "hidden_sizes"): self.feature_dim = self.encoder.config.hidden_sizes[-1] # Last channel dimension else: raise ValueError("Unsupported CNN architecture") def _freeze_encoder(self) -> None: """Freeze the encoder parameters.""" for param in self.encoder.parameters(): param.requires_grad = False def _create_single_encoder(self): encoder = nn.Sequential( self.encoder, SpatialLearnedEmbeddings( height=4, width=4, channel=self.feature_dim, num_features=self.config.image_embedding_pooling_dim, ), nn.Dropout(self.config.dropout_rate), nn.Linear(self.feature_dim * self.config.image_embedding_pooling_dim, self.config.latent_dim), nn.LayerNorm(self.config.latent_dim), nn.Tanh(), ) return encoder def _build_classifier_head(self) -> None: """Initialize the classifier head architecture.""" # Get input dimension based on model type if self.is_cnn: input_dim = self.config.latent_dim else: # Transformer models if hasattr(self.encoder.config, "hidden_size"): input_dim = self.encoder.config.hidden_size else: raise ValueError("Unsupported transformer architecture since hidden_size is not found") self.classifier_head = nn.Sequential( nn.Linear(input_dim * self.config.num_cameras, self.config.hidden_dim), nn.Dropout(self.config.dropout_rate), nn.LayerNorm(self.config.hidden_dim), nn.ReLU(), nn.Linear( self.config.hidden_dim, 1 if self.config.num_classes == 2 else self.config.num_classes, ), ) def _get_encoder_output(self, x: torch.Tensor, image_key: str) -> torch.Tensor: """Extract the appropriate output from the encoder.""" with torch.no_grad(): if self.is_cnn: # The HF ResNet applies pooling internally outputs = self.encoders[image_key](x) return outputs else: # Transformer models outputs = self.encoder(x) return outputs.last_hidden_state[:, 0, :] def extract_images_and_labels(self, batch: dict[str, Tensor]) -> tuple[list, Tensor]: """Extract image tensors and label tensors from batch.""" # Check for both OBS_IMAGE and OBS_IMAGES prefixes images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)] labels = batch[REWARD] return images, labels def predict(self, xs: list) -> ClassifierOutput: """Forward pass of the classifier for inference.""" encoder_outputs = torch.hstack( [self._get_encoder_output(x, img_key) for x, img_key in zip(xs, self.image_keys, strict=True)] ) logits = self.classifier_head(encoder_outputs) if self.config.num_classes == 2: 
logits = logits.squeeze(-1) probabilities = torch.sigmoid(logits) else: probabilities = torch.softmax(logits, dim=-1) return ClassifierOutput(logits=logits, probabilities=probabilities, hidden_states=encoder_outputs) def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict[str, Tensor]]: """Standard forward pass for training compatible with train.py.""" # Normalize inputs if needed batch = self.normalize_inputs(batch) batch = self.normalize_targets(batch) # Extract images and labels images, labels = self.extract_images_and_labels(batch) # Get predictions outputs = self.predict(images) # Calculate loss if self.config.num_classes == 2: # Binary classification loss = nn.functional.binary_cross_entropy_with_logits(outputs.logits, labels) predictions = (torch.sigmoid(outputs.logits) > 0.5).float() else: # Multi-class classification loss = nn.functional.cross_entropy(outputs.logits, labels.long()) predictions = torch.argmax(outputs.logits, dim=1) # Calculate accuracy for logging correct = (predictions == labels).sum().item() total = labels.size(0) accuracy = 100 * correct / total # Return loss and metrics for logging output_dict = { "accuracy": accuracy, "correct": correct, "total": total, } return loss, output_dict def predict_reward(self, batch, threshold=0.5): """Eval method. Returns predicted reward with the decision threshold as argument.""" # Check for both OBS_IMAGE and OBS_IMAGES prefixes batch = self.normalize_inputs(batch) batch = self.normalize_targets(batch) # Extract images from batch dict images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)] if self.config.num_classes == 2: probs = self.predict(images).probabilities logging.debug(f"Predicted reward images: {probs}") return (probs > threshold).float() else: return torch.argmax(self.predict(images).probabilities, dim=1) def get_optim_params(self): """Return optimizer parameters for the policy.""" return self.parameters() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """ This method is required by PreTrainedPolicy but not used for reward classifiers. The reward classifier is not an actor and does not select actions. """ raise NotImplementedError("Reward classifiers do not select actions") def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """ This method is required by PreTrainedPolicy but not used for reward classifiers. The reward classifier is not an actor and does not produce action chunks. """ raise NotImplementedError("Reward classifiers do not predict action chunks") def reset(self): """ This method is required by PreTrainedPolicy but not used for reward classifiers. The reward classifier is not an actor and does not select actions. """ pass
lerobot/src/lerobot/policies/sac/reward_model/modeling_classifier.py/0
{ "file_path": "lerobot/src/lerobot/policies/sac/reward_model/modeling_classifier.py", "repo_id": "lerobot", "token_count": 5078 }
198
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass import einops import numpy as np import torch from torch import Tensor from lerobot.configs.types import PolicyFeature from lerobot.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE from lerobot.processor.pipeline import ObservationProcessor, ProcessorStepRegistry @dataclass @ProcessorStepRegistry.register(name="observation_processor") class VanillaObservationProcessor(ObservationProcessor): """ Processes environment observations into the LeRobot format by handling both images and states. Image processing: - Converts channel-last (H, W, C) images to channel-first (C, H, W) - Normalizes uint8 images ([0, 255]) to float32 ([0, 1]) - Adds a batch dimension if missing - Supports single images and image dictionaries State processing: - Maps 'environment_state' to observation.environment_state - Maps 'agent_pos' to observation.state - Converts numpy arrays to tensors - Adds a batch dimension if missing """ def _process_single_image(self, img: np.ndarray) -> Tensor: """Process a single image array.""" # Convert to tensor img_tensor = torch.from_numpy(img) # Add batch dimension if needed if img_tensor.ndim == 3: img_tensor = img_tensor.unsqueeze(0) # Validate image format _, h, w, c = img_tensor.shape if not (c < h and c < w): raise ValueError(f"Expected channel-last images, but got shape {img_tensor.shape}") if img_tensor.dtype != torch.uint8: raise ValueError(f"Expected torch.uint8 images, but got {img_tensor.dtype}") # Convert to channel-first format img_tensor = einops.rearrange(img_tensor, "b h w c -> b c h w").contiguous() # Convert to float32 and normalize to [0, 1] img_tensor = img_tensor.type(torch.float32) / 255.0 return img_tensor def _process_observation(self, observation): """ Processes both image and state observations. """ processed_obs = observation.copy() if "pixels" in processed_obs: pixels = processed_obs.pop("pixels") if isinstance(pixels, dict): imgs = {f"{OBS_IMAGES}.{key}": img for key, img in pixels.items()} else: imgs = {OBS_IMAGE: pixels} for imgkey, img in imgs.items(): processed_obs[imgkey] = self._process_single_image(img) if "environment_state" in processed_obs: env_state_np = processed_obs.pop("environment_state") env_state = torch.from_numpy(env_state_np).float() if env_state.dim() == 1: env_state = env_state.unsqueeze(0) processed_obs[OBS_ENV_STATE] = env_state if "agent_pos" in processed_obs: agent_pos_np = processed_obs.pop("agent_pos") agent_pos = torch.from_numpy(agent_pos_np).float() if agent_pos.dim() == 1: agent_pos = agent_pos.unsqueeze(0) processed_obs[OBS_STATE] = agent_pos return processed_obs def observation(self, observation): return self._process_observation(observation) def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]: """Transforms feature keys to a standardized contract. 
This method handles several renaming patterns: - Exact matches (e.g., 'pixels' -> 'OBS_IMAGE'). - Prefixed exact matches (e.g., 'observation.pixels' -> 'OBS_IMAGE'). - Prefix matches (e.g., 'pixels.cam1' -> 'OBS_IMAGES.cam1'). - Prefixed prefix matches (e.g., 'observation.pixels.cam1' -> 'OBS_IMAGES.cam1'). - environment_state -> OBS_ENV_STATE, - agent_pos -> OBS_STATE, - observation.environment_state -> OBS_ENV_STATE, - observation.agent_pos -> OBS_STATE """ exact_pairs = { "pixels": OBS_IMAGE, "environment_state": OBS_ENV_STATE, "agent_pos": OBS_STATE, } prefix_pairs = { "pixels.": f"{OBS_IMAGES}.", } for key in list(features.keys()): matched_prefix = False for old_prefix, new_prefix in prefix_pairs.items(): prefixed_old = f"observation.{old_prefix}" if key.startswith(prefixed_old): suffix = key[len(prefixed_old) :] features[f"{new_prefix}{suffix}"] = features.pop(key) matched_prefix = True break if key.startswith(old_prefix): suffix = key[len(old_prefix) :] features[f"{new_prefix}{suffix}"] = features.pop(key) matched_prefix = True break if matched_prefix: continue for old, new in exact_pairs.items(): if key == old or key == f"observation.{old}": if key in features: features[new] = features.pop(key) break return features
lerobot/src/lerobot/processor/observation_processor.py/0
{ "file_path": "lerobot/src/lerobot/processor/observation_processor.py", "repo_id": "lerobot", "token_count": 2558 }
199
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json from copy import deepcopy from pathlib import Path import cv2 import torch import torchvision.transforms.functional as F # type: ignore # noqa: N812 from tqdm import tqdm # type: ignore from lerobot.datasets.lerobot_dataset import LeRobotDataset def select_rect_roi(img): """ Allows the user to draw a rectangular ROI on the image. The user must click and drag to draw the rectangle. - While dragging, the rectangle is dynamically drawn. - On mouse button release, the rectangle is fixed. - Press 'c' to confirm the selection. - Press 'r' to reset the selection. - Press ESC to cancel. Returns: A tuple (top, left, height, width) representing the rectangular ROI, or None if no valid ROI is selected. """ # Create a working copy of the image clone = img.copy() working_img = clone.copy() roi = None # Will store the final ROI as (top, left, height, width) drawing = False index_x, index_y = -1, -1 # Initial click coordinates def mouse_callback(event, x, y, flags, param): nonlocal index_x, index_y, drawing, roi, working_img if event == cv2.EVENT_LBUTTONDOWN: # Start drawing: record starting coordinates drawing = True index_x, index_y = x, y elif event == cv2.EVENT_MOUSEMOVE: if drawing: # Compute the top-left and bottom-right corners regardless of drag direction top = min(index_y, y) left = min(index_x, x) bottom = max(index_y, y) right = max(index_x, x) # Show a temporary image with the current rectangle drawn temp = working_img.copy() cv2.rectangle(temp, (left, top), (right, bottom), (0, 255, 0), 2) cv2.imshow("Select ROI", temp) elif event == cv2.EVENT_LBUTTONUP: # Finish drawing drawing = False top = min(index_y, y) left = min(index_x, x) bottom = max(index_y, y) right = max(index_x, x) height = bottom - top width = right - left roi = (top, left, height, width) # (top, left, height, width) # Draw the final rectangle on the working image and display it working_img = clone.copy() cv2.rectangle(working_img, (left, top), (right, bottom), (0, 255, 0), 2) cv2.imshow("Select ROI", working_img) # Create the window and set the callback cv2.namedWindow("Select ROI") cv2.setMouseCallback("Select ROI", mouse_callback) cv2.imshow("Select ROI", working_img) print("Instructions for ROI selection:") print(" - Click and drag to draw a rectangular ROI.") print(" - Press 'c' to confirm the selection.") print(" - Press 'r' to reset and draw again.") print(" - Press ESC to cancel the selection.") # Wait until the user confirms with 'c', resets with 'r', or cancels with ESC while True: key = cv2.waitKey(1) & 0xFF # Confirm ROI if one has been drawn if key == ord("c") and roi is not None: break # Reset: clear the ROI and restore the original image elif key == ord("r"): working_img = clone.copy() roi = None cv2.imshow("Select ROI", working_img) # Cancel selection for this image elif key == 27: # ESC key roi = None break cv2.destroyWindow("Select ROI") return roi def 
select_square_roi_for_images(images: dict) -> dict: """ For each image in the provided dictionary, open a window to allow the user to select a rectangular ROI. Returns a dictionary mapping each key to a tuple (top, left, height, width) representing the ROI. Parameters: images (dict): Dictionary where keys are identifiers and values are OpenCV images. Returns: dict: Mapping of image keys to the selected rectangular ROI. """ selected_rois = {} for key, img in images.items(): if img is None: print(f"Image for key '{key}' is None, skipping.") continue print(f"\nSelect rectangular ROI for image with key: '{key}'") roi = select_rect_roi(img) if roi is None: print(f"No valid ROI selected for '{key}'.") else: selected_rois[key] = roi print(f"ROI for '{key}': {roi}") return selected_rois def get_image_from_lerobot_dataset(dataset: LeRobotDataset): """ Find the first row in the dataset and extract the image in order to be used for the crop. """ row = dataset[0] image_dict = {} for k in row: if "image" in k: image_dict[k] = deepcopy(row[k]) return image_dict def convert_lerobot_dataset_to_cropper_lerobot_dataset( original_dataset: LeRobotDataset, crop_params_dict: dict[str, tuple[int, int, int, int]], new_repo_id: str, new_dataset_root: str, resize_size: tuple[int, int] = (128, 128), push_to_hub: bool = False, task: str = "", ) -> LeRobotDataset: """ Converts an existing LeRobotDataset by iterating over its episodes and frames, applying cropping and resizing to image observations, and saving a new dataset with the transformed data. Args: original_dataset (LeRobotDataset): The source dataset. crop_params_dict (Dict[str, Tuple[int, int, int, int]]): A dictionary mapping observation keys to crop parameters (top, left, height, width). new_repo_id (str): Repository id for the new dataset. new_dataset_root (str): The root directory where the new dataset will be written. resize_size (Tuple[int, int], optional): The target size (height, width) after cropping. Defaults to (128, 128). Returns: LeRobotDataset: A new LeRobotDataset where the specified image observations have been cropped and resized. """ # 1. Create a new (empty) LeRobotDataset for writing. new_dataset = LeRobotDataset.create( repo_id=new_repo_id, fps=original_dataset.fps, root=new_dataset_root, robot_type=original_dataset.meta.robot_type, features=original_dataset.meta.info["features"], use_videos=len(original_dataset.meta.video_keys) > 0, ) # Update the metadata for every image key that will be cropped: # (Here we simply set the shape to be the final resize_size.) for key in crop_params_dict: if key in new_dataset.meta.info["features"]: new_dataset.meta.info["features"][key]["shape"] = [3] + list(resize_size) # TODO: Directly modify the mp4 video + meta info features, instead of recreating a dataset prev_episode_index = 0 for frame_idx in tqdm(range(len(original_dataset))): frame = original_dataset[frame_idx] # Create a copy of the frame to add to the new dataset new_frame = {} for key, value in frame.items(): if key in ("task_index", "timestamp", "episode_index", "frame_index", "index", "task"): continue if key in ("next.done", "next.reward"): # if not isinstance(value, str) and len(value.shape) == 0: value = value.unsqueeze(0) if key in crop_params_dict: top, left, height, width = crop_params_dict[key] # Apply crop then resize. 
cropped = F.crop(value, top, left, height, width) value = F.resize(cropped, resize_size) value = value.clamp(0, 1) if key.startswith("complementary_info") and isinstance(value, torch.Tensor) and value.dim() == 0: value = value.unsqueeze(0) new_frame[key] = value new_dataset.add_frame(new_frame, task=task) if frame["episode_index"].item() != prev_episode_index: # Save the episode new_dataset.save_episode() prev_episode_index = frame["episode_index"].item() # Save the last episode new_dataset.save_episode() if push_to_hub: new_dataset.push_to_hub() return new_dataset if __name__ == "__main__": parser = argparse.ArgumentParser(description="Crop rectangular ROIs from a LeRobot dataset.") parser.add_argument( "--repo-id", type=str, default="lerobot", help="The repository id of the LeRobot dataset to process.", ) parser.add_argument( "--root", type=str, default=None, help="The root directory of the LeRobot dataset.", ) parser.add_argument( "--crop-params-path", type=str, default=None, help="The path to the JSON file containing the ROIs.", ) parser.add_argument( "--push-to-hub", action="store_true", help="Whether to push the new dataset to the hub.", ) parser.add_argument( "--task", type=str, default="", help="The natural language task to describe the dataset.", ) args = parser.parse_args() dataset = LeRobotDataset(repo_id=args.repo_id, root=args.root) images = get_image_from_lerobot_dataset(dataset) images = {k: v.cpu().permute(1, 2, 0).numpy() for k, v in images.items()} images = {k: (v * 255).astype("uint8") for k, v in images.items()} if args.crop_params_path is None: rois = select_square_roi_for_images(images) else: with open(args.crop_params_path) as f: rois = json.load(f) # Print the selected rectangular ROIs print("\nSelected Rectangular Regions of Interest (top, left, height, width):") for key, roi in rois.items(): print(f"{key}: {roi}") new_repo_id = args.repo_id + "_cropped_resized" new_dataset_root = Path(str(dataset.root) + "_cropped_resized") cropped_resized_dataset = convert_lerobot_dataset_to_cropper_lerobot_dataset( original_dataset=dataset, crop_params_dict=rois, new_repo_id=new_repo_id, new_dataset_root=new_dataset_root, resize_size=(128, 128), push_to_hub=args.push_to_hub, task=args.task, ) meta_dir = new_dataset_root / "meta" meta_dir.mkdir(exist_ok=True) with open(meta_dir / "crop_params.json", "w") as f: json.dump(rois, f, indent=4)
lerobot/src/lerobot/scripts/rl/crop_dataset_roi.py/0
{ "file_path": "lerobot/src/lerobot/scripts/rl/crop_dataset_roi.py", "repo_id": "lerobot", "token_count": 4715 }
200
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import time from queue import Queue from typing import Any from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from ..teleoperator import Teleoperator from .configuration_keyboard import KeyboardEndEffectorTeleopConfig, KeyboardTeleopConfig PYNPUT_AVAILABLE = True try: if ("DISPLAY" not in os.environ) and ("linux" in sys.platform): logging.info("No DISPLAY set. Skipping pynput import.") raise ImportError("pynput blocked intentionally due to no display.") from pynput import keyboard except ImportError: keyboard = None PYNPUT_AVAILABLE = False except Exception as e: keyboard = None PYNPUT_AVAILABLE = False logging.info(f"Could not import pynput: {e}") class KeyboardTeleop(Teleoperator): """ Teleop class to use keyboard inputs for control. """ config_class = KeyboardTeleopConfig name = "keyboard" def __init__(self, config: KeyboardTeleopConfig): super().__init__(config) self.config = config self.robot_type = config.type self.event_queue = Queue() self.current_pressed = {} self.listener = None self.logs = {} @property def action_features(self) -> dict: return { "dtype": "float32", "shape": (len(self.arm),), "names": {"motors": list(self.arm.motors)}, } @property def feedback_features(self) -> dict: return {} @property def is_connected(self) -> bool: return PYNPUT_AVAILABLE and isinstance(self.listener, keyboard.Listener) and self.listener.is_alive() @property def is_calibrated(self) -> bool: pass def connect(self) -> None: if self.is_connected: raise DeviceAlreadyConnectedError( "Keyboard is already connected. Do not run `robot.connect()` twice." ) if PYNPUT_AVAILABLE: logging.info("pynput is available - enabling local keyboard listener.") self.listener = keyboard.Listener( on_press=self._on_press, on_release=self._on_release, ) self.listener.start() else: logging.info("pynput not available - skipping local keyboard listener.") self.listener = None def calibrate(self) -> None: pass def _on_press(self, key): if hasattr(key, "char"): self.event_queue.put((key.char, True)) def _on_release(self, key): if hasattr(key, "char"): self.event_queue.put((key.char, False)) if key == keyboard.Key.esc: logging.info("ESC pressed, disconnecting.") self.disconnect() def _drain_pressed_keys(self): while not self.event_queue.empty(): key_char, is_pressed = self.event_queue.get_nowait() self.current_pressed[key_char] = is_pressed def configure(self): pass def get_action(self) -> dict[str, Any]: before_read_t = time.perf_counter() if not self.is_connected: raise DeviceNotConnectedError( "KeyboardTeleop is not connected. You need to run `connect()` before `get_action()`." 
) self._drain_pressed_keys() # Generate action based on current key states action = {key for key, val in self.current_pressed.items() if val} self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t return dict.fromkeys(action, None) def send_feedback(self, feedback: dict[str, Any]) -> None: pass def disconnect(self) -> None: if not self.is_connected: raise DeviceNotConnectedError( "KeyboardTeleop is not connected. You need to run `robot.connect()` before `disconnect()`." ) if self.listener is not None: self.listener.stop() class KeyboardEndEffectorTeleop(KeyboardTeleop): """ Teleop class to use keyboard inputs for end effector control. Designed to be used with the `So100FollowerEndEffector` robot. """ config_class = KeyboardEndEffectorTeleopConfig name = "keyboard_ee" def __init__(self, config: KeyboardEndEffectorTeleopConfig): super().__init__(config) self.config = config self.misc_keys_queue = Queue() @property def action_features(self) -> dict: if self.config.use_gripper: return { "dtype": "float32", "shape": (4,), "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3}, } else: return { "dtype": "float32", "shape": (3,), "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2}, } def _on_press(self, key): if hasattr(key, "char"): key = key.char self.event_queue.put((key, True)) def _on_release(self, key): if hasattr(key, "char"): key = key.char self.event_queue.put((key, False)) def get_action(self) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError( "KeyboardTeleop is not connected. You need to run `connect()` before `get_action()`." ) self._drain_pressed_keys() delta_x = 0.0 delta_y = 0.0 delta_z = 0.0 gripper_action = 1.0 # Generate action based on current key states for key, val in self.current_pressed.items(): if key == keyboard.Key.up: delta_y = -int(val) elif key == keyboard.Key.down: delta_y = int(val) elif key == keyboard.Key.left: delta_x = int(val) elif key == keyboard.Key.right: delta_x = -int(val) elif key == keyboard.Key.shift: delta_z = -int(val) elif key == keyboard.Key.shift_r: delta_z = int(val) elif key == keyboard.Key.ctrl_r: # Gripper actions are expected to be between 0 (close), 1 (stay), 2 (open) gripper_action = int(val) + 1 elif key == keyboard.Key.ctrl_l: gripper_action = int(val) - 1 elif val: # If the key is pressed, add it to the misc_keys_queue # this will record key presses that are not part of the delta_x, delta_y, delta_z # this is useful for retrieving other events like interventions for RL, episode success, etc. self.misc_keys_queue.put(key) self.current_pressed.clear() action_dict = { "delta_x": delta_x, "delta_y": delta_y, "delta_z": delta_z, } if self.config.use_gripper: action_dict["gripper"] = gripper_action return action_dict
lerobot/src/lerobot/teleoperators/keyboard/teleop_keyboard.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/keyboard/teleop_keyboard.py", "repo_id": "lerobot", "token_count": 3386 }
201
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any from lerobot.utils.utils import format_big_number class AverageMeter: """ Computes and stores the average and current value Adapted from https://github.com/pytorch/examples/blob/main/imagenet/main.py """ def __init__(self, name: str, fmt: str = ":f"): self.name = name self.fmt = fmt self.reset() def reset(self) -> None: self.val = 0.0 self.avg = 0.0 self.sum = 0.0 self.count = 0.0 def update(self, val: float, n: int = 1) -> None: self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = "{name}:{avg" + self.fmt + "}" return fmtstr.format(**self.__dict__) class MetricsTracker: """ A helper class to track and log metrics over time. Usage pattern: ```python # initialize, potentially with non-zero initial step (e.g. if resuming run) metrics = {"loss": AverageMeter("loss", ":.3f")} train_metrics = MetricsTracker(cfg, dataset, metrics, initial_step=step) # update metrics derived from step (samples, episodes, epochs) at each training step train_metrics.step() # update various metrics loss = policy.forward(batch) train_metrics.loss = loss # display current metrics logging.info(train_metrics) # export for wandb wandb.log(train_metrics.to_dict()) # reset averages after logging train_metrics.reset_averages() ``` """ __keys__ = [ "_batch_size", "_num_frames", "_avg_samples_per_ep", "metrics", "steps", "samples", "episodes", "epochs", ] def __init__( self, batch_size: int, num_frames: int, num_episodes: int, metrics: dict[str, AverageMeter], initial_step: int = 0, ): self.__dict__.update(dict.fromkeys(self.__keys__)) self._batch_size = batch_size self._num_frames = num_frames self._avg_samples_per_ep = num_frames / num_episodes self.metrics = metrics self.steps = initial_step # A sample is an (observation,action) pair, where observation and action # can be on multiple timestamps. In a batch, we have `batch_size` number of samples. self.samples = self.steps * self._batch_size self.episodes = self.samples / self._avg_samples_per_ep self.epochs = self.samples / self._num_frames def __getattr__(self, name: str) -> int | dict[str, AverageMeter] | AverageMeter | Any: if name in self.__dict__: return self.__dict__[name] elif name in self.metrics: return self.metrics[name] else: raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") def __setattr__(self, name: str, value: Any) -> None: if name in self.__dict__: super().__setattr__(name, value) elif name in self.metrics: self.metrics[name].update(value) else: raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") def step(self) -> None: """ Updates metrics that depend on 'step' for one step. 
""" self.steps += 1 self.samples += self._batch_size self.episodes = self.samples / self._avg_samples_per_ep self.epochs = self.samples / self._num_frames def __str__(self) -> str: display_list = [ f"step:{format_big_number(self.steps)}", # number of samples seen during training f"smpl:{format_big_number(self.samples)}", # number of episodes seen during training f"ep:{format_big_number(self.episodes)}", # number of time all unique samples are seen f"epch:{self.epochs:.2f}", *[str(m) for m in self.metrics.values()], ] return " ".join(display_list) def to_dict(self, use_avg: bool = True) -> dict[str, int | float]: """ Returns the current metric values (or averages if `use_avg=True`) as a dict. """ return { "steps": self.steps, "samples": self.samples, "episodes": self.episodes, "epochs": self.epochs, **{k: m.avg if use_avg else m.val for k, m in self.metrics.items()}, } def reset_averages(self) -> None: """Resets average meters.""" for m in self.metrics.values(): m.reset()
lerobot/src/lerobot/utils/logging_utils.py/0
{ "file_path": "lerobot/src/lerobot/utils/logging_utils.py", "repo_id": "lerobot", "token_count": 2247 }
202
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Example of running a specific test: # ```bash # pytest tests/cameras/test_opencv.py::test_connect # ``` from pathlib import Path from unittest.mock import patch import numpy as np import pytest from lerobot.cameras.configs import Cv2Rotation from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError pytest.importorskip("pyrealsense2") from lerobot.cameras.realsense import RealSenseCamera, RealSenseCameraConfig TEST_ARTIFACTS_DIR = Path(__file__).parent.parent / "artifacts" / "cameras" BAG_FILE_PATH = TEST_ARTIFACTS_DIR / "test_rs.bag" # NOTE(Steven): For some reason these tests take ~20sec in macOS but only ~2sec in Linux. def mock_rs_config_enable_device_from_file(rs_config_instance, _sn): return rs_config_instance.enable_device_from_file(str(BAG_FILE_PATH), repeat_playback=True) def mock_rs_config_enable_device_bad_file(rs_config_instance, _sn): return rs_config_instance.enable_device_from_file("non_existent_file.bag", repeat_playback=True) @pytest.fixture(name="patch_realsense", autouse=True) def fixture_patch_realsense(): """Automatically mock pyrealsense2.config.enable_device for all tests.""" with patch( "pyrealsense2.config.enable_device", side_effect=mock_rs_config_enable_device_from_file ) as mock: yield mock def test_abc_implementation(): """Instantiation should raise an error if the class doesn't implement abstract methods/properties.""" config = RealSenseCameraConfig(serial_number_or_name="042") _ = RealSenseCamera(config) def test_connect(): config = RealSenseCameraConfig(serial_number_or_name="042") camera = RealSenseCamera(config) camera.connect(warmup=False) assert camera.is_connected def test_connect_already_connected(): config = RealSenseCameraConfig(serial_number_or_name="042") camera = RealSenseCamera(config) camera.connect(warmup=False) with pytest.raises(DeviceAlreadyConnectedError): camera.connect(warmup=False) def test_connect_invalid_camera_path(patch_realsense): patch_realsense.side_effect = mock_rs_config_enable_device_bad_file config = RealSenseCameraConfig(serial_number_or_name="042") camera = RealSenseCamera(config) with pytest.raises(ConnectionError): camera.connect(warmup=False) def test_invalid_width_connect(): config = RealSenseCameraConfig(serial_number_or_name="042", width=99999, height=480, fps=30) camera = RealSenseCamera(config) with pytest.raises(ConnectionError): camera.connect(warmup=False) def test_read(): config = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30) camera = RealSenseCamera(config) camera.connect(warmup=False) img = camera.read() assert isinstance(img, np.ndarray) # TODO(Steven): Fix this test for the latest version of pyrealsense2. 
@pytest.mark.skip("Skipping test: pyrealsense2 version > 2.55.1.6486") def test_read_depth(): config = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30, use_depth=True) camera = RealSenseCamera(config) camera.connect(warmup=False) img = camera.read_depth(timeout_ms=2000) # NOTE(Steven): Reading depth takes longer in CI environments. assert isinstance(img, np.ndarray) def test_read_before_connect(): config = RealSenseCameraConfig(serial_number_or_name="042") camera = RealSenseCamera(config) with pytest.raises(DeviceNotConnectedError): _ = camera.read() def test_disconnect(): config = RealSenseCameraConfig(serial_number_or_name="042") camera = RealSenseCamera(config) camera.connect(warmup=False) camera.disconnect() assert not camera.is_connected def test_disconnect_before_connect(): config = RealSenseCameraConfig(serial_number_or_name="042") camera = RealSenseCamera(config) with pytest.raises(DeviceNotConnectedError): camera.disconnect() def test_async_read(): config = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30) camera = RealSenseCamera(config) camera.connect(warmup=False) try: img = camera.async_read() assert camera.thread is not None assert camera.thread.is_alive() assert isinstance(img, np.ndarray) finally: if camera.is_connected: camera.disconnect() # To stop/join the thread. Otherwise get warnings when the test ends def test_async_read_timeout(): config = RealSenseCameraConfig(serial_number_or_name="042", width=640, height=480, fps=30) camera = RealSenseCamera(config) camera.connect(warmup=False) try: with pytest.raises(TimeoutError): camera.async_read(timeout_ms=0) finally: if camera.is_connected: camera.disconnect() def test_async_read_before_connect(): config = RealSenseCameraConfig(serial_number_or_name="042") camera = RealSenseCamera(config) with pytest.raises(DeviceNotConnectedError): _ = camera.async_read() @pytest.mark.parametrize( "rotation", [ Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270, ], ids=["no_rot", "rot90", "rot180", "rot270"], ) def test_rotation(rotation): config = RealSenseCameraConfig(serial_number_or_name="042", rotation=rotation) camera = RealSenseCamera(config) camera.connect(warmup=False) img = camera.read() assert isinstance(img, np.ndarray) if rotation in (Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_270): assert camera.width == 480 assert camera.height == 640 assert img.shape[:2] == (640, 480) else: assert camera.width == 640 assert camera.height == 480 assert img.shape[:2] == (480, 640)
lerobot/tests/cameras/test_realsense.py/0
{ "file_path": "lerobot/tests/cameras/test_realsense.py", "repo_id": "lerobot", "token_count": 2353 }
203
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from pathlib import Path import datasets import jsonlines import pyarrow.compute as pc import pyarrow.parquet as pq import pytest from lerobot.datasets.utils import ( EPISODES_PATH, EPISODES_STATS_PATH, INFO_PATH, STATS_PATH, TASKS_PATH, ) @pytest.fixture(scope="session") def info_path(info_factory): def _create_info_json_file(dir: Path, info: dict | None = None) -> Path: if not info: info = info_factory() fpath = dir / INFO_PATH fpath.parent.mkdir(parents=True, exist_ok=True) with open(fpath, "w") as f: json.dump(info, f, indent=4, ensure_ascii=False) return fpath return _create_info_json_file @pytest.fixture(scope="session") def stats_path(stats_factory): def _create_stats_json_file(dir: Path, stats: dict | None = None) -> Path: if not stats: stats = stats_factory() fpath = dir / STATS_PATH fpath.parent.mkdir(parents=True, exist_ok=True) with open(fpath, "w") as f: json.dump(stats, f, indent=4, ensure_ascii=False) return fpath return _create_stats_json_file @pytest.fixture(scope="session") def episodes_stats_path(episodes_stats_factory): def _create_episodes_stats_jsonl_file(dir: Path, episodes_stats: list[dict] | None = None) -> Path: if not episodes_stats: episodes_stats = episodes_stats_factory() fpath = dir / EPISODES_STATS_PATH fpath.parent.mkdir(parents=True, exist_ok=True) with jsonlines.open(fpath, "w") as writer: writer.write_all(episodes_stats.values()) return fpath return _create_episodes_stats_jsonl_file @pytest.fixture(scope="session") def tasks_path(tasks_factory): def _create_tasks_jsonl_file(dir: Path, tasks: list | None = None) -> Path: if not tasks: tasks = tasks_factory() fpath = dir / TASKS_PATH fpath.parent.mkdir(parents=True, exist_ok=True) with jsonlines.open(fpath, "w") as writer: writer.write_all(tasks.values()) return fpath return _create_tasks_jsonl_file @pytest.fixture(scope="session") def episode_path(episodes_factory): def _create_episodes_jsonl_file(dir: Path, episodes: list | None = None) -> Path: if not episodes: episodes = episodes_factory() fpath = dir / EPISODES_PATH fpath.parent.mkdir(parents=True, exist_ok=True) with jsonlines.open(fpath, "w") as writer: writer.write_all(episodes.values()) return fpath return _create_episodes_jsonl_file @pytest.fixture(scope="session") def single_episode_parquet_path(hf_dataset_factory, info_factory): def _create_single_episode_parquet( dir: Path, ep_idx: int = 0, hf_dataset: datasets.Dataset | None = None, info: dict | None = None ) -> Path: if not info: info = info_factory() if hf_dataset is None: hf_dataset = hf_dataset_factory() data_path = info["data_path"] chunks_size = info["chunks_size"] ep_chunk = ep_idx // chunks_size fpath = dir / data_path.format(episode_chunk=ep_chunk, episode_index=ep_idx) fpath.parent.mkdir(parents=True, exist_ok=True) table = hf_dataset.data.table ep_table = table.filter(pc.equal(table["episode_index"], ep_idx)) pq.write_table(ep_table, fpath) return fpath return 
_create_single_episode_parquet @pytest.fixture(scope="session") def multi_episode_parquet_path(hf_dataset_factory, info_factory): def _create_multi_episode_parquet( dir: Path, hf_dataset: datasets.Dataset | None = None, info: dict | None = None ) -> Path: if not info: info = info_factory() if hf_dataset is None: hf_dataset = hf_dataset_factory() data_path = info["data_path"] chunks_size = info["chunks_size"] total_episodes = info["total_episodes"] for ep_idx in range(total_episodes): ep_chunk = ep_idx // chunks_size fpath = dir / data_path.format(episode_chunk=ep_chunk, episode_index=ep_idx) fpath.parent.mkdir(parents=True, exist_ok=True) table = hf_dataset.data.table ep_table = table.filter(pc.equal(table["episode_index"], ep_idx)) pq.write_table(ep_table, fpath) return dir / "data" return _create_multi_episode_parquet
lerobot/tests/fixtures/files.py/0
{ "file_path": "lerobot/tests/fixtures/files.py", "repo_id": "lerobot", "token_count": 2174 }
204
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature from lerobot.policies.sac.configuration_sac import ( ActorLearnerConfig, ActorNetworkConfig, ConcurrencyConfig, CriticNetworkConfig, PolicyConfig, SACConfig, ) def test_sac_config_default_initialization(): config = SACConfig() assert config.normalization_mapping == { "VISUAL": NormalizationMode.MEAN_STD, "STATE": NormalizationMode.MIN_MAX, "ENV": NormalizationMode.MIN_MAX, "ACTION": NormalizationMode.MIN_MAX, } assert config.dataset_stats == { "observation.image": { "mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225], }, "observation.state": { "min": [0.0, 0.0], "max": [1.0, 1.0], }, "action": { "min": [0.0, 0.0, 0.0], "max": [1.0, 1.0, 1.0], }, } # Basic parameters assert config.device == "cpu" assert config.storage_device == "cpu" assert config.discount == 0.99 assert config.temperature_init == 1.0 assert config.num_critics == 2 # Architecture specifics assert config.vision_encoder_name is None assert config.freeze_vision_encoder is True assert config.image_encoder_hidden_dim == 32 assert config.shared_encoder is True assert config.num_discrete_actions is None assert config.image_embedding_pooling_dim == 8 # Training parameters assert config.online_steps == 1000000 assert config.online_env_seed == 10000 assert config.online_buffer_capacity == 100000 assert config.offline_buffer_capacity == 100000 assert config.async_prefetch is False assert config.online_step_before_learning == 100 assert config.policy_update_freq == 1 # SAC algorithm parameters assert config.num_subsample_critics is None assert config.critic_lr == 3e-4 assert config.actor_lr == 3e-4 assert config.temperature_lr == 3e-4 assert config.critic_target_update_weight == 0.005 assert config.utd_ratio == 1 assert config.state_encoder_hidden_dim == 256 assert config.latent_dim == 256 assert config.target_entropy is None assert config.use_backup_entropy is True assert config.grad_clip_norm == 40.0 # Dataset stats defaults expected_dataset_stats = { "observation.image": { "mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225], }, "observation.state": { "min": [0.0, 0.0], "max": [1.0, 1.0], }, "action": { "min": [0.0, 0.0, 0.0], "max": [1.0, 1.0, 1.0], }, } assert config.dataset_stats == expected_dataset_stats # Critic network configuration assert config.critic_network_kwargs.hidden_dims == [256, 256] assert config.critic_network_kwargs.activate_final is True assert config.critic_network_kwargs.final_activation is None # Actor network configuration assert config.actor_network_kwargs.hidden_dims == [256, 256] assert config.actor_network_kwargs.activate_final is True # Policy configuration assert config.policy_kwargs.use_tanh_squash is True assert config.policy_kwargs.std_min == 1e-5 assert config.policy_kwargs.std_max == 10.0 assert config.policy_kwargs.init_final == 0.05 # Discrete critic network configuration 
assert config.discrete_critic_network_kwargs.hidden_dims == [256, 256] assert config.discrete_critic_network_kwargs.activate_final is True assert config.discrete_critic_network_kwargs.final_activation is None # Actor learner configuration assert config.actor_learner_config.learner_host == "127.0.0.1" assert config.actor_learner_config.learner_port == 50051 assert config.actor_learner_config.policy_parameters_push_frequency == 4 # Concurrency configuration assert config.concurrency.actor == "threads" assert config.concurrency.learner == "threads" assert isinstance(config.actor_network_kwargs, ActorNetworkConfig) assert isinstance(config.critic_network_kwargs, CriticNetworkConfig) assert isinstance(config.policy_kwargs, PolicyConfig) assert isinstance(config.actor_learner_config, ActorLearnerConfig) assert isinstance(config.concurrency, ConcurrencyConfig) def test_critic_network_kwargs(): config = CriticNetworkConfig() assert config.hidden_dims == [256, 256] assert config.activate_final is True assert config.final_activation is None def test_actor_network_kwargs(): config = ActorNetworkConfig() assert config.hidden_dims == [256, 256] assert config.activate_final is True def test_policy_kwargs(): config = PolicyConfig() assert config.use_tanh_squash is True assert config.std_min == 1e-5 assert config.std_max == 10.0 assert config.init_final == 0.05 def test_actor_learner_config(): config = ActorLearnerConfig() assert config.learner_host == "127.0.0.1" assert config.learner_port == 50051 assert config.policy_parameters_push_frequency == 4 def test_concurrency_config(): config = ConcurrencyConfig() assert config.actor == "threads" assert config.learner == "threads" def test_sac_config_custom_initialization(): config = SACConfig( device="cpu", discount=0.95, temperature_init=0.5, num_critics=3, ) assert config.device == "cpu" assert config.discount == 0.95 assert config.temperature_init == 0.5 assert config.num_critics == 3 def test_validate_features(): config = SACConfig( input_features={"observation.state": PolicyFeature(type=FeatureType.STATE, shape=(10,))}, output_features={"action": PolicyFeature(type=FeatureType.ACTION, shape=(3,))}, ) config.validate_features() def test_validate_features_missing_observation(): config = SACConfig( input_features={"wrong_key": PolicyFeature(type=FeatureType.STATE, shape=(10,))}, output_features={"action": PolicyFeature(type=FeatureType.ACTION, shape=(3,))}, ) with pytest.raises( ValueError, match="You must provide either 'observation.state' or an image observation" ): config.validate_features() def test_validate_features_missing_action(): config = SACConfig( input_features={"observation.state": PolicyFeature(type=FeatureType.STATE, shape=(10,))}, output_features={"wrong_key": PolicyFeature(type=FeatureType.ACTION, shape=(3,))}, ) with pytest.raises(ValueError, match="You must provide 'action' in the output features"): config.validate_features()
lerobot/tests/policies/test_sac_config.py/0
{ "file_path": "lerobot/tests/policies/test_sac_config.py", "repo_id": "lerobot", "token_count": 2753 }
205
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from pathlib import Path from typing import Any import pytest from lerobot.utils.io_utils import deserialize_json_into_object @pytest.fixture def tmp_json_file(tmp_path: Path): """Writes `data` to a temporary JSON file and returns the file's path.""" def _write(data: Any) -> Path: file_path = tmp_path / "data.json" with file_path.open("w", encoding="utf-8") as f: json.dump(data, f) return file_path return _write def test_simple_dict(tmp_json_file): data = {"name": "Alice", "age": 30} json_path = tmp_json_file(data) obj = {"name": "", "age": 0} assert deserialize_json_into_object(json_path, obj) == data def test_nested_structure(tmp_json_file): data = {"items": [1, 2, 3], "info": {"active": True}} json_path = tmp_json_file(data) obj = {"items": [0, 0, 0], "info": {"active": False}} assert deserialize_json_into_object(json_path, obj) == data def test_tuple_conversion(tmp_json_file): data = {"coords": [10.5, 20.5]} json_path = tmp_json_file(data) obj = {"coords": (0.0, 0.0)} result = deserialize_json_into_object(json_path, obj) assert result["coords"] == (10.5, 20.5) def test_type_mismatch_raises(tmp_json_file): data = {"numbers": {"bad": "structure"}} json_path = tmp_json_file(data) obj = {"numbers": [0, 0]} with pytest.raises(TypeError): deserialize_json_into_object(json_path, obj) def test_missing_key_raises(tmp_json_file): data = {"one": 1} json_path = tmp_json_file(data) obj = {"one": 0, "two": 0} with pytest.raises(ValueError): deserialize_json_into_object(json_path, obj) def test_extra_key_raises(tmp_json_file): data = {"one": 1, "two": 2} json_path = tmp_json_file(data) obj = {"one": 0} with pytest.raises(ValueError): deserialize_json_into_object(json_path, obj) def test_list_length_mismatch_raises(tmp_json_file): data = {"nums": [1, 2, 3]} json_path = tmp_json_file(data) obj = {"nums": [0, 0]} with pytest.raises(ValueError): deserialize_json_into_object(json_path, obj)
lerobot/tests/utils/test_io_utils.py/0
{ "file_path": "lerobot/tests/utils/test_io_utils.py", "repo_id": "lerobot", "token_count": 1063 }
206
# coding=utf-8 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import asyncio from fastapi import FastAPI from pydantic import BaseModel, ConfigDict from typing import Optional, List from fastapi import FastAPI, Request import uvicorn from dotenv import load_dotenv import os load_dotenv() class BatchRequest(BaseModel): """ BatchRequest is a data model representing a batch processing request. Attributes: scripts (list[str]): A list of script names or paths to be executed. languages (List[str]): The programming languages for each script in the list. timeout (int): The maximum allowed execution time for each script in seconds. request_timeout (int): The maximum allowed time for the entire batch request in seconds. """ scripts: List[str] languages: List[str] timeout: int request_timeout: int class ScriptResult(BaseModel): """ ScriptResult is a Pydantic model that represents the result of a script execution. Attributes: text (Optional[str]): The output text from the script execution. exception_str (Optional[str]): An optional string that captures the exception message or details if an error occurred during the script's execution. model_config (ConfigDict): A configuration dictionary that allows arbitrary types to be used within the Pydantic model. """ text: Optional[str] exception_str: Optional[str] model_config = ConfigDict(arbitrary_types_allowed=True) def create_app(args): """ Creates and configures a FastAPI application instance for the MorphCloud router. Args: args: An object containing configuration parameters for the application. - max_num_sandboxes (int): The maximum number of concurrent sandboxes allowed. - api_key (str): The MorphCloud API key to use. Returns: FastAPI: A configured FastAPI application instance. 
""" app = FastAPI() from morphcloud.api import MorphCloudClient from morphcloud.sandbox import Sandbox app.state.client = MorphCloudClient(api_key=args.api_key) app.state.Sandbox = Sandbox app.state.sandbox_semaphore = asyncio.Semaphore(args.max_num_sandboxes) @app.get("/health") async def health(): return {"status": "ok"} @app.post("/execute_batch") async def execute_batch(batch: BatchRequest, request: Request): semaphore = request.app.state.sandbox_semaphore client = request.app.state.client Sandbox = request.app.state.Sandbox languages = batch.languages timeout = batch.timeout request_timeout = batch.request_timeout asyncio_timeout = batch.timeout + 1 async def run_script(script: str, language: str) -> ScriptResult: sandbox = None sandbox_id = "unknown" async with semaphore: try: sandbox = await asyncio.to_thread( Sandbox.new, client=client, ttl_seconds=timeout ) sandbox_id = getattr(sandbox, 'id', None) or getattr(sandbox._instance, 'id', 'unknown') execution = await asyncio.wait_for( asyncio.to_thread( sandbox.run_code, script, language=language, timeout=timeout * 1000 ), timeout=asyncio_timeout, ) if hasattr(execution, 'text') and execution.text: return ScriptResult(text=execution.text, exception_str=None) elif hasattr(execution, 'stdout') and execution.stdout: return ScriptResult(text=execution.stdout, exception_str=None) else: return ScriptResult(text="", exception_str="No output from execution") except Exception as e: return ScriptResult(text=None, exception_str=str(e)) finally: if sandbox: try: await asyncio.to_thread(sandbox.close) await asyncio.to_thread(sandbox.shutdown) except Exception: pass tasks = [run_script(script, lang) for script, lang in zip(batch.scripts, batch.languages)] return await asyncio.gather(*tasks) return app def parse_args(): """ Parse command-line arguments for the morph_router script. Arguments: --host (str): The hostname or IP address to bind the server to. Defaults to "0.0.0.0". --port (int): The port number on which the server will listen. Defaults to 8001. --max_num_sandboxes (int): The maximum number of sandboxes that can be created simultaneously. Defaults to 20. --api_key (str): The MorphCloud API key. If not provided, it will be read from the MORPH_API_KEY environment variable. Returns: argparse.Namespace: Parsed command-line arguments as an object. """ parser = argparse.ArgumentParser() parser.add_argument("--host", default="0.0.0.0") parser.add_argument("--port", type=int, default=8001) parser.add_argument("--max_num_sandboxes", type=int, default=20) parser.add_argument("--api_key", default=os.getenv("MORPH_API_KEY")) args = parser.parse_args() if not args.api_key: raise ValueError("MorphCloud API key not provided. Please set MORPH_API_KEY environment variable or use --api_key.") return args if __name__ == "__main__": args = parse_args() app = create_app(args) print(f"Starting MorphCloud Router on {args.host}:{args.port}") uvicorn.run(app, host=args.host, port=args.port)
open-r1/scripts/morph_router.py/0
{ "file_path": "open-r1/scripts/morph_router.py", "repo_id": "open-r1", "token_count": 2781 }
207
#!/bin/bash # this simple script will launch a bunch of piston workers on the HF science cluster N_INSTANCES=${1:-5} # Default to 5 instances for i in $(seq 1 $N_INSTANCES); do # Find random (hopefully) available port PORT=$(comm -23 <(seq 2000 10000 | sort) <(ss -tan | awk '{print $4}' | cut -d':' -f2 | sort -u) | shuf | head -n1) # the job name format is important for the code to then be able to get a list of workers. `piston-worker-<port>` sbatch \ --job-name="piston-worker-$PORT" \ --export=ALL,PORT=$PORT \ slurm/piston/launch_single_piston.sh done
open-r1/slurm/piston/launch_piston_workers.sh/0
{ "file_path": "open-r1/slurm/piston/launch_piston_workers.sh", "repo_id": "open-r1", "token_count": 237 }
208
import re def fix_python3_imports(source_code): """ Fix common import and function changes between Python 3 versions Args: source_code (str): The Python source code to update Returns: str: The updated source code """ # Dictionary of patterns to replacements replacements = [ # Fix collections.abc imports (changed in Python 3.3+) ( r"from collections import (Mapping|Sequence|Set|Container|MutableMapping|MutableSet|MutableSequence)", r"from collections.abc import \1", ), # Fix imp module deprecation (deprecated in 3.4) (r"import imp", r"import importlib"), # Fix asyncio.async() to asyncio.ensure_future() (renamed in 3.4.4) (r"asyncio\.async\(", r"asyncio.ensure_future("), # Fix inspect.getargspec to inspect.getfullargspec (deprecated in 3.5) (r"inspect\.getargspec", r"inspect.getfullargspec"), # Fix array.array 'c' type code to 'b' (removed in 3.9) (r"array\.array\('c'", r"array.array('b'"), # Fix backslash line continuation with multiple newlines (Python-specific issue) (r"\\(\r\n|\r|\n)+", "\\\n"), # some solutions use getlogin() to check if they are debugging or on an actual submission (r"(?:os\s*\.\s*)?getlogin\s*\(\s*\)", "False"), # Fix usage of fractions.gcd (moved to math in 3.5) # 1. Fix direct usage: fractions.gcd -> math.gcd (r"\bfractions\.gcd\b", r"math.gcd"), # 2. Fix 'from fractions import gcd, X' -> 'from fractions import X' (start/middle) (r"(from\s+fractions\s+import\s+(?:\([^)]*)?)\bgcd\s*,\s*", r"\1"), # 3. Fix 'from fractions import X, gcd' -> 'from fractions import X' (end) (r"(from\s+fractions\s+import\s+.*?\S)\s*,\s*\bgcd(\s*\)?\s*(?:#.*)?)", r"\1\2"), # 4. Fix standalone 'from fractions import gcd' -> 'from math import gcd' (r"from\s+fractions\s+import\s+\(?\s*gcd\s*\)?", r""), # --- End: Replacement for the faulty line --- ] lines = source_code.splitlines() last_import = max( [ i for i, line in enumerate(lines) if line.strip().startswith("import") or (line.strip().startswith("from") and "import" in line) ], default=0, ) import_section = "\n".join(lines[: last_import + 1]) main_source = "\n".join(lines[last_import:]) if "fractions.gcd" in source_code and "import math" not in source_code: import_section += "\nimport math" elif "gcd" in source_code and "from math import gcd" not in source_code: import_section += "\nfrom math import gcd" if "set_int_max_str_digits" not in source_code: import_section += "\nimport sys\nsys.set_int_max_str_digits(0)" source_code = import_section + "\n" + main_source # Apply each replacement for pattern, replacement in replacements: source_code = re.sub(pattern, replacement, source_code) source_code = source_code.rstrip("\\") return source_code def fix_cpp_includes(source_code): # has most of the useful functions code_header = "#include <bits/stdc++.h>\n" # use namespace std since models forget std:: often if "using namespace std;" not in source_code and "std::" not in source_code: code_header += "\nusing namespace std;\n\n" return code_header + source_code def is_patchable(lang): return lang in ("python", "python3", "Python 3", "PyPy 3", "PyPy 3-64", "cpp") or "C++" in lang def patch_code(text, lang): if not text: return text if lang in ("python", "python3", "Python 3", "PyPy 3", "PyPy 3-64"): return fix_python3_imports(text) elif "cpp" in lang or "C++" in lang: return fix_cpp_includes(text) return text tests = [ """read = lambda: map(int, input().split()) n, m, z = read() from fractions import gcd ans = z // (n * m // gcd(n, m)) print(ans)""", """from fractions import Fraction,gcd a,b,c,d = [int(x) for x in input().split()] if a*d > b*c: 
num = a*d-b*c denom = a*d else: num = b*c-a*d denom = b*c div = gcd(num,denom) print('%d/%d'%(num//div,denom//div))""", ] if __name__ == "__main__": for test in tests: print("ORIGINAL:", test, sep="\n\n") print("PATCHED:", patch_code(test, "Python 3"), sep="\n\n") print("=" * 50)
open-r1/src/open_r1/utils/competitive_programming/code_patcher.py/0
{ "file_path": "open-r1/src/open_r1/utils/competitive_programming/code_patcher.py", "repo_id": "open-r1", "token_count": 1876 }
209
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from dotenv import load_dotenv from open_r1.configs import GRPOScriptArguments from open_r1.rewards import ( accuracy_reward, format_reward, get_code_format_reward, get_cosine_scaled_reward, get_repetition_penalty_reward, get_reward_funcs, get_soft_overlong_punishment, len_reward, reasoning_steps_reward, tag_count_reward, ) load_dotenv() class TestGetRewardFuncs(unittest.TestCase): def test_get_reward_funcs(self): """Test get_reward_funcs with various reward functions.""" reward_names = [ "accuracy", "format", "reasoning_steps", "cosine", "repetition_penalty", "length", "tag_count", "code", "ioi_code", "code_format", "binary_code", ] reward_func_names = [ "accuracy_reward", "format_reward", "reasoning_steps_reward", "cosine_scaled_reward", "repetition_penalty_reward", "len_reward", "tag_count_reward", "code_reward", "ioi_code_reward", "code_format_reward", "binary_code_reward", ] args = GRPOScriptArguments( dataset_name="dummy", reward_funcs=reward_names, ) reward_funcs = get_reward_funcs(args) self.assertEqual(len(reward_funcs), 11) for func_name, func in zip(reward_func_names, reward_funcs): self.assertEqual(func_name, func.__name__) class TestRewards(unittest.TestCase): def test_accuracy_reward_correct_answer(self): """Test accuracy_reward with a correct answer.""" completion = [[{"content": r"\boxed{\frac{63}{400}}"}]] solution = [r"\frac{63}{400}"] rewards = accuracy_reward(completion, solution) self.assertEqual(rewards[0], 1.0) def test_accuracy_reward_wrong_answer(self): """Test accuracy_reward with an incorrect answer.""" completion = [[{"content": r"\boxed{\frac{64}{400}}"}]] solution = [r"\frac{63}{400}"] rewards = accuracy_reward(completion, solution) self.assertEqual(rewards[0], 0.0) def test_accuracy_reward_wrong_answer_no_latex(self): """Test accuracy_reward with an incorrect answer and gold solution with no latex.""" completion = [[{"content": r"\boxed{3}"}]] solution = ["6"] rewards = accuracy_reward(completion, solution) self.assertEqual(rewards[0], 0.0) def test_format_reward_correct(self): """Test format_reward with correct format.""" completion = [[{"content": "<think>\nSome reasoning\n</think>\n<answer>\nThe answer\n</answer>"}]] rewards = format_reward(completion) self.assertEqual(rewards[0], 1.0) def test_format_reward_incorrect(self): """Test format_reward with incorrect format.""" incorrect_formats = [ "<think>Only thinking</think>", "<answer>Only answer</answer>", "No tags at all", "<think>Missing closing</think><answer>Missing closing", "<think>Wrong order</answer><answer>Wrong order</think>", ] for fmt in incorrect_formats: completion = [[{"content": fmt}]] rewards = format_reward(completion) self.assertEqual(rewards[0], 0.0) def test_reasoning_steps_reward(self): """Test reasoning_steps_reward with various formats.""" test_cases = [ # Full credit cases (3 or more steps) ("Step 1: First step\nStep 2: Second step\nStep 3: Third step", 1.0), ("First, we do 
this.\nSecond, we do that.\nFinally, we conclude.", 1.0), # Partial credit cases (less than 3 steps) ("Step 1: Only step", 1 / 3), ("First, we do this.\nFinally, we conclude.", 2 / 3), # No credit case ("Just plain text without any clear steps", 0.0), ] for content, expected_reward in test_cases: completion = [[{"content": content}]] rewards = reasoning_steps_reward(completion) self.assertAlmostEqual(rewards[0], expected_reward) def test_multiple_completions(self): """Test handling multiple completions at once.""" completions = [ [{"content": r"\boxed{\frac{63}{400}}"}], [{"content": r"\boxed{\frac{64}{400}}"}], ] solutions = [r"\frac{63}{400}", r"\frac{63}{400}"] rewards = accuracy_reward(completions, solutions) self.assertEqual(len(rewards), 2) self.assertEqual(rewards[0], 1.0) self.assertEqual(rewards[1], 0.0) def test_cosine_scaled_reward(self): """Test cosine_scaled_reward with various cases.""" # Test parameters test_params = { "min_value_wrong": -1.0, "max_value_wrong": -0.5, "min_value_correct": 0.5, "max_value_correct": 1.0, "max_len": 100, } test_cases = [ # Correct answers with different lengths ( r"\boxed{\frac{63}{400}}", r"\frac{63}{400}", 20, 0.943, ), # Short correct answer ( r"\boxed{\frac{63}{400}}", r"\frac{63}{400}", 80, 0.547, ), # Long correct answer # Wrong answers with different lengths ( r"\boxed{\frac{64}{400}}", r"\frac{63}{400}", 20, -0.942, ), # Short wrong answer ( r"\boxed{\frac{64}{400}}", r"\frac{63}{400}", 80, -0.547, ), # Long wrong answer ] for content, solution, content_len, expected_reward in test_cases: # Pad content to desired length padded_content = content + " " * (content_len - len(content)) completion = [[{"content": padded_content}]] rewards = get_cosine_scaled_reward(**test_params)(completion, [solution]) self.assertAlmostEqual(rewards[0], expected_reward, places=2) def test_format_reward_specific_multiline(self): """Test format_reward with a specific multiline input.""" inputs = "<think>\nI will count each distinct object in the image:\n1. Purple scooter\n2. Red bicycle\n3. Green motorcycle\n4. Gray sedan\n5. Yellow school bus\n6. Small green double-decker bus\n7. Small red car\n8. Small purple car\n9. 
Small gray dirt bike\n\nThere are 9 distinct objects in total.\n</think>\n<answer>\n9\n</answer>" completion = [[{"content": inputs}]] rewards = format_reward(completion) self.assertEqual(rewards[0], 1.0) def test_same_length_responses(self): """Test len_reward when all responses have the same length.""" completions = [ [{"content": r"\boxed{\frac{63}{400}}"}], [{"content": r"\boxed{\frac{64}{400}}"}], ] solutions = [r"\frac{63}{400}", r"\frac{63}{400}"] rewards = len_reward(completions, solutions) self.assertEqual(rewards, [0.0, 0.0]) def test_different_lengths_correct_answers(self): """Test len_reward with different length correct answers.""" completions = [ [{"content": r"\boxed{\frac{63}{400}}"}], # shorter [{"content": r"\boxed{\frac{63}{400}} " + "x" * 10}], # longer ] solutions = [r"\frac{63}{400}", r"\frac{63}{400}"] rewards = len_reward(completions, solutions) self.assertGreater(rewards[0], rewards[1]) # shorter answer should get higher reward self.assertAlmostEqual(rewards[0], 0.5) # shortest correct answer gets maximum reward def test_different_lengths_incorrect_answers(self): """Test len_reward with different length incorrect answers.""" completions = [ [{"content": r"\boxed{\frac{64}{400}}"}], # shorter [{"content": r"\boxed{\frac{64}{400}} " + "x" * 10}], # longer ] solutions = [r"\frac{63}{400}", r"\frac{63}{400}"] rewards = len_reward(completions, solutions) self.assertLessEqual(rewards[0], 0.0) # incorrect answers should get non-positive rewards self.assertLessEqual(rewards[1], 0.0) self.assertGreater(rewards[0], rewards[1]) # shorter answer should still be penalized less def test_mixed_correctness(self): """Test len_reward with mix of correct and incorrect answers of different lengths.""" completions = [ [{"content": r"\boxed{\frac{63}{400}}"}], # correct, shorter [{"content": r"\boxed{\frac{63}{400}} " + "x" * 10}], # correct, longer [{"content": r"\boxed{\frac{64}{400}}"}], # incorrect, shorter [{"content": r"\boxed{\frac{64}{400}} " + "x" * 10}], # incorrect, longer ] solutions = [r"\frac{63}{400}"] * 4 rewards = len_reward(completions, solutions) # Shortest correct answer should get positive reward self.assertGreater(rewards[0], 0.0) # Longer correct answer might get negative reward: self.assertGreater(rewards[2], rewards[1]) self.assertGreaterEqual(rewards[1], rewards[3]) # Incorrect answers should get non-positive rewards self.assertLessEqual(rewards[2], 0.0) self.assertLessEqual(rewards[3], 0.0) # Shorter answers should get better rewards within their correctness category self.assertGreater(rewards[0], rewards[1]) # correct answers self.assertGreater(rewards[2], rewards[3]) # incorrect answers def test_unparseable_solution(self): """Test len_reward with unparseable solution.""" completions = [ [{"content": r"\boxed{answer}"}], [{"content": r"\boxed{answer} " + "x" * 10}], ] solutions = ["unparseable_latex", "unparseable_latex"] rewards = len_reward(completions, solutions) self.assertGreater(rewards[0], rewards[1]) # shorter answer should still get better reward self.assertAlmostEqual(rewards[0], 0.5) # treated as correct, shortest gets maximum reward class TestRepetitionPenaltyReward(unittest.TestCase): def test_positive_max_penalty_raises_value_error(self): with self.assertRaises(ValueError): get_repetition_penalty_reward(ngram_size=2, max_penalty=1.0) with self.assertRaisesRegex(ValueError, "max_penalty 1.5 should not be positive"): get_repetition_penalty_reward(ngram_size=2, max_penalty=1.5) def test_no_repetition(self): reward_fn = 
get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0) completions = [[{"content": "this is a test sentence"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [0.0]) def test_full_repetition(self): reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0) completions = [[{"content": "this this this this this"}]] rewards = reward_fn(completions) # (1 - 1/4) * -1 = -0.75 self.assertEqual(rewards, [-0.75]) def test_partial_repetition(self): reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0) completions = [[{"content": "this is a this is a test"}]] rewards = reward_fn(completions) # Unique 2-grams: (this, is), (is, a), (a, this), (a, test). 4 unique out of 6 total # (1 - 4/6) * -1 = -1/3 = -0.3333... self.assertAlmostEqual(rewards[0], -1 / 3) def test_multiple_completions(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-0.5) completions = [ [{"content": "this is a test"}], [{"content": "test test test test"}], ] rewards = reward_fn(completions) # Completion 1: (this, is, a), (is, a, test) -> 2 unique / 2 total -> (1 - 2/2) * -0.5 = 0 # Completion 2: (test, test, test) -> 1 unique / 2 total -> (1 - 1/2) * -0.5 = -0.25 self.assertAlmostEqual(rewards[0], 0.0) self.assertAlmostEqual(rewards[1], -0.25) def test_empty_completion(self): reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0) completions = [[{"content": ""}]] rewards = reward_fn(completions) self.assertEqual(rewards, [0.0]) def test_different_ngram_size(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-2.0) completions = [[{"content": "this is a this is a test"}]] rewards = reward_fn(completions) self.assertAlmostEqual(rewards[0], -0.4) def test_mixed_case(self): reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0) completions = [ [{"content": "This is A Test"}], [{"content": "this IS a test"}], ] rewards = reward_fn(completions) # both completions should produce the same reward, because the text gets lowercased self.assertAlmostEqual(rewards[0], rewards[1]) def test_one_word_completion(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "word"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [0.0]) def test_two_word_completion(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "two words"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [0.0]) def test_three_word_completion(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "three different words"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [0.0]) def test_three_word_repetition_completion(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "word word word word"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [-0.5]) def test_four_word_completion_with_repetition(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "one two one two"}]] rewards = reward_fn(completions) # ngrams are (one two one) (two one two). unique is 2 and count is 2, therefore (1-1) * -1. 
self.assertEqual(rewards, [0.0]) def test_five_word_completion_with_repetition(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-0.5) completions = [[{"content": "A B C A B"}]] rewards = reward_fn(completions) # (A B C) (B C A) (C A B). unique is 3. count is 3 (1-1) * -.5 = 0 self.assertEqual(rewards, [0.0]) def test_six_word_completion_with_repetition(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "A B C A B C"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [-0.25]) def test_long_completion_with_repetition(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "A B C A B C E F G A B C A B C"}]] rewards = reward_fn(completions) self.assertAlmostEqual(rewards[0], -0.3846, places=4) def test_long_completion_without_repetition(self): reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0) completions = [[{"content": "A B C D E F G H I J K L"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [0.0]) def test_tag_count_rewards_all_correct(self): """Test tag_count_reward with correct tags.""" completion = [[{"content": "<think>\nSome reasoning\n</think>\n<answer>\nThe answer\n</answer>"}]] rewards = tag_count_reward(completion) self.assertEqual(rewards[0], 1.0) def test_tag_count_rewards_missing_think_begin(self): """Test tag_count_reward with missing <think> tag.""" completion = [[{"content": "Some reasoning\n</think>\n<answer>\nThe answer\n</answer>"}]] rewards = tag_count_reward(completion) self.assertEqual(rewards[0], 0.75) def test_tag_count_rewards_missing_think_end(self): """Test tag_count_reward with missing </think> tag.""" completion = [[{"content": "<think>\nSome reasoning\n<answer>\nThe answer\n</answer>"}]] rewards = tag_count_reward(completion) self.assertEqual(rewards[0], 0.75) def test_tag_count_rewards_missing_answer_begin(self): """Test tag_count_reward with missing <answer> tag.""" completion = [[{"content": "<think>\nSome reasoning\n</think>\nThe answer\n</answer>"}]] rewards = tag_count_reward(completion) self.assertEqual(rewards[0], 0.75) def test_tag_count_rewards_missing_answer_end(self): """Test tag_count_reward with missing </answer> tag.""" completion = [[{"content": "<think>\nSome reasoning\n</think>\n<answer>\nThe answer"}]] rewards = tag_count_reward(completion) self.assertEqual(rewards[0], 0.75) def test_tag_count_rewards_missing_all_tags(self): """Test tag_count_reward with missing all tags.""" completion = [[{"content": "Some reasoning\nThe answer"}]] rewards = tag_count_reward(completion) self.assertEqual(rewards[0], 0.0) def test_full_repetition_with_language(self): reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0, language="en") completions = [[{"content": "that that that that that"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [-0.75]) # begin test for zh language reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0, language="zh") completions = [[{"content": "这个这个这个这个这个"}]] rewards = reward_fn(completions) self.assertEqual(rewards, [-0.75]) def test_soft_overlong_punishment_short_completion(self): """Test soft overlong punishment reward function with a short completion.""" # length 50, with max=100 and soft cache=20, reward should be 0. 
reward_fn = get_soft_overlong_punishment(max_completion_len=100, soft_punish_cache=20) completion_ids = [[1] * 50] # 50 <= 80 rewards = reward_fn(completion_ids=completion_ids) self.assertEqual(rewards, [0]) def test_soft_overlong_punishment_long_completion(self): """Test soft overlong punishment reward function with a longer than max completion.""" # 110 > 100, reward should be -1. reward_fn = get_soft_overlong_punishment(max_completion_len=100, soft_punish_cache=20) completion_ids = [[1] * 110] rewards = reward_fn(completion_ids) self.assertEqual(rewards, [-1]) def test_soft_overlong_punishment_intermediate_completion(self): """Test soft overlong punishment reward function for intermediate length completion.""" reward_fn = get_soft_overlong_punishment(max_completion_len=100, soft_punish_cache=20) completion_ids = [[1] * 90] # 90 is between 80 and 100 rewards = reward_fn(completion_ids) self.assertAlmostEqual(rewards[0], -0.5, places=4) class TestCodeFormat(unittest.TestCase): def test_correct_python_format(self): """Test code format reward with correct Python format.""" completion = [ [ { "content": "<think>\nLet's solve this\nStep 1: First step\n</think>\n<answer>\n```python\ndef hello():\n print('world')\n```\n</answer>" } ] ] reward_fn = get_code_format_reward(language="python") rewards = reward_fn(completion) self.assertEqual(rewards[0], 1.0) def test_incorrect_formats(self): """Test code format reward with various incorrect formats.""" incorrect_formats = [ # Missing think/answer tags "```python\ndef hello():\n print('world')\n```", # Missing code block "<think>Some thinking</think><answer>Just plain text</answer>", # Wrong language "<think>Analysis</think><answer>```javascript\nconsole.log('hello');\n```</answer>", # Missing language identifier "<think>Analysis</think><answer>```\ndef hello(): pass\n```</answer>", # Wrong order of tags "<answer>```python\ndef hello(): pass\n```</answer><think>Analysis</think>", ] reward_fn = get_code_format_reward(language="python") for fmt in incorrect_formats: completion = [[{"content": fmt}]] rewards = reward_fn(completion) self.assertEqual(rewards[0], 0.0) def test_multiple_code_blocks(self): """Test format reward with multiple code blocks in think and answer sections.""" completion = [ [ { "content": "<think>\nHere's an example:\n```python\nx = 1\n```\nNow the solution:\n</think>\n<answer>\n```python\ndef solution():\n return 42\n```\n</answer>" } ] ] reward_fn = get_code_format_reward(language="python") rewards = reward_fn(completion) self.assertEqual(rewards[0], 1.0) def test_different_languages(self): """Test code format reward with different programming languages.""" completion = [ [ { "content": "<think>\nAnalysis\n</think>\n<answer>\n```javascript\nconsole.log('hello');\n```\n</answer>" } ] ] # Test with JavaScript js_reward_fn = get_code_format_reward(language="javascript") rewards = js_reward_fn(completion) self.assertEqual(rewards[0], 1.0) # Same completion should fail for Python py_reward_fn = get_code_format_reward(language="python") rewards = py_reward_fn(completion) self.assertEqual(rewards[0], 0.0) def test_multiline_code(self): """Test format reward with complex multiline code blocks.""" completion = [ [ { "content": "<think>\nHere's the analysis\n</think>\n<answer>\n```python\nclass Solution:\n def __init__(self):\n self.value = 42\n \n def get_value(self):\n return self.value\n```\n</answer>" } ] ] reward_fn = get_code_format_reward(language="python") rewards = reward_fn(completion) self.assertEqual(rewards[0], 1.0) if 
__name__ == "__main__": unittest.main()
open-r1/tests/test_rewards.py/0
{ "file_path": "open-r1/tests/test_rewards.py", "repo_id": "open-r1", "token_count": 10656 }
210
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tuners A tuner (or adapter) is a module that can be plugged into a `torch.nn.Module`. [`BaseTuner`] is the base class for other tuners and provides shared methods and attributes for preparing an adapter configuration and replacing a target module with the adapter module. [`BaseTunerLayer`] is a base class for adapter layers. It offers methods and attributes for managing adapters, such as activating and disabling adapters. ## BaseTuner [[autodoc]] tuners.tuners_utils.BaseTuner ## BaseTunerLayer [[autodoc]] tuners.tuners_utils.BaseTunerLayer
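For illustration, adapter layers created by any tuner can be managed through the [`BaseTunerLayer`] interface. The snippet below is a minimal sketch (it assumes an already created PEFT model named `peft_model`; exact method availability may vary between PEFT versions):

```python
from peft.tuners.tuners_utils import BaseTunerLayer

# Walk the wrapped model and temporarily disable every adapter layer,
# e.g. to compare the base model's outputs against the adapted model's.
for module in peft_model.modules():
    if isinstance(module, BaseTunerLayer):
        module.enable_adapters(False)

# ... run inference with the base weights only ...

# Re-enable the adapter layers afterwards.
for module in peft_model.modules():
    if isinstance(module, BaseTunerLayer):
        module.enable_adapters(True)
```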
peft/docs/source/package_reference/tuners.md/0
{ "file_path": "peft/docs/source/package_reference/tuners.md", "repo_id": "peft", "token_count": 330 }
211
IDX=$1 PROMPT_IDX=$((IDX % 25)) CLASS_IDX=$((IDX % 30)) # Define the UNIQUE_TOKEN, CLASS_TOKENs, and SUBJECT_NAMES UNIQUE_TOKEN="qwe" SUBJECT_NAMES=( "backpack" "backpack_dog" "bear_plushie" "berry_bowl" "can" "candle" "cat" "cat2" "clock" "colorful_sneaker" "dog" "dog2" "dog3" "dog5" "dog6" "dog7" "dog8" "duck_toy" "fancy_boot" "grey_sloth_plushie" "monster_toy" "pink_sunglasses" "poop_emoji" "rc_car" "red_cartoon" "robot_toy" "shiny_sneaker" "teapot" "vase" "wolf_plushie" ) CLASS_TOKENs=( "backpack" "backpack" "stuffed animal" "bowl" "can" "candle" "cat" "cat" "clock" "sneaker" "dog" "dog" "dog" "dog" "dog" "dog" "dog" "toy" "boot" "stuffed animal" "toy" "glasses" "toy" "toy" "cartoon" "toy" "sneaker" "teapot" "vase" "stuffed animal" ) CLASS_TOKEN=${CLASS_TOKENs[$CLASS_IDX]} SELECTED_SUBJECT=${SUBJECT_NAMES[$CLASS_IDX]} if [[ $CLASS_IDX =~ ^(0|1|2|3|4|5|8|9|17|18|19|20|21|22|23|24|25|26|27|28|29)$ ]]; then PROMPT_LIST=( "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a wheat field in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a tree and autumn leaves in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with the Eiffel Tower in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating on top of water." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating in an ocean of milk." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of green grass with sunflowers around it." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a mirror." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of the sidewalk in a crowded street." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a dirt road." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a white rug." "a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}." 
) prompt_test_list=( "a ${CLASS_TOKEN} in the jungle" "a ${CLASS_TOKEN} in the snow" "a ${CLASS_TOKEN} on the beach" "a ${CLASS_TOKEN} on a cobblestone street" "a ${CLASS_TOKEN} on top of pink fabric" "a ${CLASS_TOKEN} on top of a wooden floor" "a ${CLASS_TOKEN} with a city in the background" "a ${CLASS_TOKEN} with a mountain in the background" "a ${CLASS_TOKEN} with a blue house in the background" "a ${CLASS_TOKEN} on top of a purple rug in a forest" "a ${CLASS_TOKEN} with a wheat field in the background" "a ${CLASS_TOKEN} with a tree and autumn leaves in the background" "a ${CLASS_TOKEN} with the Eiffel Tower in the background" "a ${CLASS_TOKEN} floating on top of water" "a ${CLASS_TOKEN} floating in an ocean of milk" "a ${CLASS_TOKEN} on top of green grass with sunflowers around it" "a ${CLASS_TOKEN} on top of a mirror" "a ${CLASS_TOKEN} on top of the sidewalk in a crowded street" "a ${CLASS_TOKEN} on top of a dirt road" "a ${CLASS_TOKEN} on top of a white rug" "a red ${CLASS_TOKEN}" "a purple ${CLASS_TOKEN}" "a shiny ${CLASS_TOKEN}" "a wet ${CLASS_TOKEN}" "a cube shaped ${CLASS_TOKEN}" ) else PROMPT_LIST=( "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a red hat." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a santa hat." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a rainbow scarf." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a black top hat and a monocle." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a chef outfit." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a firefighter outfit." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a police outfit." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing pink glasses." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a yellow shirt." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a purple wizard outfit." "a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}." 
) prompt_test_list=( "a ${CLASS_TOKEN} in the jungle" "a ${CLASS_TOKEN} in the snow" "a ${CLASS_TOKEN} on the beach" "a ${CLASS_TOKEN} on a cobblestone street" "a ${CLASS_TOKEN} on top of pink fabric" "a ${CLASS_TOKEN} on top of a wooden floor" "a ${CLASS_TOKEN} with a city in the background" "a ${CLASS_TOKEN} with a mountain in the background" "a ${CLASS_TOKEN} with a blue house in the background" "a ${CLASS_TOKEN} on top of a purple rug in a forest" "a ${CLASS_TOKEN} wearing a red hat" "a ${CLASS_TOKEN} wearing a santa hat" "a ${CLASS_TOKEN} wearing a rainbow scarf" "a ${CLASS_TOKEN} wearing a black top hat and a monocle" "a ${CLASS_TOKEN} in a chef outfit" "a ${CLASS_TOKEN} in a firefighter outfit" "a ${CLASS_TOKEN} in a police outfit" "a ${CLASS_TOKEN} wearing pink glasses" "a ${CLASS_TOKEN} wearing a yellow shirt" "a ${CLASS_TOKEN} in a purple wizard outfit" "a red ${CLASS_TOKEN}" "a purple ${CLASS_TOKEN}" "a shiny ${CLASS_TOKEN}" "a wet ${CLASS_TOKEN}" "a cube shaped ${CLASS_TOKEN}" ) fi VALIDATION_PROMPT=${PROMPT_LIST[@]} INSTANCE_PROMPT="a photo of ${UNIQUE_TOKEN} ${CLASS_TOKEN}" CLASS_PROMPT="a photo of ${CLASS_TOKEN}" export MODEL_NAME="stabilityai/stable-diffusion-2-1" # export MODEL_NAME="runwayml/stable-diffusion-v1-5" PEFT_TYPE="boft" BLOCK_NUM=8 BLOCK_SIZE=0 N_BUTTERFLY_FACTOR=1 export PROJECT_NAME="dreambooth_${PEFT_TYPE}" export RUN_NAME="${SELECTED_SUBJECT}_${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}" export INSTANCE_DIR="./data/dreambooth/dataset/${SELECTED_SUBJECT}" export CLASS_DIR="./data/class_data/${CLASS_TOKEN}" export OUTPUT_DIR="./data/output/${PEFT_TYPE}" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir="$CLASS_DIR" \ --output_dir=$OUTPUT_DIR \ --wandb_project_name=$PROJECT_NAME \ --wandb_run_name=$RUN_NAME \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="$INSTANCE_PROMPT" \ --validation_prompt="$VALIDATION_PROMPT" \ --class_prompt="$CLASS_PROMPT" \ --resolution=512 \ --train_batch_size=1 \ --num_dataloader_workers=2 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --use_boft \ --boft_block_num=$BLOCK_NUM \ --boft_block_size=$BLOCK_SIZE \ --boft_n_butterfly_factor=$N_BUTTERFLY_FACTOR \ --boft_dropout=0.1 \ --boft_bias="boft_only" \ --learning_rate=3e-5 \ --max_train_steps=1010 \ --checkpointing_steps=200 \ --validation_steps=200 \ --enable_xformers_memory_efficient_attention \ --report_to="wandb" \
peft/examples/boft_dreambooth/train_dreambooth.sh/0
{ "file_path": "peft/examples/boft_dreambooth/train_dreambooth.sh", "repo_id": "peft", "token_count": 3412 }
212
<jupyter_start><jupyter_text>Fine-tuning [Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on [timdettmers/openassistant-guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) Dataset using QDora (quantized Lora w/ use_dora=True) on T4 Free Colab GPU.<jupyter_code># Install the libraries !pip install -q -U bitsandbytes !pip install -q -U git+https://github.com/huggingface/transformers.git !pip install -q -U git+https://github.com/huggingface/peft.git !pip install -q -U git+https://github.com/huggingface/accelerate.git !pip install -q datasets # Required when training models/data that are gated on HuggingFace, and required for pushing models to HuggingFace from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Loading the model and it's tokenizer in quantized setup!<jupyter_code># setting up the config for 4-bit quantization of Qlora import torch from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig model_id = "meta-llama/Meta-Llama-3-8B" bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 ) tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"": 0}) # print(model)<jupyter_output><empty_output><jupyter_text>Prepare model for PEFT fine-tuning<jupyter_code>from peft import prepare_model_for_kbit_training model.gradient_checkpointing_enable() model = prepare_model_for_kbit_training(model) def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" )<jupyter_output><empty_output><jupyter_text>Setup `LoraConfig` To use Dora we set the `use_dora=True`<jupyter_code>from peft import LoraConfig, get_peft_model config = LoraConfig( use_dora=True, r=8, lora_alpha=32, target_modules=[ "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", ], # parameters specific to llama lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) print_trainable_parameters(model)<jupyter_output>trainable params: 22347776 || all params: 4562948096 || trainable%: 0.48976616717579247<jupyter_text>Step 2) Fine-tuning process 💥<jupyter_code># Load the dataset from HF from datasets import load_dataset data = load_dataset("timdettmers/openassistant-guanaco") data = data.map(lambda samples: tokenizer(samples["text"]), batched=True)<jupyter_output><empty_output><jupyter_text>TrainingFor the sake of the demo, we just ran it for 10 steps just to showcase how to use this integration with existing tools on the HF ecosystem.<jupyter_code>import transformers tokenizer.pad_token = tokenizer.eos_token trainer = transformers.Trainer( model=model, train_dataset=data["train"], args=transformers.TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, warmup_steps=2, max_steps=10, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir="path/to/your/HF/repo", # change it to your desired repo! 
optim="paged_adamw_8bit", ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train()<jupyter_output>max_steps is given, it will override any value given in num_train_epochs /usr/local/lib/python3.10/dist-packages/torch/utils/checkpoint.py:464: UserWarning: torch.utils.checkpoint: the use_reentrant parameter should be passed explicitly. In version 2.4 we will raise an exception if use_reentrant is not passed. use_reentrant=False is recommended, but if you need to preserve the current default behavior, you can pass use_reentrant=True. Refer to docs for more details on the differences between the two variants. warnings.warn(<jupyter_text>Usage Example<jupyter_code>model.config.use_cache = True model.eval(); from transformers import GenerationConfig max_new_tokens = 120 top_p = 0.9 temperature = 0.7 user_question = "What is the purpose of quantization in LLMs?" prompt = ( "A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the user's questions. " "### Human: {user_question}" "### Assistant: " ) def generate(model, user_question, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature): inputs = tokenizer(prompt.format(user_question=user_question), return_tensors="pt").to("cuda") outputs = model.generate( **inputs, generation_config=GenerationConfig( do_sample=True, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature, ), ) text = tokenizer.decode(outputs[0], skip_special_tokens=True) # print(text) return text generate(model, user_question) # trainer.push_to_hub()<jupyter_output><empty_output>
peft/examples/dora_finetuning/QDoRA_finetuning.ipynb/0
{ "file_path": "peft/examples/dora_finetuning/QDoRA_finetuning.ipynb", "repo_id": "peft", "token_count": 2118 }
213
<jupyter_start><jupyter_code>import os os.environ["CUDA_VISIBLE_DEVICES"] = "1" # force using CUDA device 1 os.environ["ZE_AFFINITY_MASK"] = "1" # force using Intel XPU device 1 from peft import PeftConfig, PeftModel from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from datasets import load_dataset import torch import random peft_model_id = "smangrul/tinyllama_lora_norobots" device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" config = PeftConfig.from_pretrained(peft_model_id) model_kwargs = {"device_map": "auto"} model_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_4bit=True) model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, **model_kwargs) tokenizer = AutoTokenizer.from_pretrained(peft_model_id) model.resize_token_embeddings(len(tokenizer)) model = PeftModel.from_pretrained(model, peft_model_id, adapter_name="norobots") _ = model.load_adapter("smangrul/tinyllama_lora_sql", adapter_name="sql") _ = model.load_adapter("smangrul/tinyllama_lora_adcopy", adapter_name="adcopy") %%time # [0.8, 0.1, 0.1] linear #[1.0, 0.2] 0.7 density dare_linear #[1.5, 0.3] 0.5 density ties #[0.8, 0.5] cat adapters = ["norobots", "adcopy", "sql"] weights = [2.0, 0.3, 0.7] adapter_name = "merge" density = 0.2 combination_type = "ties" if adapter_name in model.peft_config: model.delete_adapter(adapter_name) model.add_weighted_adapter(adapters, weights, adapter_name, combination_type=combination_type, density=density) model.eval() model.set_adapter("merge") messages = [ {"role": "user", "content": "Write an essay about Generative AI."}, ] text = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) inputs = tokenizer(text, return_tensors="pt") # , add_special_tokens=False) inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( **inputs, max_new_tokens=256, do_sample=True, top_p=0.95, temperature=0.2, repetition_penalty=1.2, eos_token_id=tokenizer.eos_token_id, ) print(tokenizer.decode(outputs[0])) messages = [ {"role": "system", "content": "Create a text ad given the following product and description."}, { "role": "user", "content": "Product: Sony PS5 PlayStation Console\nDescription: The PS5™ console unleashes new gaming possibilities that you never anticipated.", }, ] text = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) inputs = tokenizer(text, return_tensors="pt") # , add_special_tokens=False) inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( **inputs, max_new_tokens=128, do_sample=True, top_p=0.95, temperature=0.2, repetition_penalty=1.2, eos_token_id=tokenizer.eos_token_id, ) print(tokenizer.decode(outputs[0])) text = """Table: 2-11365528-2 Columns: ['Team', 'Head Coach', 'President', 'Home Ground', 'Location'] Natural Query: Who is the Head Coach of the team whose President is Mario Volarevic? SQL Query:""" inputs = tokenizer(text, return_tensors="pt") # , add_special_tokens=False) inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( **inputs, max_new_tokens=64, repetition_penalty=1.1, eos_token_id=tokenizer("</s>").input_ids[-1] ) print(tokenizer.decode(outputs[0]))<jupyter_output><s> Table: 2-11365528-2 Columns: ['Team', 'Head Coach', 'President', 'Home Ground', 'Location'] Natural Query: Who is the Head Coach of the team whose President is Mario Volarevic? 
SQL Query: SELECT Head Coach FROM 2-11365528-2 WHERE President = Mario Volarevic</s>
peft/examples/multi_adapter_examples/Lora_Merging.ipynb/0
{ "file_path": "peft/examples/multi_adapter_examples/Lora_Merging.ipynb", "repo_id": "peft", "token_count": 1401 }
214
<jupyter_start><jupyter_text>Fine-tuning [Llama3-8b](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on [timdettmers/openassistant-guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) Dataset using QRandLora (quantized RandLora) on T4 Free Colab GPU.<jupyter_code># Install the libraries !pip install -q -U bitsandbytes !pip install -q -U git+https://github.com/huggingface/transformers.git !pip install -q -U git+https://github.com/huggingface/peft.git !pip install -q -U git+https://github.com/huggingface/accelerate.git !pip install -q datasets # Required when training models/data that are gated on HuggingFace, and required for pushing models to HuggingFace from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Loading the model and its tokenizer in quantized setup!<jupyter_code># setting up the config for 4-bit quantization of QRandLora import torch from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig model_id = "meta-llama/Meta-Llama-3-8B" bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 ) tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map={"": 0}) print(model)<jupyter_output><empty_output><jupyter_text>Prepare model for PEFT fine-tuning<jupyter_code>from peft import prepare_model_for_kbit_training model.gradient_checkpointing_enable() model = prepare_model_for_kbit_training(model) def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" )<jupyter_output><empty_output><jupyter_text>Setup `RandLoraConfig`<jupyter_code>from peft import RandLoraConfig, get_peft_model config = RandLoraConfig( r=32, randlora_alpha=640, target_modules=[ "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", ], # parameters specific to llama randlora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) print_trainable_parameters(model) print(model)<jupyter_output><empty_output><jupyter_text>Step 2) Fine-tuning process 💥<jupyter_code># Load the dataset from HF from datasets import load_dataset data = load_dataset("timdettmers/openassistant-guanaco") data = data.map(lambda samples: tokenizer(samples["text"]), batched=True)<jupyter_output><empty_output><jupyter_text>TrainingFor the sake of the demo, we just ran it for 10 steps just to showcase how to use this integration with existing tools on the HF ecosystem.<jupyter_code>import transformers tokenizer.pad_token = tokenizer.eos_token trainer = transformers.Trainer( model=model, train_dataset=data["train"], args=transformers.TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, warmup_steps=2, max_steps=10, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir="path/to/your/HF/repo", # change it to your desired repo! optim="paged_adamw_8bit", label_names=["labels"], ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! 
trainer.train()<jupyter_output><empty_output><jupyter_text>Usage Example<jupyter_code>model.config.use_cache = True model.eval(); from transformers import GenerationConfig max_new_tokens = 120 top_p = 0.9 temperature = 0.7 user_question = "What is the purpose of quantization in LLMs?" prompt = ( "A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the user's questions. " "### Human: {user_question}" "### Assistant: " ) def generate(model, user_question, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature): device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" inputs = tokenizer(prompt.format(user_question=user_question), return_tensors="pt").to(device) outputs = model.generate( **inputs, generation_config=GenerationConfig( do_sample=True, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature, ), ) text = tokenizer.decode(outputs[0], skip_special_tokens=True) # print(text) return text generate(model, user_question) # trainer.push_to_hub()<jupyter_output><empty_output>
peft/examples/randlora_finetuning/qrandlora_finetuning.ipynb/0
{ "file_path": "peft/examples/randlora_finetuning/qrandlora_finetuning.ipynb", "repo_id": "peft", "token_count": 1979 }
215
import argparse import evaluate import torch from accelerate import Accelerator, DistributedDataParallelKwargs from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from peft import ( PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, get_peft_model, ) from peft.utils.other import fsdp_auto_wrap_policy def parse_args(): parser = argparse.ArgumentParser(description="PEFT a transformers model on a sequence classification task") parser.add_argument( "--num_virtual_tokens", type=int, default=20, help="num_virtual_tokens if the number of virtual tokens used in prompt/prefix/P tuning.", ) parser.add_argument( "--encoder_hidden_size", type=int, default=128, help="encoder_hidden_size if the encoder hidden size used in P tuninig/Prefix tuning.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=1e-3, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--peft_type", type=str, default="p_tuning", help="The PEFT type to use.", choices=["p_tuning", "prefix_tuning", "prompt_tuning"], ) args = parser.parse_args() assert args.output_dir is not None, "Need an `output_dir` to store the finetune model and verify." return args def main(): args = parse_args() ddp_scaler = DistributedDataParallelKwargs(find_unused_parameters=True) accelerator = Accelerator(kwargs_handlers=[ddp_scaler]) task = "mrpc" # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) if args.peft_type == "p_tuning": peft_config = PromptEncoderConfig( task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens, encoder_hidden_size=args.encoder_hidden_size, ) elif args.peft_type == "prefix_tuning": peft_config = PrefixTuningConfig( task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens, encoder_hidden_size=args.encoder_hidden_size, ) else: peft_config = PromptTuningConfig(task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens) tokenizer_kwargs = {} if any(k in args.model_name_or_path for k in ("gpt", "opt", "bloom")): tokenizer_kwargs["padding_side"] = "left" else: tokenizer_kwargs["padding_side"] = "right" tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, **tokenizer_kwargs) if getattr(tokenizer, "pad_token_id") is None: tokenizer.pad_token_id = tokenizer.eos_token_id datasets = load_dataset("glue", task) metric = evaluate.load("glue", task) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=args.per_device_eval_batch_size, ) model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() if getattr(accelerator.state, "fsdp_plugin", None) is not None: accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model) model = accelerator.prepare(model) optimizer = AdamW(params=model.parameters(), lr=args.learning_rate) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=(len(train_dataloader) * args.num_train_epochs), ) if getattr(accelerator.state, "fsdp_plugin", None) is not None: train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare( train_dataloader, eval_dataloader, optimizer, lr_scheduler ) else: model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, optimizer, lr_scheduler ) for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() samples_seen = 0 for step, batch in enumerate(tqdm(eval_dataloader)): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather((predictions, batch["labels"])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == 
len(eval_dataloader) - 1: predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, state_dict=accelerator.get_state_dict(model)) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if __name__ == "__main__": main()
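# Example invocation (illustrative only; the model name, PEFT type, and paths below are placeholders
# and should be adjusted to your setup):
#
#   accelerate launch peft_no_lora_accelerate.py \
#       --model_name_or_path roberta-large \
#       --peft_type p_tuning \
#       --num_virtual_tokens 20 \
#       --per_device_train_batch_size 32 \
#       --output_dir ./mrpc_p_tuning_output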
peft/examples/sequence_classification/peft_no_lora_accelerate.py/0
{ "file_path": "peft/examples/sequence_classification/peft_no_lora_accelerate.py", "repo_id": "peft", "token_count": 3361 }
216
# PEFT method comparison on the MetaMathQA and GSM8K datasets ## Goal The goal is to provide a benchmarking framework for the different PEFT methods that are implemented. It is important that evaluating different PEFT methods is reproducible, idempotent, and version-controlled. Results for more PEFT methods can be added over time. ## Dataset This task trains on the [MetaMathQA](https://huggingface.co/datasets/meta-math/MetaMathQA) dataset and validates/tests on the [GSM8K](https://huggingface.co/datasets/openai/gsm8k) dataset ("main"). For the model to attain good accuracy, it needs to learn to adhere to the output format and it must express basic chain of thought reasoning capabilities to get to the correct result in the first place. The task is challenging for models in the sub 7B parameter range. The train set uses the whole of MetaMathQA. The validation set is a random sample from the GSM8K train set. The test set is the whole of the GSM8K test set. ## Running Create an experiment in the `experiments/<peft-method>` folder of your choice and give it a name (the name itself does not matter but helps identify the experiment). An example would be `experiments/lora/llama-3.2-3B-rank32/`. Inside that directory, create 2 files: - `adapter_config.json` - Optional: `training_parameters.json` Once you have created these two files, you can either - run the whole suite by simply calling `make` (takes >24h) - run one specific experiment by calling `make results/<experiment_name>-<experiment_variation>.json`, for example `results/vblora-llama-3.2-3B-default.json` You can get a list of all runnable experiments by running `make list`, e.g.: ``` % make list (git)-[method-comparison-results] ⛓ peft Discovered experiment configurations: - experiments/ptuning/llama-3.2-3B-default/adapter_config.json [...] - experiments/vblora/llama-3.2-3B-default/adapter_config.json Target result files: - results/ptuning-llama-3.2-3B-default.json [...] - results/vblora-llama-3.2-3B-default.json ``` In case you want to force the execution of an experiment, you can simply `touch` the respective adapter config without modifying it. For example: touch experiments/vblora/llama-3.2-3B-default/adapter_config.json make to run the VBLoRA default experiment again. ### `adapter_config.json` This must be a valid PEFT configuration. It is easiest to create it programmatically, e.g.: ```python from peft import LoraConfig config = LoraConfig(...) config.save_pretrained(<path-to-experiment>) ``` ### `training_parameters.json` There is a default file for the non-PEFT parameters: `default_training_params.json`. This contains all the other parameters that are relevant for training, e.g. the base model id, number of steps, batch size, learning rate, etc. If parameters that differ from the defaults are needed for a specific experiment, place a `training_parameters.json` into the experiment directory and adjust the parameters that need changing. The other parameters are taken from the aforementioned default config. For an overview of all possible arguments, you can also check the `TrainConfig` `dataclass` in `utils.py`. ### Runtime performance Several factors should be considered to achieve fast runtime performance. Besides obvious factors like `max_steps` or the base model size, we found the following to have a significant impact: #### Eval batch size The `batch_size_eval` parameter is quite critical, since evaluation takes up a significant portion of the training time and batching helps reduce it.
It should be possible to choose a value that is several times higher than the batch size used for training (`batch_size`). You should also pay attention to the size of the validation set -- e.g. if it's 50, don't choose a `batch_size_eval` of 40, as that results in a large batch of 40 and a small batch of 10; 25 might be a better choice. Also, ensure via a quick train run that the batch size does not lead to out of memory errors -- getting this error at the very end, when evaluating the test set, would be quite a loss of time. #### Generation length During testing, we discovered that the validation time is greatly inflated by just a few very long generations. Those can inflate the validation time by a factor of 3 or more. At the same time, we discovered that these long generations do not help with accuracy -- in fact, if they exceed the maximum configured length, they're just cut off mid-sentence and would thus produce an accuracy of 0 anyway. To remedy this, we now set both `max_length` and `max_new_tokens` for the generation kwargs in the default training parameters. Normally, this is not possible when using transformers, as the latter argument overrides the former. However, we have added special logic inside `get_generation_config` which takes both and chooses the smaller of the two (an illustrative sketch of this length-capping logic is shown below, after the Dependencies section). This way, we can get rid of these excessively long generations, thus considerably reducing eval times, while still guaranteeing a maximum total generation length to guard against OOM errors. Testing showed that this does not hamper test accuracy. It is therefore recommended not to change these settings. #### Bucketing The length of the sequences in the training data can vary a lot. Therefore, if samples are taken randomly from the training dataset, we will end up with batches containing very short and very long sequences. This is bad because the batch will be padded to the longest sequence, slowing down training. The obvious solution would be to sort the whole dataset by sequence length, but this is also bad because it introduces an order bias (e.g. first training on only short and then on only long answers). The solution is to find a trade-off between the two factors. This is achieved by the `BucketIterator`. It first creates buckets that contain multiple batches, e.g. 20x the batch size. Each bucket is then sorted by sequence length and batches are yielded from the bucket. Therefore, we have a small order bias within a bucket but not between buckets, striking a good balance between training speed and training loss (a minimal sketch of this bucketing scheme is given at the end of this document). From practical experiments, for a batch size of 4, a bucket size of 80 provides a good balance, with only slightly lower training loss but cutting training time by 25%. For eval, we don't use the iterator since the batch size there is relatively big and thus there is little upside. ### Start a run Once everything is set up properly, start a run by using the `run.py` script. Pass `-v` for verbose output to the console (recommended if observing the progress is desired). As an example, for `experiments/lora/llama-3.2-3B-rank32/` the invocation would be: ```sh python run.py -v experiments/lora/llama-3.2-3B-rank32/ ``` By default, the adapter will be saved in a temporary file for further inspection if needed. To prevent this, add the `--clean` flag to the call. ### Run status A run can be categorized into 3 different states: 1. Main run: You are on the `main` branch and the run ended successfully. The results are stored in the `results` folder and are used for further analysis. 2.
Test run: You are not on the `main` branch and the run ended successfully. The results are stored in the `temporary_results` folder and are not used for further analysis. 3. The run was cancelled (`ctrl + c`). The results are stored in the `cancelled_results` folder and are not used for further analysis. ## Outputs Results are stored in one of the result directories. An example output could look like so: ```js { "run_info": { "created_at": "2025-03-05T13:50:05+00:00", "total_time": 2711.0915009640157, "experiment_name": "ia3/lr_0.001", "peft_branch": "ben-method-comparison", "train_config": { "model_id": "meta-llama/Llama-3.2-3B", "dtype": "bfloat16", "max_seq_length": 768, "batch_size": 4, "batch_size_eval": 51, "max_steps": 5000, "eval_steps": 250, "compile": false, "query_template": "Question: {query} Think step by step.\nAnswer:", "seed": 0, "grad_norm_clip": 1.0, "optimizer_kwargs": { "lr": 0.001 }, "lr_scheduler": "cosine", "use_amp": false, "generation_kwargs": { "max_length": 800 }, "attn_implementation": null }, "peft_config": { "task_type": null, "peft_type": "IA3", "auto_mapping": null, "base_model_name_or_path": "meta-llama/Llama-3.2-3B", "revision": null, "inference_mode": false, "target_modules": [ "v_proj", "k_proj", "down_proj" ], "exclude_modules": null, "feedforward_modules": [ "down_proj" ], "fan_in_fan_out": false, "modules_to_save": null, "init_ia3_weights": true } }, "train_info": { "accelerator_memory_reserved_avg": 14229219940, "accelerator_memory_max": 24847056896, "accelerator_memory_reserved_99th": 19115624366, "train_time": 2238.65277833899, "file_size": 1157064, "status": "success", "metrics": [ { "step": 250, "valid accuracy": 0.0784313725490196, "train loss": 1.1336498007774354, "train samples": 1000 }, [...] { "step": 5000, "valid accuracy": 0.21568627450980393, "train loss": 0.6345920492410659, "train samples": 20000 }, { "step": 5000, "test accuracy": 0.35129740518962077, "train loss": 0.6345920492410659, "train samples": 20000, "train total tokens": 4197579 } ] }, "meta_info": { "model_sha": "13afe5124825b4f3751f836b40dafda64c1ed062", "model_created_at": "2024-09-18T15:23:48+00:00", "dataset_sha": "aa4f34d3d2d3231299b5b03d9b3e5a20da45aa18", "dataset_created_at": "2023-09-21T17:22:46+00:00", "package_info": { "transformers-version": "4.50.0.dev0", "transformers-commit-hash": "752ef3fd4e70869626ec70657a770a85c0ad9219", "peft-version": "0.14.1.dev0", "peft-commit-hash": "a447a4e5ecd87b7d57733f4df9616a328cf130f4", "datasets-version": "3.3.2", "datasets-commit-hash": null, "bitsandbytes-version": "0.45.2", "bitsandbytes-commit-hash": null, "torch-version": "2.6.0+cu124", "torch-commit-hash": null }, "system_info": { "system": "Linux", "release": "6.11.0-17-generic", "version": "#17~24.04.2-Ubuntu SMP PREEMPT_DYNAMIC Mon Jan 20 22:48:29 UTC 2", "machine": "x86_64", "processor": "x86_64", "accelerator": "NVIDIA GeForce RTX 4090" }, "pytorch_info": "PyTorch built with: [...]" } } ``` ## Dependencies Apart from the normal PEFT dependencies, ensure that the packages in the `requirements.txt` are installed, e.g. via: ```sh python -m pip install -r requirements.txt ``` Python 3.12+ is required. 
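As an illustration of the length-capping logic described under "Generation length" above, the following sketch shows one way to honor both `max_length` and `max_new_tokens` by always applying the tighter of the two limits. This is a hypothetical helper, not the actual `get_generation_config` implementation:

```python
from transformers import GenerationConfig


def capped_generation_config(prompt_len: int, generation_kwargs: dict) -> GenerationConfig:
    """Pick the stricter of max_length and prompt_len + max_new_tokens as the effective limit."""
    kwargs = dict(generation_kwargs)
    max_length = kwargs.pop("max_length", None)
    max_new_tokens = kwargs.pop("max_new_tokens", None)
    if max_length is not None and max_new_tokens is not None:
        effective = min(max_length, prompt_len + max_new_tokens)
    elif max_new_tokens is not None:
        effective = prompt_len + max_new_tokens
    else:
        effective = max_length  # may be None, in which case the library default applies
    return GenerationConfig(max_length=effective, **kwargs)
```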
## Open tasks - consider using `DataLoader` - consider adding https://github.com/huggingface/Math-Verify - consider adding `weight` argument to cross entropy calculation to downweight the EOS token, but it would require calculating the loss manually instead of relying on transformers (see https://github.com/huggingface/transformers/blob/6a876462c308bd7cd7d3ca8e93abaa7d5b02e90e/src/transformers/loss/loss_utils.py#L24-L48) - do a sanity check against/comparison with transformers Trainer - consider using vLLM to potentially speed up generations, at least for the test set - using `torch.compile` leads to a huge slowdown, investigate (maybe recompiles), although it does save memory - AMP does not appear to help, investigate - packing of sequences (but this probably requires adjusting the attention matrix) - clean up what gets printed and where (stdout, stderr)
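For completeness, here is a minimal sketch of the bucketing scheme described under "Bucketing" above (hypothetical code, not the actual `BucketIterator`): buckets of `batch_size * bucket_factor` samples are drawn in random order, each bucket is sorted by length, and batches are yielded from within the bucket.

```python
import random


def bucketed_batches(lengths: list[int], batch_size: int = 4, bucket_factor: int = 20):
    """Yield batches of indices with similar sequence lengths, without sorting the whole dataset."""
    indices = list(range(len(lengths)))
    random.shuffle(indices)  # keep the global order random to avoid bias between buckets
    bucket_size = batch_size * bucket_factor  # e.g. 4 * 20 = 80, as used in practice
    for start in range(0, len(indices), bucket_size):
        bucket = sorted(indices[start : start + bucket_size], key=lambda i: lengths[i])
        for j in range(0, len(bucket), batch_size):
            yield bucket[j : j + batch_size]
```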
peft/method_comparison/MetaMathQA/README.md/0
{ "file_path": "peft/method_comparison/MetaMathQA/README.md", "repo_id": "peft", "token_count": 4024 }
217
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Gradio app to show the results""" import os import tempfile import gradio as gr import plotly.express as px import plotly.graph_objects as go from processing import load_df from sanitizer import parse_and_filter metric_preferences = { "accelerator_memory_reserved_avg": "lower", "accelerator_memory_max": "lower", "accelerator_memory_reserved_99th": "lower", "total_time": "lower", "train_time": "lower", "file_size": "lower", "test_accuracy": "higher", "train_loss": "lower", } def get_model_ids(task_name, df): filtered = df[df["task_name"] == task_name] return sorted(filtered["model_id"].unique()) def filter_data(task_name, model_id, df): filtered = df[(df["task_name"] == task_name) & (df["model_id"] == model_id)] return filtered # Compute the Pareto frontier for two selected metrics. def compute_pareto_frontier(df, metric_x, metric_y): if df.empty: return df df = df.copy() points = df[[metric_x, metric_y]].values selected_indices = [] def dominates(a, b, metric_x, metric_y): # Check for each metric whether b is as good or better than a if metric_preferences[metric_x] == "higher": cond_x = b[0] >= a[0] better_x = b[0] > a[0] else: cond_x = b[0] <= a[0] better_x = b[0] < a[0] if metric_preferences[metric_y] == "higher": cond_y = b[1] >= a[1] better_y = b[1] > a[1] else: cond_y = b[1] <= a[1] better_y = b[1] < a[1] return cond_x and cond_y and (better_x or better_y) for i, point in enumerate(points): dominated = False for j, other_point in enumerate(points): if i == j: continue if dominates(point, other_point, metric_x, metric_y): dominated = True break if not dominated: selected_indices.append(i) pareto_df = df.iloc[selected_indices] return pareto_df def generate_pareto_plot(df, metric_x, metric_y): if df.empty: return {} # Compute Pareto frontier and non-frontier points. pareto_df = compute_pareto_frontier(df, metric_x, metric_y) non_pareto_df = df.drop(pareto_df.index) # Create an empty figure. fig = go.Figure() # Draw the line connecting Pareto frontier points. if not pareto_df.empty: # Sort the Pareto frontier points by metric_x for a meaningful connection. pareto_sorted = pareto_df.sort_values(by=metric_x) line_trace = go.Scatter( x=pareto_sorted[metric_x], y=pareto_sorted[metric_y], mode="lines", line={"color": "rgba(0,0,255,0.3)", "width": 4}, name="Pareto Frontier", ) fig.add_trace(line_trace) # Add non-frontier points in gray with semi-transparency. 
if not non_pareto_df.empty: non_frontier_trace = go.Scatter( x=non_pareto_df[metric_x], y=non_pareto_df[metric_y], mode="markers", marker={"color": "rgba(128,128,128,0.5)", "size": 12}, hoverinfo="text", text=non_pareto_df.apply( lambda row: f"experiment_name: {row['experiment_name']}<br>" f"peft_type: {row['peft_type']}<br>" f"{metric_x}: {row[metric_x]}<br>" f"{metric_y}: {row[metric_y]}", axis=1, ), showlegend=False, ) fig.add_trace(non_frontier_trace) # Add Pareto frontier points with legend if not pareto_df.empty: pareto_scatter = px.scatter( pareto_df, x=metric_x, y=metric_y, color="experiment_name", hover_data={"experiment_name": True, "peft_type": True, metric_x: True, metric_y: True}, ) for trace in pareto_scatter.data: trace.marker = {"size": 12} fig.add_trace(trace) # Update layout with axes labels. fig.update_layout( title=f"Pareto Frontier for {metric_x} vs {metric_y}", template="seaborn", height=700, autosize=True, xaxis_title=metric_x, yaxis_title=metric_y, ) return fig def compute_pareto_summary(filtered, pareto_df, metric_x, metric_y): if filtered.empty: return "No data available." stats = filtered[[metric_x, metric_y]].agg(["min", "max", "mean"]).to_string() total_points = len(filtered) pareto_points = len(pareto_df) excluded_points = total_points - pareto_points summary_text = ( f"{stats}\n\n" f"Total points: {total_points}\n" f"Pareto frontier points: {pareto_points}\n" f"Excluded points: {excluded_points}" ) return summary_text def export_csv(df): if df.empty: return None csv_data = df.to_csv(index=False) with tempfile.NamedTemporaryFile(delete=False, suffix=".csv", mode="w", encoding="utf-8") as tmp: tmp.write(csv_data) tmp_path = tmp.name return tmp_path def format_df(df): return df.style.format(precision=3, thousands=",", decimal=".") def build_app(df): with gr.Blocks(theme=gr.themes.Soft()) as demo: gr.Markdown("# PEFT method comparison") gr.Markdown( "Find more information [on the PEFT GitHub repo](https://github.com/huggingface/peft/tree/main/method_comparison)" ) # Hidden state to store the current filter query. filter_state = gr.State("") gr.Markdown("## Choose the task and base model") with gr.Row(): task_dropdown = gr.Dropdown( label="Select Task", choices=sorted(df["task_name"].unique()), value=sorted(df["task_name"].unique())[0], ) model_dropdown = gr.Dropdown( label="Select Model ID", choices=get_model_ids(sorted(df["task_name"].unique())[0], df) ) # Make dataframe columns all equal in width so that they are good enough for numbers but don't # get hugely extended by columns like `train_config`. column_widths = ["150px" for _ in df.columns] column2index = dict(zip(df.columns, range(len(df.columns)))) column_widths[column2index['experiment_name']] = '300px' data_table = gr.DataFrame( label="Results", value=format_df(df), interactive=False, max_chars=100, wrap=False, column_widths=column_widths, ) with gr.Row(): filter_textbox = gr.Textbox( label="Filter DataFrame", placeholder="Enter filter (e.g.: peft_type=='LORA')", interactive=True, ) apply_filter_button = gr.Button("Apply Filter") reset_filter_button = gr.Button("Reset Filter") gr.Markdown("## Pareto plot") gr.Markdown( "Select 2 criteria to plot the Pareto frontier. This will show the best PEFT methods along this axis and " "the trade-offs with the other axis. The PEFT methods that Pareto-dominate are shown in colors. All other " "methods are inferior with regard to these two metrics. Hover over a point to show details." 
) with gr.Row(): x_default = ( "accelerator_memory_max" if "accelerator_memory_max" in metric_preferences else list(metric_preferences.keys())[0] ) y_default = ( "test_accuracy" if "test_accuracy" in metric_preferences else list(metric_preferences.keys())[1] ) metric_x_dropdown = gr.Dropdown( label="1st metric for Pareto plot", choices=list(metric_preferences.keys()), value=x_default, ) metric_y_dropdown = gr.Dropdown( label="2nd metric for Pareto plot", choices=list(metric_preferences.keys()), value=y_default, ) pareto_plot = gr.Plot(label="Pareto Frontier Plot") summary_box = gr.Textbox(label="Summary Statistics", lines=6) csv_output = gr.File(label="Export Filtered Data as CSV") def update_on_task(task_name, current_filter): new_models = get_model_ids(task_name, df) filtered = filter_data(task_name, new_models[0] if new_models else "", df) if current_filter.strip(): try: mask = parse_and_filter(filtered, current_filter) df_queried = filtered[mask] if not df_queried.empty: filtered = df_queried except Exception: # invalid filter query pass return gr.update(choices=new_models, value=new_models[0] if new_models else None), format_df(filtered) task_dropdown.change( fn=update_on_task, inputs=[task_dropdown, filter_state], outputs=[model_dropdown, data_table] ) def update_on_model(task_name, model_id, current_filter): filtered = filter_data(task_name, model_id, df) if current_filter.strip(): try: mask = parse_and_filter(filtered, current_filter) filtered = filtered[mask] except Exception: pass return format_df(filtered) model_dropdown.change( fn=update_on_model, inputs=[task_dropdown, model_dropdown, filter_state], outputs=data_table ) def update_pareto_plot_and_summary(task_name, model_id, metric_x, metric_y, current_filter): filtered = filter_data(task_name, model_id, df) if current_filter.strip(): try: mask = parse_and_filter(filtered, current_filter) filtered = filtered[mask] except Exception as e: return generate_pareto_plot(filtered, metric_x, metric_y), f"Filter error: {e}" pareto_df = compute_pareto_frontier(filtered, metric_x, metric_y) fig = generate_pareto_plot(filtered, metric_x, metric_y) summary = compute_pareto_summary(filtered, pareto_df, metric_x, metric_y) return fig, summary for comp in [model_dropdown, metric_x_dropdown, metric_y_dropdown]: comp.change( fn=update_pareto_plot_and_summary, inputs=[task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown, filter_state], outputs=[pareto_plot, summary_box], ) def apply_filter(filter_query, task_name, model_id, metric_x, metric_y): filtered = filter_data(task_name, model_id, df) if filter_query.strip(): try: mask = parse_and_filter(filtered, filter_query) filtered = filtered[mask] except Exception as e: # Update the table, plot, and summary even if there is a filter error. 
return ( filter_query, filtered, generate_pareto_plot(filtered, metric_x, metric_y), f"Filter error: {e}", ) pareto_df = compute_pareto_frontier(filtered, metric_x, metric_y) fig = generate_pareto_plot(filtered, metric_x, metric_y) summary = compute_pareto_summary(filtered, pareto_df, metric_x, metric_y) return filter_query, format_df(filtered), fig, summary apply_filter_button.click( fn=apply_filter, inputs=[filter_textbox, task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown], outputs=[filter_state, data_table, pareto_plot, summary_box], ) def reset_filter(task_name, model_id, metric_x, metric_y): filtered = filter_data(task_name, model_id, df) pareto_df = compute_pareto_frontier(filtered, metric_x, metric_y) fig = generate_pareto_plot(filtered, metric_x, metric_y) summary = compute_pareto_summary(filtered, pareto_df, metric_x, metric_y) # Return empty strings to clear the filter state and textbox. return "", "", format_df(filtered), fig, summary reset_filter_button.click( fn=reset_filter, inputs=[task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown], outputs=[filter_state, filter_textbox, data_table, pareto_plot, summary_box], ) gr.Markdown("## Export data") # Export button for CSV download. export_button = gr.Button("Export Filtered Data") export_button.click( fn=lambda task, model: export_csv(filter_data(task, model, df)), inputs=[task_dropdown, model_dropdown], outputs=csv_output, ) demo.load( fn=update_pareto_plot_and_summary, inputs=[task_dropdown, model_dropdown, metric_x_dropdown, metric_y_dropdown, filter_state], outputs=[pareto_plot, summary_box], ) return demo path = os.path.join(os.path.dirname(__file__), "MetaMathQA", "results") df = load_df(path, task_name="MetaMathQA") demo = build_app(df) demo.launch()
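To see what `compute_pareto_frontier` returns, here is a quick check on toy data. It assumes the function has been copied into a standalone script or session; importing `app.py` directly would also try to load results from disk and launch the Gradio demo. The experiment names and metric values are made up for illustration:

```python
import pandas as pd

# Toy results: lower accelerator_memory_max is better, higher test_accuracy is better
# (see metric_preferences above).
df = pd.DataFrame(
    {
        "experiment_name": ["lora", "ia3", "prefix-tuning", "full-ft"],
        "peft_type": ["LORA", "IA3", "PREFIX_TUNING", "NONE"],
        "accelerator_memory_max": [8.0, 6.0, 10.0, 24.0],
        "test_accuracy": [0.52, 0.48, 0.40, 0.55],
    }
)

pareto = compute_pareto_frontier(df, "accelerator_memory_max", "test_accuracy")
print(pareto["experiment_name"].tolist())
# ['lora', 'ia3', 'full-ft'] -- prefix-tuning is dominated by lora on both metrics
```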
peft/method_comparison/app.py/0
{ "file_path": "peft/method_comparison/app.py", "repo_id": "peft", "token_count": 6716 }
218
[tool.black] # Only used by `hf-doc-builder´. line-length = 119 target-version = ['py38'] [tool.ruff] target-version = "py39" line-length = 119 extend-exclude = ["*.ipynb"] [tool.ruff.lint] preview = true explicit-preview-rules = true extend-select = [ "C", # Complexity "E", # PEP8 errors "F", # PEP8 formatting "I", # Import sorting "UP", # Pyupgrade upgrades "W", # PEP8 warnings "PT009", # Pytest assertions "RUF022", # Sorting of __all__ ] ignore = [ "C901", # Function too complex "E501", # Line length (handled by ruff-format) "F841", # unused variable "UP007", # X | Y style Unions "C420", # dict.fromkeys "UP045", # don't force replacing Optional[X] with X | None ] [tool.ruff.lint.isort] lines-after-imports = 2 known-first-party = ["peft"] [tool.pytest] doctest_optionflags = [ "NORMALIZE_WHITESPACE", "ELLIPSIS", "NUMBER", ] [tool.pytest.ini_options] addopts = "--cov=src/peft --cov-report=term-missing --durations=10" markers = [ "single_gpu_tests: tests that run on a single GPU", "multi_gpu_tests: tests that run on multiple GPUs", "regression: whether to run regression suite test", "bitsandbytes: select bitsandbytes integration tests" ]
peft/pyproject.toml/0
{ "file_path": "peft/pyproject.toml", "repo_id": "peft", "token_count": 498 }
219
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import remove_hook_from_submodules from torch import nn from transformers.utils import PushToHubMixin from peft.utils.constants import DUMMY_MODEL_CONFIG from .config import PeftConfig from .peft_model import PeftModel from .tuners import MixedModel from .utils import _set_adapter, _set_trainable def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None: r""" Prepares the model for gradient checkpointing if necessary """ # Note: same as PeftModel._prepare_model_for_gradient_checkpointing if not getattr(model, "is_gradient_checkpointing", True): return model if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) def _check_config_compatible(peft_config: PeftConfig) -> None: from .tuners.mixed import COMPATIBLE_TUNER_TYPES if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES: raise ValueError( f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. " f"Compatible types are: {COMPATIBLE_TUNER_TYPES}" ) class PeftMixedModel(PushToHubMixin, torch.nn.Module): """ PeftMixedModel for loading mixing different types of adapters for inference. This class does not support loading/saving, and it shouldn't usually be initialized directly. Instead, use `get_peft_model` with the argument `mixed=True`. <Tip> Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn more about using different adapter types. </Tip> Example: ```py >>> base_model = ... # load the base model, e.g. from transformers >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval() >>> peft_model.load_adapter(path_to_adapter2, "adapter2") >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters >>> peft_model(data) # forward pass using both adapters ``` Args: model (`torch.nn.Module`): The model to be tuned. config (`PeftConfig`): The config of the model to be tuned. The adapter type must be compatible. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the first adapter. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. 
""" def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() _check_config_compatible(peft_config) _prepare_model_for_gradient_checkpointing(model) self.modules_to_save = None self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name) self.set_modules_to_save(peft_config, adapter_name) self.config = getattr(model, "config", DUMMY_MODEL_CONFIG) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 @property def peft_config(self) -> dict[str, PeftConfig]: return self.base_model.peft_config @property def active_adapter(self) -> str: return self.base_model.active_adapter @property def active_adapters(self) -> list[str]: return self.base_model.active_adapters def get_nb_trainable_parameters(self): r""" Returns the number of trainable parameters and number of all parameters in the model. """ # note: same as PeftModel.get_nb_trainable_parameters trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self): """ Prints the number of trainable parameters in the model. Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number of trainable parameters of the backbone transformer model which can be different. """ # note: same as PeftModel.print_trainable_parameters trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || " f"all params: {all_param:,d} || " f"trainable%: {100 * trainable_params / all_param:.4f}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "base_model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.base_model(*args, **kwargs) def generate(self, *args: Any, **kwargs: Any): """ Generate output. """ return self.base_model.generate(*args, **kwargs) @contextmanager def disable_adapter(self): """ Disables the adapter module. 
""" try: self.base_model.disable_adapter_layers() yield finally: self.base_model.enable_adapter_layers() def add_adapter(self, adapter_name: str, peft_config: PeftConfig, low_cpu_mem_usage: bool = False) -> None: """ Add an adapter to the model based on the passed configuration. This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`]. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the process when loading saved adapters. <Tip> Don't use `low_cpu_mem_usage=True` when creating a new PEFT adapter for training (training is untested and discouraged for PeftMixedModel in general). </Tip> """ _check_config_compatible(peft_config) try: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage) except Exception: # something went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_modules_to_save(peft_config, adapter_name) def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None: if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None: return if self.modules_to_save is None: self.modules_to_save = set(modules_to_save) else: self.modules_to_save.update(modules_to_save) _set_trainable(self, adapter_name, module_names=getattr(peft_config, "modules_to_save", None)) def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: """ Sets the active adapter(s) for the model. Note that the order in which the adapters are applied during the forward pass may not be the same as the order in which they are passed to this function. Instead, the order during the forward pass is determined by the order in which the adapters were loaded into the model. The active adapters only determine which adapters are active during the forward pass, but not the order in which they are applied. Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): The name of the adapter(s) to be activated. """ if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.delete_adapter(adapter_name) def merge_and_unload(self, *args: Any, **kwargs: Any): r""" This method merges the adapter layers into the base model. 
This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self.base_model.merge_and_unload(*args, **kwargs) def unload(self, *args: Any, **kwargs: Any): """ Gets back the base model by removing all the adapter modules without merging. This gives back the original base model. """ return self.base_model.unload(*args, **kwargs) def get_layer_status(self): raise TypeError(f"get_layer_status is not supported for {self.__class__.__name__}.") def get_model_status(self): raise TypeError(f"get_model_status is not supported for {self.__class__.__name__}.") @classmethod def _split_kwargs(cls, kwargs: dict[str, Any]): return PeftModel._split_kwargs(kwargs) def _check_new_adapter_config(self, peft_config: PeftConfig, is_trainable: bool) -> None: return PeftModel._check_new_adapter_config(self, peft_config, is_trainable=is_trainable) def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any): """ Load a trained adapter into the model. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. torch_device (`str`, *optional*, defaults to None): The device to load the adapter on. If `None`, the device will be inferred. autocast_adapter_dtype (`bool`, *optional*, defaults to `True`): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. ephemeral_gpu_offload (`bool`, *optional*, defaults to `False`): Whether to use ephemeral GPU offloading for partially loaded modules. Defaults to `False`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device before loading the saved weights. Useful to speed up the process. kwargs: (`optional`): Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. 
""" # the low_cpu_mem_usage option is handled through kwargs output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs) # TODO: not quite clear why this is necessary but tests fail without it self.set_adapter(self.active_adapters) return output def create_or_update_model_card(self, output_dir: str): raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).") def save_pretrained( self, save_directory: str, safe_serialization: bool = False, selected_adapters: Optional[list[str]] = None, **kwargs: Any, ): raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).") @classmethod def from_pretrained( cls, model: nn.Module, model_id: str | os.PathLike, adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ): r""" Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. Args: model (`nn.Module`): The model to be adapted. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and use for inference config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuration. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device before loading the saved weights. Useful to speed up the process. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. 
""" # note: adapted from PeftModel.from_pretrained from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING, PEFT_TYPE_TO_MIXED_MODEL_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") # note: this is different from PeftModel.from_pretrained if config.peft_type not in PEFT_TYPE_TO_MIXED_MODEL_MAPPING: raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: # note: should not be possible to reach, but just in case raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel model = cls(model, config, adapter_name) # the low_cpu_mem_usage option is handled through kwargs model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model
peft/src/peft/mixed_model.py/0
{ "file_path": "peft/src/peft/mixed_model.py", "repo_id": "peft", "token_count": 8247 }
220
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from .config import TRANSFORMERS_MODEL_CONFIG class _BaseAdaptedAttention(nn.Module): """Base module, which defines adaption prompts for multiple model types.""" def __init__(self, model_type: str, adapter_len: int, model, target_dtype=torch.float32): """ Initialize object. Args: model_type: The transformer model type. This is used to retrieve the right method to compute query states. adapter_len: The length of the adaption prompt to insert. model: The original transformer attention module that is being wrapped. """ if isinstance(model, _BaseAdaptedAttention): raise ValueError("Unable to stack multiple adaption prompts") super().__init__() self.model_type = model_type self.model = model self.adapter_len = adapter_len # Assume all parameters of the attention model we are wrapping are on the same device. device = next(model.parameters()).device # Don't think this was specified in the paper, but we follow the official repo which used an Embedding # which initializes the tokens with standard normal values. # https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L234 # (bsz, adapter_len, hidden_size) if hasattr(self.model, "hidden_size"): # TODO: remove this clause after 2026-01-01 hidden_size = self.model.hidden_size else: # changed in https://github.com/huggingface/transformers/pull/35235 hidden_size = self.model.config.hidden_size if hasattr(self.model, "num_heads"): # TODO: remove this clause after 2026-01-01 self.num_heads = self.model.num_heads else: # changed in https://github.com/huggingface/transformers/pull/35235 self.num_heads = self.model.config.num_attention_heads self.adaption_prompt = nn.Parameter( torch.empty(1, adapter_len, hidden_size, device=device, dtype=target_dtype).normal_() ) # Initialize the gate to 0 as this is "zero-init". 
self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype)) class AdaptedAttentionGPT(_BaseAdaptedAttention): """This module wraps a GPT2Attention module and injects adaption prompts""" def __init__(self, model_type, adapter_len, model): target_dtype = ( model.c_proj.weight.dtype if model.c_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32 ) super().__init__(model_type, adapter_len, model, target_dtype=target_dtype) def forward( self, hidden_states: Optional[tuple[torch.FloatTensor]], layer_past: Optional[tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]: attn_outputs = self.model( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, **kwargs, ) """ Forward pass for the adapter which wraps the GPT2Attention module """ attn_output = attn_outputs[0] add_outputs = attn_outputs[1:] c_attn_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer bsz = attn_output.shape[0] q_len = attn_output.shape[1] embed_dim = attn_output.shape[2] _, key, value = getattr(self.model, c_attn_layer)(self.adaption_prompt).split(embed_dim, dim=2) adapter_k = ( key.view(1, self.adapter_len, self.num_heads, self.model.head_dim).repeat(bsz, 1, 1, 1).transpose(1, 2) ) adapter_v = ( value.view(1, self.adapter_len, self.num_heads, self.model.head_dim).repeat(bsz, 1, 1, 1).transpose(1, 2) ) # recompute query state since it is not returned by GPT2 forward compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states query_states = compute_query_states( self.model, hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states ) previous_dtype = query_states.dtype scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt( self.model.head_dim ) # Upcast attention to fp32 # (bsz, num_heads, q_len, adapter_len) scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype) # (bsz, q_len, num_heads * head_dim) adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1) # Add adaption prompt output to original output. hidden_state = attn_output + adapter_output # Restore original dtype. hidden_state = hidden_state.to(previous_dtype) # add additional attention outputs (attention and cross attention) output = (hidden_state,) + add_outputs return output class AdaptedAttention(_BaseAdaptedAttention): """This module wraps a LLamaAttention module and injects adaption prompts.""" def __init__(self, model_type, adapter_len, model): target_dtype = ( model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32 ) super().__init__(model_type, adapter_len, model, target_dtype=target_dtype) def forward(self, **kwargs): """ Forward pass for the adapter which wraps the original LlamaAttention module. "Official" paper implementation: https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L141 Args: kwargs: See the original LlamaAttention module. 
""" if kwargs.get("output_attention", False): raise NotImplementedError("output_attention is not currently supported.") output, *_ = self.model(**kwargs) bsz = output.shape[0] q_len = output.shape[1] embed_dim = output.shape[2] k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer factor = ( self.model.k_proj.in_features // self.model.k_proj.out_features ) # Mistral has different input and output dimension for k_proj and v_proj layers if k_proj_layer == v_proj_layer: _, key, value = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2) else: key = getattr(self.model, k_proj_layer)(self.adaption_prompt) value = getattr(self.model, v_proj_layer)(self.adaption_prompt) if hasattr(self.model, "num_heads"): # TODO: remove this clause after 2026-01-01 num_heads = self.model.num_heads else: # changed in https://github.com/huggingface/transformers/pull/35235 num_heads = self.model.config.num_attention_heads # (bsz, num_key_value_heads, adapter_len, head_dim) adapter_k = ( key.view(1, self.adapter_len, (num_heads // factor), self.model.head_dim) .repeat(bsz, 1, 1, 1) .transpose(1, 2) ) adapter_v = ( value.view(1, self.adapter_len, (num_heads // factor), self.model.head_dim) .repeat(bsz, 1, 1, 1) .transpose(1, 2) ) # Below is taken from https://github.com/huggingface/transformers/blob/e547458c43dfdbbb8f6a7757237e234c44e20a8f/src/transformers/models/mistral/modeling_mistral.py#L181 # (bsz, num_heads, adapter_len, head_dim) adapter_k = torch.repeat_interleave(adapter_k, repeats=factor, dim=1) adapter_v = torch.repeat_interleave(adapter_v, repeats=factor, dim=1) # Recompute query states. compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states # (bsz, num_heads, q_len, head_dim) query_states = compute_query_states(model=self.model, **kwargs) previous_dtype = query_states.dtype # (bsz, num_heads, q_len, adapter_len) scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt( self.model.head_dim ) # Upcast attention to fp32 # (bsz, num_heads, q_len, adapter_len) scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype) # (bsz, q_len, num_heads * head_dim) adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1) # (bsz, q_len, hidden_size) if o_proj_layer is not None: adapter_output = getattr(self.model, o_proj_layer)(adapter_output) # Add adaption prompt output to original output. output = output + adapter_output # Restore original dtype. output = output.to(previous_dtype) return output, *_
peft/src/peft/tuners/adaption_prompt/layer.py/0
{ "file_path": "peft/src/peft/tuners/adaption_prompt/layer.py", "repo_id": "peft", "token_count": 4483 }
221
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import math import warnings from typing import Any, Literal, Optional import torch import torch.nn as nn from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from .utils import BlockCircularConvolution, get_circulant_fast class C3ALayer(BaseTunerLayer): # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("c3a_kernel",) # All names of other parameters that may contain adapter-related parameters other_param_names = ("block_size",) def __init__(self, base_layer: nn.Module, **kwargs) -> None: self.base_layer = base_layer self.block_size = {} self.c3a_kernel = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.kwargs = kwargs base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): self.in_features, self.out_features = base_layer.in_features, base_layer.out_features else: raise ValueError(f"Unsupported layer type {type(base_layer)}") def get_delta_weight(self, adapter) -> torch.Tensor: if adapter not in self.c3a_kernel.keys(): raise ValueError(f"Adapter {adapter} not found.") base_layer_weight = self.get_base_layer().weight base_layer_weight_dtype = base_layer_weight.dtype c3a_kernel = self.c3a_kernel[adapter] delta_weight = get_circulant_fast(c3a_kernel.to(torch.float32)).to(base_layer_weight_dtype) return delta_weight / base_layer_weight.size(-1) def update_layer(self, adapter_name, block_size, init_weights): if block_size <= 0: raise ValueError(f"`block_size` should be a positive integer value but the value passed is {block_size}") if self.in_features % block_size != 0: raise ValueError( f"The block size should be a factor of the input size. However, the input size is {self.in_features} and the block size is {block_size}" ) if self.out_features % block_size != 0: raise ValueError( f"The block size should be a factor of the output size. 
However, the output size is {self.out_features} and the block size is {block_size}" ) self.block_size[adapter_name] = block_size weight = self.get_base_layer().weight self.c3a_kernel[adapter_name] = nn.Parameter( torch.zeros( self.out_features // block_size, self.in_features // block_size, block_size, # Currently, only fp32 is widely supported for FFT (fp16 is only supported on GPU with shapes of powers # of 2, bf16 lacks FFT support) dtype=torch.float32, device=weight.device, ) ) self.reset_c3a_parameters(adapter_name, init_weights) self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) @torch.no_grad() def reset_c3a_parameters(self, adapter_name, init_weights): if init_weights is True: return if adapter_name in self.c3a_kernel.keys(): if init_weights == "gaussian": nn.init.normal_(self.c3a_kernel[adapter_name]) elif init_weights in ["xavier_uniform", False]: fan_in, fan_out = self.in_features, self.out_features std = 1.0 * math.sqrt(2.0 / float(fan_in + fan_out)) a = math.sqrt(3.0) * std nn.init.uniform_(self.c3a_kernel[adapter_name], -a, a) elif init_weights == "kaiming_uniform": fan_in = self.in_features a = 1.0 * math.sqrt(1.0 / float(fan_in)) nn.init.uniform_(self.c3a_kernel[adapter_name], -a, a) else: raise ValueError(f"Unknown init_weights: {init_weights}") class C3ALinear(nn.Module, C3ALayer): # Lora implemented in a dense layer def __init__( self, base_layer, adapter_name: str, block_size: int, init_weights: bool | Literal["gaussian", "kaiming_uniform", "xavier_uniform"], **kwargs, ) -> None: super().__init__() C3ALayer.__init__(self, base_layer, **kwargs) self._active_adapter = adapter_name self.update_layer(adapter_name, block_size, init_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.c3a_kernel.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) orig_weights = orig_weights + delta_weight if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: delta_weight = self.get_delta_weight(active_adapter) base_layer.weight.data = base_layer.weight.data + delta_weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.c3a_kernel.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) x = x.to(torch.float32) for active_adapter in self.active_adapters: if active_adapter not in self.c3a_kernel.keys(): continue c3a_kernel = self.c3a_kernel[active_adapter].to(torch.float32) x = BlockCircularConvolution.apply(x, c3a_kernel) / x.size(-1) result += x.to(result.dtype) result = result.to(previous_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "c3a." + rep
peft/src/peft/tuners/c3a/layer.py/0
{ "file_path": "peft/src/peft/tuners/c3a/layer.py", "repo_id": "peft", "token_count": 3835 }
222
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class IA3Config(PeftConfig): """ This is the configuration class to store the configuration of a [`IA3Model`]. Args: target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. feedforward_modules (`Optional[Union[List[str], str]]`): The names of the modules to be treated as feedforward modules, as in the original paper. These modules will have (IA)³ vectors multiplied to the input, instead of the output. `feedforward_modules` must be a name or a subset of names present in `target_modules`. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. modules_to_save (`Optional[List[str]]`): List of modules apart from (IA)³ layers to be set as trainable and saved in the final checkpoint. init_ia3_weights (`bool`): Whether to initialize the vectors in the (IA)³ layers, defaults to `True`. Setting this to `False` is discouraged. """ target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with (IA)³." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." "If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you should specify the target modules manually." 
), }, ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from (IA)³."}, ) feedforward_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "List of module names or a regex expression of module names which are feedforward" "For example, ['output.dense']" }, ) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_ia3_weights: bool = field( default=True, metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."}, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.IA3 self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) self.feedforward_modules = ( set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules ) # check if feedforward_modules is a subset of target_modules. run the check only if both are sets if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set): if not self.feedforward_modules.issubset(self.target_modules): raise ValueError("`feedforward_modules` should be a subset of `target_modules`")
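A brief usage sketch for this config; the module names follow the common T5 mapping and should be adjusted for other architectures:

```python
from transformers import AutoModelForSeq2SeqLM
from peft import IA3Config, get_peft_model

# "wo" is both a target and a feedforward module; feedforward_modules must be a
# subset of target_modules, as enforced in __post_init__ above.
config = IA3Config(
    task_type="SEQ_2_SEQ_LM",
    target_modules=["k", "v", "wo"],
    feedforward_modules=["wo"],
)

model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = get_peft_model(model, config)
model.print_trainable_parameters()
```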
peft/src/peft/tuners/ia3/config.py/0
{ "file_path": "peft/src/peft/tuners/ia3/config.py", "repo_id": "peft", "token_count": 2119 }
223
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Optional import torch from peft.import_utils import is_aqlm_available from peft.tuners.lora.layer import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer if is_aqlm_available(): from aqlm import QuantizedLinear class AqlmLoraLinear(torch.nn.Module, LoraLayer): def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, lora_bias: bool = False, **kwargs, ): if use_dora: raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, lora_bias=lora_bias, ) def forward(self, x: torch.Tensor): # note: logic differs from default Linear because merging is not supported result = self.base_layer(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = self._cast_input_dtype(x, lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result += output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102 # def reset_lora_parameters(self, adapter_name): # if adapter_name in self.lora_A.keys(): # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight) # torch.nn.init.zeros_(self.lora_B[adapter_name].weight) def dispatch_aqlm( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear): new_module = AqlmLoraLinear(target, adapter_name, **kwargs) target.qweight = target_base_layer.codes return new_module
peft/src/peft/tuners/lora/aqlm.py/0
{ "file_path": "peft/src/peft/tuners/lora/aqlm.py", "repo_id": "peft", "token_count": 1647 }
224
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from abc import abstractmethod from dataclasses import dataclass, field from typing import Any, Optional, Union import torch import torch.nn as nn from tqdm import tqdm from peft.config import PeftConfig from peft.utils import ( ModulesToSaveWrapper, _get_submodules, ) from .tuners_utils import BaseTuner, BaseTunerLayer, check_adapters_to_merge, check_target_module_exists @dataclass class LycorisConfig(PeftConfig): r""" A base config for LyCORIS like adapters """ rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{'^model.decoder.layers.0.encoder_attn.k_proj': 16}`." ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. " "For example, `{'^model.decoder.layers.0.encoder_attn.k_proj': 16}`." ) }, ) class LycorisLayer(BaseTunerLayer): r""" A base layer for LyCORIS like adapters """ # adapter_layer_names needs to be defined on the child class other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout") def __init__(self, base_layer: nn.Module) -> None: self.base_layer = base_layer self.r = {} self.alpha = {} self.scaling = {} self.rank_dropout = {} self.rank_dropout_scale = {} self.module_dropout = {} # Tuner info self._disable_adapters = False self.merged_adapters = [] # flag to enable/disable casting of input to weight dtype during forward call self.cast_input_dtype_enabled = True @property @abstractmethod def _available_adapters(self) -> set[str]: ... def _init_empty_weights(self, cls, *args, **kwargs) -> None: # A helper method that allows to initialize the layer of the given class without spending time to initialize the # model weights. The implementation is inspired by # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but this function cannot be used # directly. # Instead of this approach, it would be possible to bypass the __init__ of the class but that runs the risk of # omitting important logic inside that __init__. kwargs = kwargs.copy() final_device = kwargs.pop("device", "cpu") cls.__init__(self, *args, device="meta", **kwargs) self.to_empty(device=final_device) @abstractmethod def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs): ... # TODO: refactor LoRA to use the same approach @abstractmethod def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: """Activations added on top of the base layer output (i.e. after the base layer forward pass)""" @abstractmethod def get_delta_weight(self, adapter_name: str) -> torch.Tensor: ... 
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If `None`, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self._available_adapters: base_layer = self.get_base_layer() if safe_merge: orig_weights = base_layer.weight.data.clone() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) @abstractmethod def reset_adapter_parameters(self, adapter_name: str): ... def set_scale(self, adapter, scale): if adapter not in self._available_adapters: # Ignore the case where the adapter is not in the layer return self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter] def scale_layer(self, scale: float) -> None: if scale == 1: return for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue self.scaling[active_adapter] *= scale def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self._available_adapters: self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def unscale_layer(self, scale=None) -> None: for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue if scale is None: self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter] else: self.scaling[active_adapter] /= scale @abstractmethod def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs): ... class LycorisTuner(BaseTuner): r""" A base tuner for LyCORIS like adapters Args: model ([`torch.nn.Module`]): The model to be adapted. config ([`LoraConfig`]): The configuration of the Lora model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. 
""" prefix: str layers_mapping: dict[type[torch.nn.Module], type[LycorisLayer]] def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) @staticmethod def _check_target_module_exists(config, key): return check_target_module_exists(config, key) @abstractmethod def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LycorisLayer, nn.Module], target_name, parent, current_key, ): ... @classmethod def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer: # Find corresponding subtype of provided target module new_module_cls = None for subtype, target_cls in cls.layers_mapping.items(): if ( hasattr(target, "base_layer") and isinstance(target.get_base_layer(), subtype) and isinstance(target, BaseTunerLayer) ): # nested tuner layers are allowed new_module_cls = target_cls break elif isinstance(target, subtype): new_module_cls = target_cls break # We didn't find corresponding type, so adapter for this layer is not supported if new_module_cls is None: supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) raise ValueError( f"Target module of type {type(target)} not supported, " f"currently only adapters for {supported_modules} are supported" ) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Conv2d): new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) else: supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) raise ValueError( f"Target module of type {type(target)} not supported, " f"currently only adapters for {supported_modules} are supported" ) return new_module def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: raise ValueError("Please specify `target_modules` in `peft_config`") return peft_config def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) meta = torch.device("meta") # dispatch to correct device for name, module in new_module.named_modules(): if self.prefix in name: if not any(p.device == meta for p in module.parameters()): module.to(child.weight.device) def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def _unload_and_optionally_merge( self, merge: bool = True, progressbar: bool = False, safe_merge: bool = False, adapter_names: 
Optional[list[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge LOHA layers when the model is gptq quantized") self._unloading_checks(adapter_names) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` new_module = target.modules_to_save[target.active_adapter] if hasattr(new_module, "base_layer"): # check if the module is itself a tuner layer if merge: new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) new_module = new_module.get_base_layer() setattr(parent, target_name, new_module) return self.model def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. """ self._set_adapter_layers(enabled=False) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> torch.nn.Module: r""" This method merges the adapter layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, LycorisLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (`str`): Name of the adapter to be deleted. 
""" if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LycorisLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] self._delete_auxiliary_adapter(adapter_name, new_active_adapters=new_adapter)
peft/src/peft/tuners/lycoris_utils.py/0
{ "file_path": "peft/src/peft/tuners/lycoris_utils.py", "repo_id": "peft", "token_count": 7468 }
225
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Optional import torch from peft.import_utils import is_gptqmodel_available from peft.tuners.oft.layer import OFTLayer from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import get_auto_gptq_quant_linear class GPTQOFTLinear(torch.nn.Module, OFTLayer): def __init__( self, base_layer, adapter_name: str, r: int = 8, oft_block_size: int = 0, module_dropout: float = 0.0, coft: bool = False, eps: float = 6e-5, block_share: bool = False, use_cayley_neumann: bool = False, num_cayley_neumann_terms: int = 5, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) init_weights: bool = True, **kwargs, ): super().__init__() OFTLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer( adapter_name, r, oft_block_size=oft_block_size, module_dropout=module_dropout, coft=coft, eps=eps, block_share=block_share, init_weights=init_weights, use_cayley_neumann=use_cayley_neumann, num_cayley_neumann_terms=num_cayley_neumann_terms, ) def forward(self, x: torch.Tensor): # note: logic differs from default Linear because merging is not supported result = self.quant_linear_module(x) if self.disable_adapters: return self.quant_linear_module(x) for active_adapter in self.active_adapters: if active_adapter not in self.oft_R.keys(): continue oft_R = self.oft_R[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = x.dtype x = self._cast_input_dtype(x, oft_R.weight.dtype) x = oft_R(x) result = self.quant_linear_module(x) if requires_conversion: result = result.to(expected_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "oft." + rep def dispatch_gptq( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target cfg = kwargs.get("gptq_quantization_config", None) if is_gptqmodel_available(): from gptqmodel.nn_modules.qlinear import BaseQuantLinear if isinstance(target_base_layer, BaseQuantLinear): new_module = GPTQOFTLinear(target, adapter_name, **kwargs) target.qweight = target_base_layer.qweight else: quant_linear = get_auto_gptq_quant_linear(cfg) if quant_linear is not None and isinstance(target_base_layer, quant_linear): new_module = GPTQOFTLinear(target, adapter_name, **kwargs) target.qweight = target_base_layer.qweight return new_module
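`dispatch_gptq` above is the hook that routes GPTQ-quantized linear layers to `GPTQOFTLinear` when an OFT adapter is attached. The sketch below mirrors the pattern used by the GPTQ test suite later in this collection; the model id and quantization settings are illustrative.

```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig
from peft import OFTConfig, get_peft_model, prepare_model_for_kbit_training

# assumed GPTQ-quantized checkpoint (the same one the PEFT tests use)
quantization_config = GPTQConfig(bits=4, use_exllama=False)
model = AutoModelForCausalLM.from_pretrained(
    "marcsun13/opt-350m-gptq-4bit",
    torch_dtype=torch.float16,
    device_map="auto",
    quantization_config=quantization_config,
)
model = prepare_model_for_kbit_training(model)

# for quantized target modules, dispatch_gptq() returns a GPTQOFTLinear that applies
# the orthogonal rotation oft_R to the input before the quantized matmul
config = OFTConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"])
peft_model = get_peft_model(model, config)
```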
peft/src/peft/tuners/oft/gptq.py/0
{ "file_path": "peft/src/peft/tuners/oft/gptq.py", "repo_id": "peft", "token_count": 1704 }
226
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module is intended to store mask functions for use inside SHiRA construction. The mask functions are required to have a specific signature as shown below. Required positional arguments: base_layer - This is the linear layer where the shira adapter will be attached. r - This parameter is used to determine the number of parameters in the shira adapter in a way that is consistent with LoRA sizing. SHiRA is a high rank adapter. Setting this parameter does not restrict the adapter rank. Keyword arguments can be provided as needed by the particular mask function implementation. Return: mask - this is a torch.tensor of the same shape as base_layer.weight that contains 0s and 1s with the same dtype and device as base_layer.weight If you would like to attach SHiRA adapters to a model using PEFT methods (such as get_peft_model()), using more arguments than the provided positional arguments, you can create the mask function reference like the following: ``` def create_mask_function_reference(**my_kwargs): def mask_fn(base_layer, r): ... your implementation here that might use my_kwargs ... return mask return mask_fn ``` Then, you can create your peft model with custom SHiRA mask as follows: ``` model = ... my_kwargs = ... mask_fn = create_mask_function_reference(**my_kwargs) peft_config = ShiraConfig(r=4, mask_type='my_custom_mask') peft_config.mask_fn = mask_fn peft_model = get_peft_model(model, peft_config) ``` Complete training examples are provided in the examples/shira/ directory. """ from typing import Optional import torch import torch.nn as nn def random_mask(base_layer: nn.Module, r: int, random_seed: Optional[int] = None, **kwargs) -> torch.tensor: shape = base_layer.weight.shape num_shira_weights = r * (shape[0] + shape[1]) random_generator = torch.Generator() if random_seed is not None: random_generator.manual_seed(random_seed) idx = (torch.randperm(base_layer.weight.numel(), generator=random_generator)[:num_shira_weights]).to( base_layer.weight.device ) val = torch.ones_like(idx.type(base_layer.weight.dtype)) mask = torch.zeros_like(base_layer.weight.view(1, -1)) mask = mask.scatter_(1, idx.unsqueeze(0), val.unsqueeze(0)).view(shape) return mask
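The module docstring above fixes the contract a mask function must satisfy (positional `base_layer` and `r`, returning a 0/1 mask with the same shape, dtype and device as `base_layer.weight`), and `random_mask` is the stock implementation. As a sketch of a custom mask under the same contract, the function below keeps the `r * (m + n)` largest-magnitude base weights; the function name and selection rule are illustrative, not part of PEFT.

```py
import torch
import torch.nn as nn


def top_magnitude_mask(base_layer: nn.Module, r: int, **kwargs) -> torch.Tensor:
    """Keep the r * (fan_out + fan_in) entries of base_layer.weight with the largest magnitude."""
    weight = base_layer.weight
    shape = weight.shape
    # same parameter budget as random_mask above, capped at the total number of weights
    num_shira_weights = min(r * (shape[0] + shape[1]), weight.numel())
    idx = torch.topk(weight.abs().flatten(), num_shira_weights).indices
    mask = torch.zeros_like(weight).flatten()  # inherits dtype and device from the base weight
    mask[idx] = 1.0
    return mask.view(shape)
```

It would be plugged in exactly as the docstring describes, by assigning `peft_config.mask_fn = top_magnitude_mask` on a `ShiraConfig` before calling `get_peft_model`.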
peft/src/peft/tuners/shira/mask_functions.py/0
{ "file_path": "peft/src/peft/tuners/shira/mask_functions.py", "repo_id": "peft", "token_count": 970 }
227
# Note: These tests were copied from test_common_gpu.py and test_gpu_examples.py as they can run on CPU too. # # Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import tempfile import unittest import pytest import torch from accelerate.utils.memory import clear_device_cache from transformers import ( AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling, Trainer, TrainingArguments, ) from peft import ( AdaLoraConfig, LoraConfig, OFTConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training, ) from peft.tuners.lora import GPTQLoraLinear from peft.utils import SAFETENSORS_WEIGHTS_NAME, infer_device from .testing_utils import ( device_count, load_dataset_english_quotes, require_gptqmodel, require_optimum, require_torch_multi_accelerator, ) @require_gptqmodel class PeftGPTQModelCommonTests(unittest.TestCase): r""" A common tester to run common operations that are performed on GPU/CPU such as generation, loading in 8bit, etc. """ def setUp(self): self.causal_lm_model_id = "facebook/opt-350m" self.device = infer_device() def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ clear_device_cache(garbage_collection=True) gc.collect() def test_lora_gptq_quantization_from_pretrained_safetensors(self): r""" Tests that the gptqmodel quantization using LoRA works as expected with safetensors weights. """ from transformers import GPTQConfig model_id = "marcsun13/opt-350m-gptq-4bit" quantization_config = GPTQConfig(bits=4, use_exllama=False) kwargs = { "pretrained_model_name_or_path": model_id, "torch_dtype": torch.float16, "device_map": "auto", "quantization_config": quantization_config, } model = AutoModelForCausalLM.from_pretrained(**kwargs) model = prepare_model_for_kbit_training(model) config = LoraConfig(task_type="CAUSAL_LM") peft_model = get_peft_model(model, config) peft_model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device)) with tempfile.TemporaryDirectory() as tmp_dir: peft_model.save_pretrained(tmp_dir) model = AutoModelForCausalLM.from_pretrained(**kwargs) model = PeftModel.from_pretrained(model, tmp_dir) model = prepare_model_for_kbit_training(model) model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device)) # loading a 2nd adapter works, #1239 model.load_adapter(tmp_dir, "adapter2") model.set_adapter("adapter2") model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device)) # check that both adapters are in the same layer assert "default" in model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A assert "adapter2" in model.base_model.model.model.decoder.layers[0].self_attn.q_proj.lora_A def test_oft_gptq_quantization_from_pretrained_safetensors(self): r""" Tests that the gptqmodel quantization using OFT works as expected with safetensors weights. 
""" from transformers import GPTQConfig model_id = "marcsun13/opt-350m-gptq-4bit" quantization_config = GPTQConfig(bits=4, use_exllama=False) kwargs = { "pretrained_model_name_or_path": model_id, "torch_dtype": torch.float16, "device_map": "auto", "quantization_config": quantization_config, } model = AutoModelForCausalLM.from_pretrained(**kwargs) model = prepare_model_for_kbit_training(model) config = OFTConfig(task_type="CAUSAL_LM") peft_model = get_peft_model(model, config) peft_model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device)) with tempfile.TemporaryDirectory() as tmp_dir: peft_model.save_pretrained(tmp_dir) model = AutoModelForCausalLM.from_pretrained(**kwargs) model = PeftModel.from_pretrained(model, tmp_dir) model = prepare_model_for_kbit_training(model) model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device)) # loading a 2nd adapter works, #1239 model.load_adapter(tmp_dir, "adapter2") model.set_adapter("adapter2") model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(peft_model.device)) # check that both adapters are in the same layer assert "default" in model.base_model.model.model.decoder.layers[0].self_attn.q_proj.oft_R assert "adapter2" in model.base_model.model.model.decoder.layers[0].self_attn.q_proj.oft_R @require_gptqmodel @require_optimum class PeftGPTQModelTests(unittest.TestCase): r""" GPTQ + peft tests """ def setUp(self): from transformers import GPTQConfig self.causal_lm_model_id = "marcsun13/opt-350m-gptq-4bit" self.quantization_config = GPTQConfig(bits=4, backend="auto_trainable") self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ clear_device_cache(garbage_collection=True) def _check_inference_finite(self, model, batch): # try inference without Trainer class training = model.training model.eval() output = model(**batch.to(model.device)) assert torch.isfinite(output.logits).all() model.train(training) def test_causal_lm_training(self): r""" Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset_english_quotes() data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None def test_oft_causal_lm_training(self): r""" Test the CausalLM training on a single GPU device. 
The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) config = OFTConfig( r=0, oft_block_size=8, target_modules=["q_proj", "v_proj"], bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset_english_quotes() data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.single_gpu_tests def test_adalora_causalLM(self): r""" Tests the gptq training with adalora """ model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_kbit_training(model) peft_config = AdaLoraConfig( total_step=40, init_r=6, target_r=4, tinit=10, tfinal=20, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, peft_config) data = load_dataset_english_quotes() data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True) self._check_inference_finite(model, batch) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests @require_torch_multi_accelerator def test_causal_lm_training_multi_accelerator(self): r""" Test the CausalLM training on a multi-accelerator device. The test would simply fail if the adapters are not set correctly. 
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) assert set(model.hf_device_map.values()) == set(range(device_count)) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset_english_quotes() data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None @pytest.mark.multi_gpu_tests @require_torch_multi_accelerator def test_oft_causal_lm_training_multi_accelerator(self): r""" Test the CausalLM training on a multi-accelerator device. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) assert set(model.hf_device_map.values()) == set(range(device_count)) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = OFTConfig( r=0, oft_block_size=8, target_modules=["q_proj", "v_proj"], bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset_english_quotes() data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) assert "adapter_config.json" in os.listdir(tmp_dir) assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir) # assert loss is not None assert trainer.state.log_history[-1]["train_loss"] is not None def test_non_default_adapter_name(self): # See issue 1346 config = LoraConfig( r=16, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM", ) # default adapter name model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config) n_trainable_default, n_total_default = model.get_nb_trainable_parameters() # other adapter name model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = 
prepare_model_for_kbit_training(model) model = get_peft_model(model, config, adapter_name="other") n_trainable_other, n_total_other = model.get_nb_trainable_parameters() assert n_trainable_other > 0 # sanity check assert n_trainable_default == n_trainable_other assert n_total_default == n_total_other def test_oft_non_default_adapter_name(self): # See issue 1346 config = OFTConfig( r=0, oft_block_size=8, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM", ) # default adapter name model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config) n_trainable_default, n_total_default = model.get_nb_trainable_parameters() # other adapter name model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) model = get_peft_model(model, config, adapter_name="other") n_trainable_other, n_total_other = model.get_nb_trainable_parameters() assert n_trainable_other > 0 # sanity check assert n_trainable_default == n_trainable_other assert n_total_default == n_total_other def test_load_lora(self): model_id = "ModelCloud/Llama-3.2-1B-gptqmodel-ci-4bit" adapter_id = "ModelCloud/Llama-3.2-1B-gptqmodel-ci-4bit-lora" model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto") model.load_adapter(adapter_id) # assert dynamic rank v_proj_module = model.model.layers[5].self_attn.v_proj assert isinstance(v_proj_module, GPTQLoraLinear) assert v_proj_module.lora_A["default"].weight.data.shape[0] == 128 assert v_proj_module.lora_B["default"].weight.data.shape[1] == 128 gate_proj_module = model.model.layers[5].mlp.gate_proj assert isinstance(gate_proj_module, GPTQLoraLinear) assert gate_proj_module.lora_A["default"].weight.data.shape[0] == 256 assert gate_proj_module.lora_B["default"].weight.data.shape[1] == 256 tokenizer = AutoTokenizer.from_pretrained(model_id) inp = tokenizer("Capital of France is", return_tensors="pt").to(model.device) tokens = model.generate(**inp)[0] result = tokenizer.decode(tokens) assert "paris" in result.lower()
peft/tests/test_gptqmodel.py/0
{ "file_path": "peft/tests/test_gptqmodel.py", "repo_id": "peft", "token_count": 10299 }
228
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from peft import PeftModel, PolyConfig, TaskType, get_peft_model class TestPoly(unittest.TestCase): def test_poly(self): torch.manual_seed(0) model_name_or_path = "google/flan-t5-small" atol, rtol = 1e-6, 1e-6 r = 8 # rank of lora in poly n_tasks = 3 # number of tasks n_skills = 2 # number of skills (loras) n_splits = 4 # number of heads lr = 1e-2 num_epochs = 10 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) peft_config = PolyConfig( task_type=TaskType.SEQ_2_SEQ_LM, poly_type="poly", r=r, n_tasks=n_tasks, n_skills=n_skills, n_splits=n_splits, ) model = get_peft_model(base_model, peft_config) # generate some dummy data text = os.__doc__.splitlines() assert len(text) > 10 inputs = tokenizer(text, return_tensors="pt", padding=True) inputs["task_ids"] = torch.arange(len(text)) % n_tasks inputs["labels"] = tokenizer((["A", "B"] * 100)[: len(text)], return_tensors="pt")["input_ids"] # simple training loop model.train() optimizer = torch.optim.Adam(model.parameters(), lr=lr) losses = [] for _ in range(num_epochs): outputs = model(**inputs) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() losses.append(loss.item()) # loss improved by at least 50% assert losses[-1] < (0.5 * losses[0]) # check that saving and loading works torch.manual_seed(0) model.eval() logits_before = model(**inputs).logits tokens_before = model.generate(**inputs) with model.disable_adapter(): logits_disabled = model(**inputs).logits tokens_disabled = model.generate(**inputs) assert not torch.allclose(logits_before, logits_disabled, atol=atol, rtol=rtol) assert not torch.allclose(tokens_before, tokens_disabled, atol=atol, rtol=rtol) # saving and loading with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) base_model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) loaded = PeftModel.from_pretrained(base_model, tmp_dir) torch.manual_seed(0) output_after = loaded(**inputs).logits tokens_after = loaded.generate(**inputs) assert torch.allclose(logits_before, output_after, atol=atol, rtol=rtol) assert torch.allclose(tokens_before, tokens_after, atol=atol, rtol=rtol)
peft/tests/test_poly.py/0
{ "file_path": "peft/tests/test_poly.py", "repo_id": "peft", "token_count": 1541 }
229
#!/usr/bin/env python3 """ Checkpoint Cleaning Script Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc. and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256 calculation for model zoo compatibility. Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) """ import torch import argparse import os import hashlib import shutil import tempfile from timm.models import load_state_dict try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner') parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--output', default='', type=str, metavar='PATH', help='output path') parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', help='use ema version of weights if present') parser.add_argument('--no-hash', dest='no_hash', action='store_true', help='no hash in output filename') parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true', help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint') parser.add_argument('--safetensors', action='store_true', help='Save weights using safetensors instead of the default torch way (pickle).') def main(): args = parser.parse_args() if os.path.exists(args.output): print("Error: Output filename ({}) already exists.".format(args.output)) exit(1) clean_checkpoint( args.checkpoint, args.output, not args.no_use_ema, args.no_hash, args.clean_aux_bn, safe_serialization=args.safetensors, ) def clean_checkpoint( checkpoint, output, use_ema=True, no_hash=False, clean_aux_bn=False, safe_serialization: bool=False, ): # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save if checkpoint and os.path.isfile(checkpoint): print("=> Loading checkpoint '{}'".format(checkpoint)) state_dict = load_state_dict(checkpoint, use_ema=use_ema) new_state_dict = {} for k, v in state_dict.items(): if clean_aux_bn and 'aux_bn' in k: # If all aux_bn keys are removed, the SplitBN layers will end up as normal and # load with the unmodified model using BatchNorm2d. continue name = k[7:] if k.startswith('module.') else k new_state_dict[name] = v print("=> Loaded state_dict from '{}'".format(checkpoint)) ext = '' if output: checkpoint_root, checkpoint_base = os.path.split(output) checkpoint_base, ext = os.path.splitext(checkpoint_base) else: checkpoint_root = '' checkpoint_base = os.path.split(checkpoint)[1] checkpoint_base = os.path.splitext(checkpoint_base)[0] temp_filename = '__' + checkpoint_base if safe_serialization: assert _has_safetensors, "`pip install safetensors` to use .safetensors" safetensors.torch.save_file(new_state_dict, temp_filename) else: torch.save(new_state_dict, temp_filename) with open(temp_filename, 'rb') as f: sha_hash = hashlib.sha256(f.read()).hexdigest() if ext: final_ext = ext else: final_ext = ('.safetensors' if safe_serialization else '.pth') if no_hash: final_filename = checkpoint_base + final_ext else: final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + final_ext shutil.move(temp_filename, os.path.join(checkpoint_root, final_filename)) print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash)) return final_filename else: print("Error: Checkpoint ({}) doesn't exist".format(checkpoint)) return '' if __name__ == '__main__': main()
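Besides the CLI entry point, `clean_checkpoint()` can be called directly from Python, which is convenient inside a training pipeline. The paths below are placeholders, and the import assumes the script is on the path (e.g. running from the repository root).

```py
from clean_checkpoint import clean_checkpoint

# strip optimizer state and non-EMA weights from a raw training checkpoint and
# write a CPU-only state_dict whose filename carries a SHA256 prefix
final_name = clean_checkpoint(
    checkpoint="output/train/model_best.pth.tar",  # placeholder input path
    output="release/model_best_clean.pth",         # placeholder output path
    use_ema=True,
    safe_serialization=False,
)
print(final_name)  # e.g. "model_best_clean-abcd1234.pth"
```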
pytorch-image-models/clean_checkpoint.py/0
{ "file_path": "pytorch-image-models/clean_checkpoint.py", "repo_id": "pytorch-image-models", "token_count": 1771 }
230
# CSP-ResNet **CSPResNet** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNet](https://paperswithcode.com/method/resnet). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('cspresnet50', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `cspresnet50`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('cspresnet50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
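The cross-stage split-and-merge described at the top of this page can be sketched in a few lines of PyTorch. This is a conceptual illustration of the idea only, not the actual `timm` `cspnet.py` implementation:

```py
import torch
import torch.nn as nn

class CrossStagePartial(nn.Module):
    """Conceptual CSP block: split the feature map, run one part through the stage, re-merge."""

    def __init__(self, channels: int, stage: nn.Module):
        super().__init__()
        self.split = channels // 2
        self.stage = stage  # e.g. a stack of residual blocks acting on the second partition
        self.transition = nn.Conv2d(channels, channels, kernel_size=1, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        xa, xb = x[:, : self.split], x[:, self.split :]  # partition the base layer's feature map
        xb = self.stage(xb)                              # only one partition passes through the stage
        return self.transition(torch.cat([xa, xb], dim=1))  # cross-stage merge

# illustrative usage: a 64-channel block whose "stage" is a single 3x3 conv
block = CrossStagePartial(64, nn.Sequential(nn.Conv2d(32, 32, 3, padding=1), nn.ReLU()))
out = block(torch.randn(1, 64, 56, 56))
```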
## Citation ```BibTeX @misc{wang2019cspnet, title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, year={2019}, eprint={1911.11929}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: CSP ResNet Paper: Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN' URL: https://paperswithcode.com/paper/cspnet-a-new-backbone-that-can-enhance Models: - Name: cspresnet50 In Collection: CSP ResNet Metadata: FLOPs: 5924992000 Parameters: 21620000 File Size: 86679303 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - Label Smoothing - Polynomial Learning Rate Decay - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: cspresnet50 LR: 0.1 Layers: 50 Crop Pct: '0.887' Momentum: 0.9 Batch Size: 128 Image Size: '256' Weight Decay: 0.005 Interpolation: bilinear Training Steps: 8000000 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L415 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.57% Top 5 Accuracy: 94.71% -->
pytorch-image-models/hfdocs/source/models/csp-resnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/csp-resnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1709 }
231
# RegNetX **RegNetX** is a convolutional network design space with simple, regular models with parameters: depth \\( d \\), initial width \\( w_{0} > 0 \\), and slope \\( w_{a} > 0 \\), and generates a different block width \\( u_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): \\( u_{j} = w_{0} + w_{a}\cdot{j} \\) For **RegNetX** we have additional restrictions: we set \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w_{m} \geq 2 \\) (the width multiplier). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('regnetx_002', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `regnetx_002`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('regnetx_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation ```BibTeX @misc{radosavovic2020designing, title={Designing Network Design Spaces}, author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, year={2020}, eprint={2003.13678}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RegNetX Paper: Title: Designing Network Design Spaces URL: https://paperswithcode.com/paper/designing-network-design-spaces Models: - Name: regnetx_002 In Collection: RegNetX Metadata: FLOPs: 255276032 Parameters: 2680000 File Size: 10862199 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_002 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L337 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 68.75% Top 5 Accuracy: 88.56% - Name: regnetx_004 In Collection: RegNetX Metadata: FLOPs: 510619136 Parameters: 5160000 File Size: 20841309 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_004 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L343 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.39% Top 5 Accuracy: 90.82% - Name: regnetx_006 In Collection: RegNetX Metadata: FLOPs: 771659136 Parameters: 6200000 File Size: 24965172 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_006 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L349 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.84% Top 5 Accuracy: 91.68% - Name: regnetx_008 In Collection: RegNetX Metadata: FLOPs: 1027038208 Parameters: 7260000 File Size: 29235944 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 
GPUs ID: regnetx_008 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L355 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.05% Top 5 Accuracy: 92.34% - Name: regnetx_016 In Collection: RegNetX Metadata: FLOPs: 2059337856 Parameters: 9190000 File Size: 36988158 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_016 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L361 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.95% Top 5 Accuracy: 93.43% - Name: regnetx_032 In Collection: RegNetX Metadata: FLOPs: 4082555904 Parameters: 15300000 File Size: 61509573 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_032 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L367 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.15% Top 5 Accuracy: 94.09% - Name: regnetx_040 In Collection: RegNetX Metadata: FLOPs: 5095167744 Parameters: 22120000 File Size: 88844824 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_040 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L373 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.48% Top 5 Accuracy: 94.25% - Name: regnetx_064 In Collection: RegNetX Metadata: FLOPs: 8303405824 Parameters: 26210000 File Size: 105184854 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - 
ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_064 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L379 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.06% Top 5 Accuracy: 94.47% - Name: regnetx_080 In Collection: RegNetX Metadata: FLOPs: 10276726784 Parameters: 39570000 File Size: 158720042 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_080 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L385 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.21% Top 5 Accuracy: 94.55% - Name: regnetx_120 In Collection: RegNetX Metadata: FLOPs: 15536378368 Parameters: 46110000 File Size: 184866342 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_120 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L391 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.61% Top 5 Accuracy: 94.73% - Name: regnetx_160 In Collection: RegNetX Metadata: FLOPs: 20491740672 Parameters: 54280000 File Size: 217623862 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_160 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L397 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.84% Top 5 Accuracy: 94.82% - Name: regnetx_320 In Collection: RegNetX Metadata: FLOPs: 40798958592 Parameters: 107810000 File Size: 431962133 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU Tasks: - Image Classification Training Techniques: - 
SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnetx_320 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L403 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.25% Top 5 Accuracy: 95.03% -->
pytorch-image-models/hfdocs/source/models/regnetx.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/regnetx.mdx", "repo_id": "pytorch-image-models", "token_count": 6566 }
232
# Results

CSV files containing ImageNet-1K and out-of-distribution (OOD) test set validation results for all models with pretrained weights are located in the repository [results folder](https://github.com/rwightman/pytorch-image-models/tree/master/results).

## Self-trained Weights

The table below includes ImageNet-1k validation results of model weights that I've trained myself. It is not updated as frequently as the CSV results outputs linked above.

|Model | Acc@1 (Err) | Acc@5 (Err) | Param # (M) | Interpolation | Image Size |
|---|---|---|---|---|---|
| efficientnet_b3a | 82.242 (17.758) | 96.114 (3.886) | 12.23 | bicubic | 320 (1.0 crop) |
| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | bicubic | 300 |
| regnet_32 | 82.002 (17.998) | 95.906 (4.094) | 19.44 | bicubic | 224 |
| skresnext50d_32x4d | 81.278 (18.722) | 95.366 (4.634) | 27.5 | bicubic | 288 (1.0 crop) |
| seresnext50d_32x4d | 81.266 (18.734) | 95.620 (4.380) | 27.6 | bicubic | 224 |
| efficientnet_b2a | 80.608 (19.392) | 95.310 (4.690) | 9.11 | bicubic | 288 (1.0 crop) |
| resnet50d | 80.530 (19.470) | 95.160 (4.840) | 25.6 | bicubic | 224 |
| mixnet_xl | 80.478 (19.522) | 94.932 (5.068) | 11.90 | bicubic | 224 |
| efficientnet_b2 | 80.402 (19.598) | 95.076 (4.924) | 9.11 | bicubic | 260 |
| seresnet50 | 80.274 (19.726) | 95.070 (4.930) | 28.1 | bicubic | 224 |
| skresnext50d_32x4d | 80.156 (19.844) | 94.642 (5.358) | 27.5 | bicubic | 224 |
| cspdarknet53 | 80.058 (19.942) | 95.084 (4.916) | 27.6 | bicubic | 256 |
| cspresnext50 | 80.040 (19.960) | 94.944 (5.056) | 20.6 | bicubic | 224 |
| resnext50_32x4d | 79.762 (20.238) | 94.600 (5.400) | 25 | bicubic | 224 |
| resnext50d_32x4d | 79.674 (20.326) | 94.868 (5.132) | 25.1 | bicubic | 224 |
| cspresnet50 | 79.574 (20.426) | 94.712 (5.288) | 21.6 | bicubic | 256 |
| ese_vovnet39b | 79.320 (20.680) | 94.710 (5.290) | 24.6 | bicubic | 224 |
| resnetblur50 | 79.290 (20.710) | 94.632 (5.368) | 25.6 | bicubic | 224 |
| dpn68b | 79.216 (20.784) | 94.414 (5.586) | 12.6 | bicubic | 224 |
| resnet50 | 79.038 (20.962) | 94.390 (5.610) | 25.6 | bicubic | 224 |
| mixnet_l | 78.976 (21.024) | 94.184 (5.816) | 7.33 | bicubic | 224 |
| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.79 | bicubic | 240 |
| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | bicubic | 224 |
| seresnext26t_32x4d | 77.998 (22.002) | 93.708 (6.292) | 16.8 | bicubic | 224 |
| seresnext26tn_32x4d | 77.986 (22.014) | 93.746 (6.254) | 16.8 | bicubic | 224 |
| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.29 | bicubic | 224 |
| seresnext26d_32x4d | 77.602 (22.398) | 93.608 (6.392) | 16.8 | bicubic | 224 |
| mobilenetv2_120d | 77.294 (22.706) | 93.502 (6.498) | 5.8 | bicubic | 224 |
| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | bicubic | 224 |
| resnet34d | 77.116 (22.884) | 93.382 (6.618) | 21.8 | bicubic | 224 |
| seresnext26_32x4d | 77.104 (22.896) | 93.316 (6.684) | 16.8 | bicubic | 224 |
| skresnet34 | 76.912 (23.088) | 93.322 (6.678) | 22.2 | bicubic | 224 |
| ese_vovnet19b_dw | 76.798 (23.202) | 93.268 (6.732) | 6.5 | bicubic | 224 |
| resnet26d | 76.68 (23.32) | 93.166 (6.834) | 16 | bicubic | 224 |
| densenetblur121d | 76.576 (23.424) | 93.190 (6.810) | 8.0 | bicubic | 224 |
| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | bicubic | 224 |
| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | bicubic | 224 |
| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | bicubic | 224 |
| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | bicubic | 224 |
| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.89 | bicubic | 224 |
| resnet26 | 75.292 (24.708) | 92.57 (7.43) | 16 | bicubic | 224 |
| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | bilinear | 224 |
| resnet34 | 75.110 (24.890) | 92.284 (7.716) | 22 | bilinear | 224 |
| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | bicubic | 224 |
| seresnet34 | 74.808 (25.192) | 92.124 (7.876) | 22 | bilinear | 224 |
| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.38 | bicubic | 224 |
| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.42 | bilinear | 224 |
| skresnet18 | 73.038 (26.962) | 91.168 (8.832) | 11.9 | bicubic | 224 |
| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | bicubic | 224 |
| resnet18d | 72.260 (27.740) | 90.696 (9.304) | 11.7 | bicubic | 224 |
| seresnet18 | 71.742 (28.258) | 90.334 (9.666) | 11.8 | bicubic | 224 |

## Ported and Other Weights

For weights ported from other deep learning frameworks (Tensorflow, MXNet GluonCV) or copied from other PyTorch sources, please see the full results tables for ImageNet and various OOD test sets in the [results tables](https://github.com/rwightman/pytorch-image-models/tree/master/results).

Model code .py files contain links to original sources of models and weights.
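The CSV outputs in that folder lend themselves to quick programmatic filtering. Below is a minimal sketch using pandas; the `results-imagenet.csv` file name and the `model`/`top1`/`top5`/`param_count`/`img_size` column names are assumptions about the current results folder layout, so check the folder before relying on them.

```python
import pandas as pd

# Assumed file name and column names -- verify against the results folder first.
df = pd.read_csv('results/results-imagenet.csv')

# Best top-1 accuracy among models under 10M parameters.
small = df[df['param_count'] < 10].sort_values('top1', ascending=False)
print(small[['model', 'top1', 'top5', 'param_count', 'img_size']].head(10))
```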
pytorch-image-models/hfdocs/source/results.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/results.mdx", "repo_id": "pytorch-image-models", "token_count": 2259 }
233
import logging from .constants import * _logger = logging.getLogger(__name__) def resolve_data_config( args=None, pretrained_cfg=None, model=None, use_test_size=False, verbose=False ): assert model or args or pretrained_cfg, "At least one of model, args, or pretrained_cfg required for data config." args = args or {} pretrained_cfg = pretrained_cfg or {} if not pretrained_cfg and model is not None and hasattr(model, 'pretrained_cfg'): pretrained_cfg = model.pretrained_cfg data_config = {} # Resolve input/image size in_chans = 3 if args.get('in_chans', None) is not None: in_chans = args['in_chans'] elif args.get('chans', None) is not None: in_chans = args['chans'] input_size = (in_chans, 224, 224) if args.get('input_size', None) is not None: assert isinstance(args['input_size'], (tuple, list)) assert len(args['input_size']) == 3 input_size = tuple(args['input_size']) in_chans = input_size[0] # input_size overrides in_chans elif args.get('img_size', None) is not None: assert isinstance(args['img_size'], int) input_size = (in_chans, args['img_size'], args['img_size']) else: if use_test_size and pretrained_cfg.get('test_input_size', None) is not None: input_size = pretrained_cfg['test_input_size'] elif pretrained_cfg.get('input_size', None) is not None: input_size = pretrained_cfg['input_size'] data_config['input_size'] = input_size # resolve interpolation method data_config['interpolation'] = 'bicubic' if args.get('interpolation', None): data_config['interpolation'] = args['interpolation'] elif pretrained_cfg.get('interpolation', None): data_config['interpolation'] = pretrained_cfg['interpolation'] # resolve dataset + model mean for normalization data_config['mean'] = IMAGENET_DEFAULT_MEAN if args.get('mean', None) is not None: mean = tuple(args['mean']) if len(mean) == 1: mean = tuple(list(mean) * in_chans) else: assert len(mean) == in_chans data_config['mean'] = mean elif pretrained_cfg.get('mean', None): data_config['mean'] = pretrained_cfg['mean'] # resolve dataset + model std deviation for normalization data_config['std'] = IMAGENET_DEFAULT_STD if args.get('std', None) is not None: std = tuple(args['std']) if len(std) == 1: std = tuple(list(std) * in_chans) else: assert len(std) == in_chans data_config['std'] = std elif pretrained_cfg.get('std', None): data_config['std'] = pretrained_cfg['std'] # resolve default inference crop crop_pct = DEFAULT_CROP_PCT if args.get('crop_pct', None): crop_pct = args['crop_pct'] else: if use_test_size and pretrained_cfg.get('test_crop_pct', None): crop_pct = pretrained_cfg['test_crop_pct'] elif pretrained_cfg.get('crop_pct', None): crop_pct = pretrained_cfg['crop_pct'] data_config['crop_pct'] = crop_pct # resolve default crop percentage crop_mode = DEFAULT_CROP_MODE if args.get('crop_mode', None): crop_mode = args['crop_mode'] elif pretrained_cfg.get('crop_mode', None): crop_mode = pretrained_cfg['crop_mode'] data_config['crop_mode'] = crop_mode if verbose: _logger.info('Data processing configuration for current model + dataset:') for n, v in data_config.items(): _logger.info('\t%s: %s' % (n, str(v))) return data_config def resolve_model_data_config( model, args=None, pretrained_cfg=None, use_test_size=False, verbose=False, ): """ Resolve Model Data Config This is equivalent to resolve_data_config() but with arguments re-ordered to put model first. 
Args: model (nn.Module): the model instance args (dict): command line arguments / configuration in dict form (overrides pretrained_cfg) pretrained_cfg (dict): pretrained model config (overrides pretrained_cfg attached to model) use_test_size (bool): use the test time input resolution (if one exists) instead of default train resolution verbose (bool): enable extra logging of resolved values Returns: dictionary of config """ return resolve_data_config( args=args, pretrained_cfg=pretrained_cfg, model=model, use_test_size=use_test_size, verbose=verbose, )
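# --- Usage sketch (not part of config.py) ------------------------------------
# A minimal example of how the resolvers above are typically combined with
# timm's transform factory, as run from user code (not inside the package).
# Assumes a recent timm install; pretrained=False avoids a weight download,
# use pretrained=True to pick up the pretrained weights' own data config.
import timm
from timm.data import resolve_model_data_config, create_transform

model = timm.create_model('resnet50', pretrained=False)
data_cfg = resolve_model_data_config(model, use_test_size=True, verbose=True)
# -> dict with 'input_size', 'interpolation', 'mean', 'std', 'crop_pct', 'crop_mode'
eval_transform = create_transform(**data_cfg, is_training=False)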
pytorch-image-models/timm/data/config.py/0
{ "file_path": "pytorch-image-models/timm/data/config.py", "repo_id": "pytorch-image-models", "token_count": 1927 }
234
import os
import pickle


def load_class_map(map_or_filename, root=''):
    if isinstance(map_or_filename, dict):
        # NOTE check the argument itself; `assert dict` always passes as `dict` is the builtin type
        assert map_or_filename, 'class_map dict must be non-empty'
        return map_or_filename
    class_map_path = map_or_filename
    if not os.path.exists(class_map_path):
        class_map_path = os.path.join(root, class_map_path)
        assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % map_or_filename
    class_map_ext = os.path.splitext(map_or_filename)[-1].lower()
    if class_map_ext == '.txt':
        with open(class_map_path) as f:
            class_to_idx = {v.strip(): k for k, v in enumerate(f)}
    elif class_map_ext == '.pkl':
        with open(class_map_path, 'rb') as f:
            class_to_idx = pickle.load(f)
    else:
        assert False, f'Unsupported class map file extension ({class_map_ext}).'
    return class_to_idx
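# --- Usage sketch (not part of class_map.py) ----------------------------------
# A .txt class map is one class name per line; indices follow line order.
# The temporary file below is purely illustrative.
import tempfile

with tempfile.TemporaryDirectory() as d:
    path = os.path.join(d, 'class_map.txt')
    with open(path, 'w') as f:
        f.write('cat\ndog\nhorse\n')
    print(load_class_map(path))              # {'cat': 0, 'dog': 1, 'horse': 2}

print(load_class_map({'cat': 0, 'dog': 1}))  # dicts are passed through unchanged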
pytorch-image-models/timm/data/readers/class_map.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/class_map.py", "repo_id": "pytorch-image-models", "token_count": 387 }
235
from ._fx import ( create_feature_extractor, get_graph_node_names, register_notrace_function, register_notrace_module, is_notrace_module, is_notrace_function, get_notrace_modules, get_notrace_functions, ) from .activations import * from .adaptive_avgmax_pool import ( adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d, ) from .attention import Attention, AttentionRope, maybe_add_mask from .attention2d import MultiQueryAttention2d, Attention2d, MultiQueryAttentionV2 from .attention_pool import AttentionPoolLatent from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding from .blur_pool import BlurPool2d, create_aa from .classifier import create_classifier, ClassifierHead, NormMlpClassifierHead, ClNormMlpClassifierHead from .cond_conv2d import CondConv2d, get_condconv_initializer from .config import ( is_exportable, is_scriptable, is_no_jit, use_fused_attn, set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn, set_reentrant_ckpt, use_reentrant_ckpt, ) from .conv2d_same import Conv2dSame, conv2d_same from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct from .create_act import create_act_layer, get_act_layer, get_act_fn from .create_attn import get_attn, create_attn from .create_conv2d import create_conv2d from .create_norm import get_norm_layer, create_norm_layer from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn from .evo_norm import ( EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2, EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a, ) from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to from .gather_excite import GatherExcite from .global_context import GlobalContext from .grid import ndgrid, meshgrid from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple from .hybrid_embed import HybridEmbed, HybridEmbedWithSize from .inplace_abn import InplaceAbn from .layer_scale import LayerScale, LayerScale2d from .linear import Linear from .mixed_conv2d import MixedConv2d from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .norm import ( GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, LayerNormFp32, LayerNorm2dFp32, RmsNorm, RmsNorm2d, RmsNormFp32, RmsNorm2dFp32, SimpleNorm, SimpleNorm2d, SimpleNormFp32, SimpleNorm2dFp32, ) from .norm_act import ( BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d, LayerNormActFp32, LayerNormAct2dFp32, RmsNormAct, RmsNormAct2d, RmsNormActFp32, RmsNormAct2dFp32, SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d, ) from .padding import get_padding, get_same_padding, pad_same from .patch_dropout import PatchDropout, PatchDropoutWithIndices, patch_dropout_forward from .patch_embed import PatchEmbed, PatchEmbedWithSize, PatchEmbedInterpolator, resample_patch_embed from .pool1d import global_pool_nlc from .pool2d_same import AvgPool2dSame, create_pool2d from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc from .pos_embed_rel import ( 
RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords, resize_rel_pos_bias_table, resize_rel_pos_bias_table_simple, resize_rel_pos_bias_table_levit, ) from .pos_embed_sincos import ( pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat, RotaryEmbeddingMixed, get_mixed_freqs, ) from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from .selective_kernel import SelectiveKernel from .separable_conv import SeparableConv2d, SeparableConvNormAct from .space_to_depth import SpaceToDepth, DepthToSpace from .split_attn import SplitAttn from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame from .test_time_pool import TestTimePoolHead, apply_test_time_pool from .trace_utils import _assert, _float_to_int from .typing import LayerType, PadType, disable_compiler from .weight_init import ( trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_, init_weight_jax, init_weight_vit, )
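# --- Usage sketch (not part of __init__.py) -----------------------------------
# The re-exports above expose everything under a flat `timm.layers` namespace.
# A small example, meant to be run from user code rather than inside the package:
import torch
from timm.layers import ConvNormAct, SelectAdaptivePool2d, get_act_layer

act = get_act_layer('silu')                  # resolve an activation class by name
block = ConvNormAct(3, 32, kernel_size=3, stride=2, act_layer=act)
pool = SelectAdaptivePool2d(pool_type='avg', flatten=True)
print(pool(block(torch.randn(1, 3, 224, 224))).shape)   # torch.Size([1, 32])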
pytorch-image-models/timm/layers/__init__.py/0
{ "file_path": "pytorch-image-models/timm/layers/__init__.py", "repo_id": "pytorch-image-models", "token_count": 2075 }
236
""" Conv2d + BN + Act Hacked together by / Copyright 2020 Ross Wightman """ from typing import Any, Dict, Optional, Type from torch import nn as nn from .typing import LayerType, PadType from .blur_pool import create_aa from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class ConvNormAct(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 1, stride: int = 1, padding: PadType = '', dilation: int = 1, groups: int = 1, bias: bool = False, apply_norm: bool = True, apply_act: bool = True, norm_layer: LayerType = nn.BatchNorm2d, act_layer: Optional[LayerType] = nn.ReLU, aa_layer: Optional[LayerType] = None, drop_layer: Optional[Type[nn.Module]] = None, conv_kwargs: Optional[Dict[str, Any]] = None, norm_kwargs: Optional[Dict[str, Any]] = None, act_kwargs: Optional[Dict[str, Any]] = None, ): super(ConvNormAct, self).__init__() conv_kwargs = conv_kwargs or {} norm_kwargs = norm_kwargs or {} act_kwargs = act_kwargs or {} use_aa = aa_layer is not None and stride > 1 self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, padding=padding, dilation=dilation, groups=groups, bias=bias, **conv_kwargs, ) if apply_norm: # NOTE for backwards compatibility with models that use separate norm and act layer definitions norm_act_layer = get_norm_act_layer(norm_layer, act_layer) # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn = norm_act_layer( out_channels, apply_act=apply_act, act_kwargs=act_kwargs, **norm_kwargs, ) else: self.bn = nn.Sequential() if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn.add_module('drop', drop_layer()) self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa, noop=None) @property def in_channels(self): return self.conv.in_channels @property def out_channels(self): return self.conv.out_channels def forward(self, x): x = self.conv(x) x = self.bn(x) aa = getattr(self, 'aa', None) if aa is not None: x = self.aa(x) return x ConvBnAct = ConvNormAct ConvNormActAa = ConvNormAct # backwards compat, when they were separate
pytorch-image-models/timm/layers/conv_bn_act.py/0
{ "file_path": "pytorch-image-models/timm/layers/conv_bn_act.py", "repo_id": "pytorch-image-models", "token_count": 1446 }
237
""" Halo Self Attention Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` - https://arxiv.org/abs/2103.12731 @misc{2103.12731, Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and Jonathon Shlens}, Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, Year = {2021}, } Status: This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. The attention mechanism works but it's slow as implemented. Hacked together by / Copyright 2021 Ross Wightman """ from typing import List import torch from torch import nn import torch.nn.functional as F from .helpers import make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): """ Compute relative logits along one dimension As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 Args: q: (batch, height, width, dim) rel_k: (2 * window - 1, dim) permute_mask: permute output dim according to this """ B, H, W, dim = q.shape rel_size = rel_k.shape[0] win_size = (rel_size + 1) // 2 x = (q @ rel_k.transpose(-1, -2)) x = x.reshape(-1, W, rel_size) # pad to shift from relative to absolute indexing x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, rel_size - W]) # reshape and slice out the padded elements x_pad = x_pad.reshape(-1, W + 1, rel_size) x = x_pad[:, :W, win_size - 1:] # reshape and tile x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): """ Relative Position Embedding As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 """ def __init__(self, block_size, win_size, dim_head, scale): """ Args: block_size (int): block size win_size (int): neighbourhood window size dim_head (int): attention head dim scale (float): scale factor (for init) """ super().__init__() self.block_size = block_size self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) def forward(self, q): B, BB, HW, _ = q.shape # relative logits in width dimension. q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) # relative logits in height dimension. q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, BB, HW, -1) return rel_logits class HaloAttn(nn.Module): """ Halo Attention Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` - https://arxiv.org/abs/2103.12731 The internal dimensions of the attention module are controlled by the interaction of several arguments. 
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query and key (qk) dimensions are determined by * num_heads * dim_head if dim_head is not None * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda) stride: output stride of the module, query downscaled if > 1 (default: 1). num_heads: parallel attention heads (default: 8). dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set block_size (int): size of blocks. (default: 8) halo_size (int): size of halo overlap. (default: 3) qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) qkv_bias (bool) : add bias to q, k, and v projections avg_down (bool): use average pool downsample instead of strided query blocks scale_pos_embed (bool): scale the position embedding as well as Q @ K """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0 assert stride in (1, 2) self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** -0.5 self.scale_pos_embed = scale_pos_embed self.block_size = self.block_size_ds = block_size self.halo_size = halo_size self.win_size = block_size + halo_size * 2 # neighbourhood window size self.block_stride = 1 use_avg_pool = False if stride > 1: use_avg_pool = avg_down or block_size % stride != 0 self.block_stride = 1 if use_avg_pool else stride self.block_size_ds = self.block_size // self.block_stride # FIXME not clear if this stride behaviour is what the paper intended # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving # data in unfolded block form. I haven't wrapped my head around how that'd look. 
self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) self.pos_embed = PosEmbedRel( block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() self.reset_parameters() def reset_parameters(self): std = self.q.weight.shape[1] ** -0.5 # fan-in trunc_normal_(self.q.weight, std=std) trunc_normal_(self.kv.weight, std=std) trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): B, C, H, W = x.shape _assert(H % self.block_size == 0, '') _assert(W % self.block_size == 0, '') num_h_blocks = H // self.block_size num_w_blocks = W // self.block_size num_blocks = num_h_blocks * num_w_blocks q = self.q(x) # unfold q = q.reshape( -1, self.dim_head_qk, num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) # B, num_heads * dim_head * block_size ** 2, num_blocks q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) # B * num_heads, num_blocks, block_size ** 2, dim_head kv = self.kv(x) # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. # FIXME figure out how to switch impl between this and conv2d if XLA being used. kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v if self.scale_pos_embed: attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale else: attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 attn = attn.softmax(dim=-1) out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks # fold out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) out = out.permute(0, 3, 1, 4, 2).contiguous().view( B, self.dim_out_v, H // self.block_stride, W // self.block_stride) # B, dim_out, H // block_stride, W // block_stride out = self.pool(out) return out """ Three alternatives for overlapping windows. `.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() if is_xla: # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. 
WW = self.win_size ** 2 pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) elif self.stride_tricks: kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() kv = kv.as_strided(( B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) else: kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) kv = kv.reshape( B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) """
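# --- Usage sketch (not part of halo_attn.py) ------------------------------------
# Input H and W must be divisible by block_size; dim_out defaults to dim, so this
# block maps (B, 128, 32, 32) -> (B, 128, 32, 32) when stride=1.
attn = HaloAttn(dim=128, num_heads=8, block_size=8, halo_size=3)
x = torch.randn(2, 128, 32, 32)
print(attn(x).shape)   # torch.Size([2, 128, 32, 32])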
pytorch-image-models/timm/layers/halo_attn.py/0
{ "file_path": "pytorch-image-models/timm/layers/halo_attn.py", "repo_id": "pytorch-image-models", "token_count": 4601 }
238
from typing import Optional, Tuple, Union import torch import torch.nn as nn def patch_dropout_forward( x: torch.Tensor, prob: float, num_prefix_tokens: int, ordered: bool, training: bool, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """ Common forward logic for patch dropout. Args: x: Input tensor of shape (B, L, D) prob: Dropout probability num_prefix_tokens: Number of prefix tokens to preserve ordered: Whether to maintain patch order training: Whether in training mode Returns: Tuple of (output tensor, keep_indices or None) """ if not training or prob == 0.: return x, None if num_prefix_tokens: prefix_tokens, x = x[:, :num_prefix_tokens], x[:, num_prefix_tokens:] else: prefix_tokens = None B = x.shape[0] L = x.shape[1] num_keep = max(1, int(L * (1. - prob))) keep_indices = torch.argsort(torch.randn(B, L, device=x.device), dim=-1)[:, :num_keep] if ordered: # NOTE does not need to maintain patch order in typical transformer use, # but possibly useful for debug / visualization keep_indices = keep_indices.sort(dim=-1)[0] x = x.gather(1, keep_indices.unsqueeze(-1).expand((-1, -1) + x.shape[2:])) if prefix_tokens is not None: x = torch.cat((prefix_tokens, x), dim=1) return x, keep_indices class PatchDropout(nn.Module): """ Patch Dropout without returning indices. https://arxiv.org/abs/2212.00794 and https://arxiv.org/pdf/2208.07220 """ def __init__( self, prob: float = 0.5, num_prefix_tokens: int = 1, ordered: bool = False, ): super().__init__() assert 0 <= prob < 1. self.prob = prob self.num_prefix_tokens = num_prefix_tokens # exclude CLS token (or other prefix tokens) self.ordered = ordered def forward(self, x: torch.Tensor) -> torch.Tensor: output, _ = patch_dropout_forward( x, self.prob, self.num_prefix_tokens, self.ordered, self.training ) return output class PatchDropoutWithIndices(nn.Module): """ Patch Dropout that returns both output and keep indices. https://arxiv.org/abs/2212.00794 and https://arxiv.org/pdf/2208.07220 """ def __init__( self, prob: float = 0.5, num_prefix_tokens: int = 1, ordered: bool = False, ): super().__init__() assert 0 <= prob < 1. self.prob = prob self.num_prefix_tokens = num_prefix_tokens # exclude CLS token (or other prefix tokens) self.ordered = ordered def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: return patch_dropout_forward( x, self.prob, self.num_prefix_tokens, self.ordered, self.training )
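# --- Usage sketch (not part of patch_dropout.py) --------------------------------
# Token dropout only applies in training mode; prefix (e.g. CLS) tokens are kept.
drop = PatchDropout(prob=0.25, num_prefix_tokens=1)
drop.train()
x = torch.randn(4, 197, 768)        # e.g. ViT-B/16: 1 CLS token + 196 patch tokens
print(drop(x).shape)                # torch.Size([4, 148, 768]) -> 1 + int(196 * 0.75)
drop.eval()
print(drop(x).shape)                # torch.Size([4, 197, 768]), identity at eval time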
pytorch-image-models/timm/layers/patch_dropout.py/0
{ "file_path": "pytorch-image-models/timm/layers/patch_dropout.py", "repo_id": "pytorch-image-models", "token_count": 1406 }
239
from contextlib import nullcontext from functools import wraps from typing import Callable, Optional, Tuple, Type, TypeVar, Union, overload, ContextManager import torch __all__ = ["LayerType", "PadType", "nullwrap", "disable_compiler"] LayerType = Union[str, Callable, Type[torch.nn.Module]] PadType = Union[str, int, Tuple[int, int]] F = TypeVar("F", bound=Callable[..., object]) @overload def nullwrap(fn: F) -> F: ... # decorator form @overload def nullwrap(fn: None = ...) -> ContextManager: ... # context‑manager form def nullwrap(fn: Optional[F] = None): # as a context manager if fn is None: return nullcontext() # `with nullwrap():` # as a decorator @wraps(fn) def wrapper(*args, **kwargs): return fn(*args, **kwargs) return wrapper # `@nullwrap` disable_compiler = getattr(getattr(torch, "compiler", None), "disable", None) or nullwrap
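# --- Usage sketch (not part of typing.py) ---------------------------------------
# nullwrap doubles as a no-op decorator and a no-op context manager, so
# disable_compiler degrades gracefully on torch builds without torch.compiler.
@disable_compiler
def add_one(x):
    return x + 1

with nullwrap():
    print(add_one(1))   # 2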
pytorch-image-models/timm/layers/typing.py/0
{ "file_path": "pytorch-image-models/timm/layers/typing.py", "repo_id": "pytorch-image-models", "token_count": 316 }
240
import collections.abc import math import re from collections import defaultdict from itertools import chain from typing import Any, Callable, Dict, Iterator, Optional, Tuple, Type, Union import torch import torch.utils.checkpoint from torch import nn as nn from torch import Tensor from timm.layers import use_reentrant_ckpt __all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv', 'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq', 'checkpoint'] def model_parameters(model: nn.Module, exclude_head: bool = False): if exclude_head: # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering return [p for p in model.parameters()][:-2] else: return model.parameters() def named_apply( fn: Callable, module: nn.Module, name='', depth_first: bool = True, include_root: bool = False, ) -> nn.Module: if not depth_first and include_root: fn(module=module, name=name) for child_name, child_module in module.named_children(): child_name = '.'.join((name, child_name)) if name else child_name named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) if depth_first and include_root: fn(module=module, name=name) return module def named_modules( module: nn.Module, name: str = '', depth_first: bool = True, include_root: bool = False, ): if not depth_first and include_root: yield name, module for child_name, child_module in module.named_children(): child_name = '.'.join((name, child_name)) if name else child_name yield from named_modules( module=child_module, name=child_name, depth_first=depth_first, include_root=True) if depth_first and include_root: yield name, module def named_modules_with_params( module: nn.Module, name: str = '', depth_first: bool = True, include_root: bool = False, ): if module._parameters and not depth_first and include_root: yield name, module for child_name, child_module in module.named_children(): child_name = '.'.join((name, child_name)) if name else child_name yield from named_modules_with_params( module=child_module, name=child_name, depth_first=depth_first, include_root=True) if module._parameters and depth_first and include_root: yield name, module MATCH_PREV_GROUP = (99999,) def group_with_matcher( named_objects: Iterator[Tuple[str, Any]], group_matcher: Union[Dict, Callable], return_values: bool = False, reverse: bool = False ): if isinstance(group_matcher, dict): # dictionary matcher contains a dict of raw-string regex expr that must be compiled compiled = [] for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()): if mspec is None: continue # map all matching specifications into 3-tuple (compiled re, prefix, suffix) if isinstance(mspec, (tuple, list)): # multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix) for sspec in mspec: compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])] else: compiled += [(re.compile(mspec), (group_ordinal,), None)] group_matcher = compiled def _get_grouping(name): if isinstance(group_matcher, (list, tuple)): for match_fn, prefix, suffix in group_matcher: r = match_fn.match(name) if r: parts = (prefix, r.groups(), suffix) # map all tuple elem to int for numeric sort, filter out None entries return tuple(map(float, chain.from_iterable(filter(None, parts)))) return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal else: ord = group_matcher(name) if not isinstance(ord, collections.abc.Iterable): 
return ord, return tuple(ord) # map layers into groups via ordinals (ints or tuples of ints) from matcher grouping = defaultdict(list) for k, v in named_objects: grouping[_get_grouping(k)].append(v if return_values else k) # remap to integers layer_id_to_param = defaultdict(list) lid = -1 for k in sorted(filter(lambda x: x is not None, grouping.keys())): if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]: lid += 1 layer_id_to_param[lid].extend(grouping[k]) if reverse: assert not return_values, "reverse mapping only sensible for name output" # output reverse mapping param_to_layer_id = {} for lid, lm in layer_id_to_param.items(): for n in lm: param_to_layer_id[n] = lid return param_to_layer_id return layer_id_to_param def group_parameters( module: nn.Module, group_matcher, return_values: bool = False, reverse: bool = False, ): return group_with_matcher( module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse) def group_modules( module: nn.Module, group_matcher, return_values: bool = False, reverse: bool = False, ): return group_with_matcher( named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse) def flatten_modules( named_modules: Iterator[Tuple[str, nn.Module]], depth: int = 1, prefix: Union[str, Tuple[str, ...]] = '', module_types: Union[str, Tuple[Type[nn.Module]]] = 'sequential', ): prefix_is_tuple = isinstance(prefix, tuple) if isinstance(module_types, str): if module_types == 'container': module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict) else: module_types = (nn.Sequential,) for name, module in named_modules: if depth and isinstance(module, module_types): yield from flatten_modules( module.named_children(), depth - 1, prefix=(name,) if prefix_is_tuple else name, module_types=module_types, ) else: if prefix_is_tuple: name = prefix + (name,) yield name, module else: if prefix: name = '.'.join([prefix, name]) yield name, module def checkpoint( function, *args, use_reentrant: Optional[bool] = None, **kwargs, ): """ checkpoint wrapper fn A thin wrapper around torch.utils.checkpoint.checkpoint to default use_reentrant to False """ if use_reentrant is None: use_reentrant = use_reentrant_ckpt() return torch.utils.checkpoint.checkpoint( function, *args, use_reentrant=use_reentrant, **kwargs, ) def checkpoint_seq( functions, x, every: int = 1, flatten: bool = False, skip_last: bool = False, use_reentrant: Optional[bool] = None, ): r"""A helper function for checkpointing sequential models. Sequential models execute a list of modules/functions in order (sequentially). Therefore, we can divide such a sequence into segments and checkpoint each segment. All segments except run in :func:`torch.no_grad` manner, i.e., not storing the intermediate activations. The inputs of each checkpointed segment will be saved for re-running the segment in the backward pass. See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works. .. warning:: Checkpointing currently only supports :func:`torch.autograd.backward` and only if its `inputs` argument is not passed. :func:`torch.autograd.grad` is not supported. .. warning: At least one of the inputs needs to have :code:`requires_grad=True` if grads are needed for model inputs, otherwise the checkpointed part of the model won't have gradients. Args: functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially. 
x: A Tensor that is input to :attr:`functions` every: checkpoint every-n functions (default: 1) flatten: flatten nn.Sequential of nn.Sequentials skip_last: skip checkpointing the last function in the sequence if True use_reentrant: Use re-entrant checkpointing Returns: Output of running :attr:`functions` sequentially on :attr:`*inputs` Example: >>> model = nn.Sequential(...) >>> input_var = checkpoint_seq(model, input_var, every=2) """ if use_reentrant is None: use_reentrant = use_reentrant_ckpt() def run_function(start, end, functions): def forward(_x): for j in range(start, end + 1): _x = functions[j](_x) return _x return forward if isinstance(functions, torch.nn.Sequential): functions = functions.children() if flatten: functions = chain.from_iterable(functions) if not isinstance(functions, (tuple, list)): functions = tuple(functions) num_checkpointed = len(functions) if skip_last: num_checkpointed -= 1 end = -1 for start in range(0, num_checkpointed, every): end = min(start + every - 1, num_checkpointed - 1) x = torch.utils.checkpoint.checkpoint( run_function(start, end, functions), x, use_reentrant=use_reentrant, ) if skip_last: return run_function(end + 1, len(functions) - 1, functions)(x) return x def adapt_input_conv(in_chans: int, conv_weight: Tensor) -> Tensor: conv_type = conv_weight.dtype conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU O, I, J, K = conv_weight.shape if in_chans == 1: if I > 3: assert conv_weight.shape[1] % 3 == 0 # For models with space2depth stems conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) conv_weight = conv_weight.sum(dim=2, keepdim=False) else: conv_weight = conv_weight.sum(dim=1, keepdim=True) elif in_chans != 3: if I != 3: raise NotImplementedError('Weight format not supported by conversion.') else: # NOTE this strategy should be better than random init, but there could be other combinations of # the original RGB input layer weights that'd work better for specific cases. repeat = int(math.ceil(in_chans / 3)) conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] conv_weight *= (3 / float(in_chans)) conv_weight = conv_weight.to(conv_type) return conv_weight
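# --- Usage sketch (not part of _manipulate.py) ----------------------------------
# Gradient-checkpoint a Sequential two blocks at a time; the input must require
# grad so activations can be recomputed during the backward pass.
import torch
import torch.nn as nn

blocks = nn.Sequential(*[nn.Sequential(nn.Linear(32, 32), nn.ReLU()) for _ in range(6)])
x = torch.randn(8, 32, requires_grad=True)
out = checkpoint_seq(blocks, x, every=2, skip_last=True)
out.sum().backward()
print(x.grad.shape)   # torch.Size([8, 32])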
pytorch-image-models/timm/models/_manipulate.py/0
{ "file_path": "pytorch-image-models/timm/models/_manipulate.py", "repo_id": "pytorch-image-models", "token_count": 4690 }
241
""" ConvNeXt Papers: * `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf @Article{liu2022convnet, author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, title = {A ConvNet for the 2020s}, journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, } * `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 @article{Woo2023ConvNeXtV2, title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders}, author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie}, year={2023}, journal={arXiv preprint arXiv:2301.00808}, } Original code and weights from: * https://github.com/facebookresearch/ConvNeXt, original copyright below * https://github.com/facebookresearch/ConvNeXt-V2, original copyright below Model defs atto, femto, pico, nano and _ols / _hnf variants are timm originals. Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # ConvNeXt # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the MIT license # ConvNeXt-V2 # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree (Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)) # No code was used directly from ConvNeXt-V2, however the weights are CC BY-NC 4.0 so beware if using commercially. from functools import partial from typing import Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, \ LayerNorm2d, LayerNorm, RmsNorm2d, RmsNorm, create_conv2d, get_act_layer, get_norm_layer, make_divisible, to_ntuple from timm.layers import SimpleNorm2d, SimpleNorm from timm.layers import NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this class Downsample(nn.Module): """Downsample module for ConvNeXt.""" def __init__(self, in_chs: int, out_chs: int, stride: int = 1, dilation: int = 1) -> None: """Initialize Downsample module. Args: in_chs: Number of input channels. out_chs: Number of output channels. stride: Stride for downsampling. dilation: Dilation rate. """ super().__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() if in_chs != out_chs: self.conv = create_conv2d(in_chs, out_chs, 1, stride=1) else: self.conv = nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = self.pool(x) x = self.conv(x) return x class ConvNeXtBlock(nn.Module): """ConvNeXt Block. 
There are two equivalent implementations: (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW. """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 7, stride: int = 1, dilation: Union[int, Tuple[int, int]] = (1, 1), mlp_ratio: float = 4, conv_mlp: bool = False, conv_bias: bool = True, use_grn: bool = False, ls_init_value: Optional[float] = 1e-6, act_layer: Union[str, Callable] = 'gelu', norm_layer: Optional[Callable] = None, drop_path: float = 0., ): """ Args: in_chs: Block input channels. out_chs: Block output channels (same as in_chs if None). kernel_size: Depthwise convolution kernel size. stride: Stride of depthwise convolution. dilation: Tuple specifying input and output dilation of block. mlp_ratio: MLP expansion ratio. conv_mlp: Use 1x1 convolutions for MLP and a NCHW compatible norm layer if True. conv_bias: Apply bias for all convolution (linear) layers. use_grn: Use GlobalResponseNorm in MLP (from ConvNeXt-V2) ls_init_value: Layer-scale init values, layer-scale applied if not None. act_layer: Activation layer. norm_layer: Normalization layer (defaults to LN if not specified). drop_path: Stochastic depth probability. """ super().__init__() out_chs = out_chs or in_chs dilation = to_ntuple(2)(dilation) act_layer = get_act_layer(act_layer) if not norm_layer: norm_layer = LayerNorm2d if conv_mlp else LayerNorm mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp) self.use_conv_mlp = conv_mlp self.conv_dw = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation[0], depthwise=True, bias=conv_bias, ) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0]) else: self.shortcut = nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" shortcut = x x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = x.permute(0, 3, 1, 2) if self.gamma is not None: x = x.mul(self.gamma.reshape(1, -1, 1, 1)) x = self.drop_path(x) + self.shortcut(shortcut) return x class ConvNeXtStage(nn.Module): """ConvNeXt stage (multiple blocks).""" def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 7, stride: int = 2, depth: int = 2, dilation: Tuple[int, int] = (1, 1), drop_path_rates: Optional[List[float]] = None, ls_init_value: float = 1.0, conv_mlp: bool = False, conv_bias: bool = True, use_grn: bool = False, act_layer: Union[str, Callable] = 'gelu', norm_layer: Optional[Callable] = None, norm_layer_cl: Optional[Callable] = None ) -> None: """Initialize ConvNeXt stage. Args: in_chs: Number of input channels. out_chs: Number of output channels. 
kernel_size: Kernel size for depthwise convolution. stride: Stride for downsampling. depth: Number of blocks in stage. dilation: Dilation rates. drop_path_rates: Drop path rates for each block. ls_init_value: Initial value for layer scale. conv_mlp: Use convolutional MLP. conv_bias: Use bias in convolutions. use_grn: Use global response normalization. act_layer: Activation layer. norm_layer: Normalization layer. norm_layer_cl: Normalization layer for channels last. """ super().__init__() self.grad_checkpointing = False if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]: ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1 pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used self.downsample = nn.Sequential( norm_layer(in_chs), create_conv2d( in_chs, out_chs, kernel_size=ds_ks, stride=stride, dilation=dilation[0], padding=pad, bias=conv_bias, ), ) in_chs = out_chs else: self.downsample = nn.Identity() drop_path_rates = drop_path_rates or [0.] * depth stage_blocks = [] for i in range(depth): stage_blocks.append(ConvNeXtBlock( in_chs=in_chs, out_chs=out_chs, kernel_size=kernel_size, dilation=dilation[1], drop_path=drop_path_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer if conv_mlp else norm_layer_cl, )) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x # map of norm layers with NCHW (2D) and channels last variants _NORM_MAP = { 'layernorm': (LayerNorm2d, LayerNorm), 'layernorm2d': (LayerNorm2d, LayerNorm), 'simplenorm': (SimpleNorm2d, SimpleNorm), 'simplenorm2d': (SimpleNorm2d, SimpleNorm), 'rmsnorm': (RmsNorm2d, RmsNorm), 'rmsnorm2d': (RmsNorm2d, RmsNorm), } def _get_norm_layers(norm_layer: Union[Callable, str], conv_mlp: bool, norm_eps: float): norm_layer = norm_layer or 'layernorm' if norm_layer in _NORM_MAP: norm_layer_cl = _NORM_MAP[norm_layer][0] if conv_mlp else _NORM_MAP[norm_layer][1] norm_layer = _NORM_MAP[norm_layer][0] if norm_eps is not None: norm_layer = partial(norm_layer, eps=norm_eps) norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) else: assert conv_mlp, \ 'If a norm_layer is specified, conv MLP must be used so all norm expect rank-4, channels-first input' norm_layer = get_norm_layer(norm_layer) norm_layer_cl = norm_layer if norm_eps is not None: norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) return norm_layer, norm_layer_cl class ConvNeXt(nn.Module): """ConvNeXt model architecture. A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', output_stride: int = 32, depths: Tuple[int, ...] = (3, 3, 9, 3), dims: Tuple[int, ...] = (96, 192, 384, 768), kernel_sizes: Union[int, Tuple[int, ...]] = 7, ls_init_value: Optional[float] = 1e-6, stem_type: str = 'patch', patch_size: int = 4, head_init_scale: float = 1., head_norm_first: bool = False, head_hidden_size: Optional[int] = None, conv_mlp: bool = False, conv_bias: bool = True, use_grn: bool = False, act_layer: Union[str, Callable] = 'gelu', norm_layer: Optional[Union[str, Callable]] = None, norm_eps: Optional[float] = None, drop_rate: float = 0., drop_path_rate: float = 0., ): """ Args: in_chans: Number of input image channels. 
num_classes: Number of classes for classification head. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). depths: Number of blocks at each stage. dims: Feature dimension at each stage. kernel_sizes: Depthwise convolution kernel-sizes for each stage. ls_init_value: Init value for Layer Scale, disabled if None. stem_type: Type of stem. patch_size: Stem patch size for patch stem. head_init_scale: Init scaling value for classifier weights and biases. head_norm_first: Apply normalization before global pool + head. head_hidden_size: Size of MLP hidden layer in head if not None and head_norm_first == False. conv_mlp: Use 1x1 conv in MLP, improves speed for small networks w/ chan last. conv_bias: Use bias layers w/ all convolutions. use_grn: Use Global Response Norm (ConvNeXt-V2) in MLP. act_layer: Activation layer type. norm_layer: Normalization layer type. drop_rate: Head pre-classifier dropout rate. drop_path_rate: Stochastic depth drop rate. """ super().__init__() assert output_stride in (8, 16, 32) kernel_sizes = to_ntuple(4)(kernel_sizes) norm_layer, norm_layer_cl = _get_norm_layers(norm_layer, conv_mlp, norm_eps) act_layer = get_act_layer(act_layer) self.num_classes = num_classes self.drop_rate = drop_rate self.feature_info = [] assert stem_type in ('patch', 'overlap', 'overlap_tiered', 'overlap_act') if stem_type == 'patch': # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias), norm_layer(dims[0]), ) stem_stride = patch_size else: mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0] self.stem = nn.Sequential(*filter(None, [ nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), act_layer() if 'act' in stem_type else None, nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias), norm_layer(dims[0]), ])) stem_stride = 4 self.stages = nn.Sequential() dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] prev_chs = dims[0] curr_stride = stem_stride dilation = 1 # 4 feature resolution stages, each consisting of multiple residual blocks for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 if curr_stride >= output_stride and stride > 1: dilation *= stride stride = 1 curr_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 out_chs = dims[i] stages.append(ConvNeXtStage( prev_chs, out_chs, kernel_size=kernel_sizes[i], stride=stride, dilation=(first_dilation, dilation), depth=depths[i], drop_path_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, )) prev_chs = out_chs # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = self.head_hidden_size = prev_chs # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets # otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights) if head_norm_first: assert not head_hidden_size self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = 
nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, act_layer='gelu', ) self.head_hidden_size = self.head.num_features named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Union[str, List]]: """Create regex patterns for parameter grouping. Args: coarse: Use coarse grouping. Returns: Dictionary mapping group names to regex patterns. """ return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm_pre', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing. Args: enable: Whether to enable gradient checkpointing. """ for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: """Get the classifier module.""" return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: """Reset the classifier head. Args: num_classes: Number of classes for new classifier. global_pool: Global pooling type. """ self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """Forward features that returns intermediates. Args: x: Input image tensor. indices: Take last n blocks if int, all if None, select matching indices if sequence. norm: Apply norm layer to compatible intermediates. stop_early: Stop iterating over blocks when last desired intermediate hit. output_fmt: Shape of intermediate feature outputs. intermediates_only: Only return intermediate features. Returns: List of intermediate features or tuple of (final features, intermediates). """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.stem(x) last_idx = len(self.stages) - 1 if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: if norm and feat_idx == last_idx: intermediates.append(self.norm_pre(x)) else: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.norm_pre(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ) -> List[int]: """Prune layers not required for specified intermediates. Args: indices: Indices of intermediate layers to keep. prune_norm: Whether to prune normalization layer. prune_head: Whether to prune the classifier head. Returns: List of indices that were kept. 
""" take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_norm: self.norm_pre = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: """Forward pass through feature extraction layers.""" x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through classifier head. Args: x: Feature tensor. pre_logits: Return features before final classifier. Returns: Output tensor. """ return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module: nn.Module, name: Optional[str] = None, head_init_scale: float = 1.0) -> None: """Initialize model weights. Args: module: Module to initialize. name: Module name. head_init_scale: Scale factor for head initialization. """ if isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_(module.weight, std=.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap FB checkpoints -> timm """ if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict # non-FB checkpoint if 'model' in state_dict: state_dict = state_dict['model'] out_dict = {} if 'visual.trunk.stem.0.weight' in state_dict: out_dict = {k.replace('visual.trunk.', ''): v for k, v in state_dict.items() if k.startswith('visual.trunk.')} if 'visual.head.proj.weight' in state_dict: out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0]) elif 'visual.head.mlp.fc1.weight' in state_dict: out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight'] out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias'] out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0]) return out_dict import re for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') if 'grn' in k: k = k.replace('grn.beta', 'mlp.grn.bias') k = k.replace('grn.gamma', 'mlp.grn.weight') v = v.reshape(v.shape[-1]) k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_convnext(variant, pretrained=False, **kwargs): if kwargs.get('pretrained_cfg', '') == 'fcmae': # NOTE fcmae pretrained weights have no classifier or final norm-layer (`head.norm`) # This is workaround loading with num_classes=0 w/o removing norm-layer. 
kwargs.setdefault('pretrained_strict', False) model = build_model_with_cfg( ConvNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs } def _cfgv2(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', 'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808', 'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders', 'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2', **kwargs } default_cfgs = generate_default_cfgs({ # timm specific variants 'convnext_tiny.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_zepto_rms.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), 'convnext_zepto_rms_ols.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.9), 'convnext_atto.d2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_atto_ols.a2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_atto_rms.untrained': _cfg( #hf_hub_id='timm/', test_input_size=(3, 256, 256), test_crop_pct=0.95), 'convnext_femto.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_femto_ols.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico_ols.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.d1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano_ols.d1h_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny_hnf.a2h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.r384_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'convnext_tiny.in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_nano.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_nano.r384_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=11821), 'convnext_nano.r384_ad_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=11821), 'convnext_tiny.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_small.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_tiny.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_xlarge.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.fb_in22k_ft_in1k_384': _cfg( 
url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_tiny.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_small.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_base.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_large.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_xlarge.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2( 
url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt", hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'), 'convnextv2_atto.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_femto.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_pico.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_nano.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_huge.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_atto.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_femto.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_pico.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_nano.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_tiny.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_base.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_large.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_huge.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_small.untrained': _cfg(), # CLIP weights, fine-tuned on in1k or in12k + in1k 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, 
std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0 ), 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash' ), 'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xxlarge.clip_laion2b_soup_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), # CLIP original image tower weights 'convnext_base.clip_laion2b': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laion2b_augreg': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_augreg_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_large_mlp.clip_laion2b_augreg': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, 
num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), 'convnext_xxlarge.clip_laion2b_soup': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), 'convnext_xxlarge.clip_laion2b_rewind': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), "test_convnext.r160_in1k": _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), "test_convnext2.r160_in1k": _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), "test_convnext3.r160_in1k": _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95), }) @register_model def convnext_zepto_rms(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict(depths=(2, 2, 4, 2), dims=(32, 64, 128, 256), conv_mlp=True, norm_layer='simplenorm') model = _create_convnext('convnext_zepto_rms', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_zepto_rms_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict( depths=(2, 2, 4, 2), dims=(32, 64, 128, 256), conv_mlp=True, norm_layer='simplenorm', stem_type='overlap_act') model = _create_convnext('convnext_zepto_rms_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True) model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant with overlapping 3x3 conv stem, wider than non-ols femto above, current param count 3.7M model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_atto_rms(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, norm_layer='rmsnorm2d') model = _create_convnext('convnext_atto_rms', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True) model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def 
convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt: # timm pico variant model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True) model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with overlapping 3x3 conv stem model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with standard stem and head model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True) model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt: # experimental nano variant with overlapping conv stem model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap') model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt: # experimental tiny variant with norm before pooling in head (head norm first) model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True) model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768)) model = _create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768]) model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024]) model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536]) model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536) model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048]) model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def 
convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-5)) model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict( depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict( depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt: # timm pico variant model_args = dict( depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with standard stem and head model_args = dict( depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_nano', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def test_convnext(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[1, 2, 4, 2], dims=[24, 32, 48, 64], norm_eps=kwargs.pop('norm_eps', 1e-5), act_layer='gelu_tanh') model = _create_convnext('test_convnext', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def test_convnext2(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[1, 1, 1, 1], dims=[32, 64, 
96, 128], norm_eps=kwargs.pop('norm_eps', 1e-5), act_layer='gelu_tanh') model = _create_convnext('test_convnext2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def test_convnext3(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict( depths=[1, 1, 1, 1], dims=[32, 64, 96, 128], norm_eps=kwargs.pop('norm_eps', 1e-5), kernel_sizes=(7, 5, 5, 3), act_layer='silu') model = _create_convnext('test_convnext3', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k', 'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k', 'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k', 'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k', 'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k', 'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384', 'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384', 'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384', 'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384', 'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384', 'convnext_tiny_in22k': 'convnext_tiny.fb_in22k', 'convnext_small_in22k': 'convnext_small.fb_in22k', 'convnext_base_in22k': 'convnext_base.fb_in22k', 'convnext_large_in22k': 'convnext_large.fb_in22k', 'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k', })
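# --- Editor's sketch (not part of the original convnext.py): a minimal, hedged usage
# example of the forward_intermediates() API defined above, guarded so it only runs when
# this file is executed directly. It assumes torch is available (imported at the top of
# this module) and uses the convnext_atto variant registered above.
if __name__ == '__main__':
    _model = convnext_atto(pretrained=False, num_classes=0).eval()
    _x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        # take the last two stages; norm=True applies norm_pre to the final stage output
        _final, _feats = _model.forward_intermediates(_x, indices=2, norm=True)
    for _f in _feats:
        print(_f.shape)  # NCHW maps at stride 16 and 32, e.g. (1, 160, 14, 14) and (1, 320, 7, 7)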
pytorch-image-models/timm/models/convnext.py/0
{ "file_path": "pytorch-image-models/timm/models/convnext.py", "repo_id": "pytorch-image-models", "token_count": 28603 }
242
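Editor's note: checkpoint_filter_fn in the convnext.py text above remaps original Facebook ConvNeXt checkpoint keys onto timm's module naming. The standalone sketch below is illustrative only (the sample keys are hypothetical) and traces the same string substitutions without the value reshaping.

import re

def remap_fb_key(k: str) -> str:
    # same key substitutions as checkpoint_filter_fn above, minus the GRN/2D value reshaping
    k = k.replace('downsample_layers.0.', 'stem.')
    k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
    k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
    k = k.replace('dwconv', 'conv_dw')
    k = k.replace('pwconv', 'mlp.fc')
    return k

for key in ('downsample_layers.0.0.weight', 'stages.0.0.dwconv.weight', 'stages.2.5.pwconv1.weight'):
    print(key, '->', remap_fb_key(key))
# downsample_layers.0.0.weight -> stem.0.weight
# stages.0.0.dwconv.weight -> stages.0.blocks.0.conv_dw.weight
# stages.2.5.pwconv1.weight -> stages.2.blocks.5.mlp.fc1.weight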
"""FasterNet Run, Don't Walk: Chasing Higher FLOPS for Faster Neural Networks - paper: https://arxiv.org/abs/2303.03667 - code: https://github.com/JierunChen/FasterNet @article{chen2023run, title={Run, Don't Walk: Chasing Higher FLOPS for Faster Neural Networks}, author={Chen, Jierun and Kao, Shiu-hong and He, Hao and Zhuo, Weipeng and Wen, Song and Lee, Chul-Ho and Chan, S-H Gary}, journal={arXiv preprint arXiv:2303.03667}, year={2023} } Modifications by / Copyright 2025 Ryan Hou & Ross Wightman, original copyrights below """ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from functools import partial from typing import Any, Dict, List, Optional, Set, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, Linear, DropPath, trunc_normal_, LayerType from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['FasterNet'] class Partial_conv3(nn.Module): def __init__(self, dim: int, n_div: int, forward: str): super().__init__() self.dim_conv3 = dim // n_div self.dim_untouched = dim - self.dim_conv3 self.partial_conv3 = nn.Conv2d(self.dim_conv3, self.dim_conv3, 3, 1, 1, bias=False) if forward == 'slicing': self.forward = self.forward_slicing elif forward == 'split_cat': self.forward = self.forward_split_cat else: raise NotImplementedError def forward_slicing(self, x: torch.Tensor) -> torch.Tensor: # only for inference x = x.clone() # !!! Keep the original input intact for the residual connection later x[:, :self.dim_conv3, :, :] = self.partial_conv3(x[:, :self.dim_conv3, :, :]) return x def forward_split_cat(self, x: torch.Tensor) -> torch.Tensor: # for training/inference x1, x2 = torch.split(x, [self.dim_conv3, self.dim_untouched], dim=1) x1 = self.partial_conv3(x1) x = torch.cat((x1, x2), 1) return x class MLPBlock(nn.Module): def __init__( self, dim: int, n_div: int, mlp_ratio: float, drop_path: float, layer_scale_init_value: float, act_layer: LayerType = partial(nn.ReLU, inplace=True), norm_layer: LayerType = nn.BatchNorm2d, pconv_fw_type: str = 'split_cat', ): super().__init__() mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = nn.Sequential(*[ nn.Conv2d(dim, mlp_hidden_dim, 1, bias=False), norm_layer(mlp_hidden_dim), act_layer(), nn.Conv2d(mlp_hidden_dim, dim, 1, bias=False), ]) self.spatial_mixing = Partial_conv3(dim, n_div, pconv_fw_type) if layer_scale_init_value > 0: self.layer_scale = nn.Parameter( layer_scale_init_value * torch.ones((dim)), requires_grad=True) else: self.layer_scale = None self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.spatial_mixing(x) if self.layer_scale is not None: x = shortcut + self.drop_path( self.layer_scale.unsqueeze(-1).unsqueeze(-1) * self.mlp(x)) else: x = shortcut + self.drop_path(self.mlp(x)) return x class Block(nn.Module): def __init__( self, dim: int, depth: int, n_div: int, mlp_ratio: float, drop_path: float, layer_scale_init_value: float, act_layer: LayerType = partial(nn.ReLU, inplace=True), norm_layer: LayerType = nn.BatchNorm2d, pconv_fw_type: str = 'split_cat', use_merge: bool = True, merge_size: Union[int, Tuple[int, int]] = 2, ): super().__init__() self.grad_checkpointing = False self.blocks = nn.Sequential(*[ MLPBlock( dim=dim, n_div=n_div, mlp_ratio=mlp_ratio, drop_path=drop_path[i], layer_scale_init_value=layer_scale_init_value, norm_layer=norm_layer, act_layer=act_layer, pconv_fw_type=pconv_fw_type, ) for i in range(depth) ]) self.downsample = PatchMerging( dim=dim // 2, patch_size=merge_size, norm_layer=norm_layer, ) if use_merge else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class PatchEmbed(nn.Module): def __init__( self, in_chans: int, embed_dim: int, patch_size: Union[int, Tuple[int, int]] = 4, norm_layer: LayerType = nn.BatchNorm2d, ): super().__init__() self.proj = nn.Conv2d(in_chans, embed_dim, patch_size, patch_size, bias=False) self.norm = norm_layer(embed_dim) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.norm(self.proj(x)) class PatchMerging(nn.Module): def __init__( self, dim: int, patch_size: Union[int, Tuple[int, int]] = 2, norm_layer: LayerType = nn.BatchNorm2d, ): super().__init__() self.reduction = nn.Conv2d(dim, 2 * dim, patch_size, patch_size, bias=False) self.norm = norm_layer(2 * dim) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.norm(self.reduction(x)) class FasterNet(nn.Module): def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, depths: Union[int, Tuple[int, ...]] = (1, 2, 8, 2), mlp_ratio: float = 2., n_div: int = 4, patch_size: Union[int, Tuple[int, int]] = 4, merge_size: Union[int, Tuple[int, int]] = 2, patch_norm: bool = True, feature_dim: int = 1280, drop_rate: float = 0., drop_path_rate: float = 0.1, layer_scale_init_value: float = 0., act_layer: LayerType = partial(nn.ReLU, inplace=True), norm_layer: LayerType = nn.BatchNorm2d, pconv_fw_type: str = 'split_cat', ): super().__init__() assert pconv_fw_type in ('split_cat', 'slicing',) self.num_classes = num_classes self.drop_rate = drop_rate if not isinstance(depths, (list, tuple)): depths = (depths) # it means the model has only one stage self.num_stages = len(depths) self.feature_info = [] self.patch_embed = PatchEmbed( in_chans=in_chans, embed_dim=embed_dim, patch_size=patch_size, norm_layer=norm_layer if patch_norm else nn.Identity, ) # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # build layers stages_list = [] for i in range(self.num_stages): dim = int(embed_dim * 2 ** i) stage = Block( dim=dim, depth=depths[i], n_div=n_div, mlp_ratio=mlp_ratio, drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])], layer_scale_init_value=layer_scale_init_value, norm_layer=norm_layer, act_layer=act_layer, pconv_fw_type=pconv_fw_type, use_merge=False if i == 0 else True, merge_size=merge_size, ) 
stages_list.append(stage) self.feature_info += [dict(num_chs=dim, reduction=2**(i+2), module=f'stages.{i}')] self.stages = nn.Sequential(*stages_list) # building last several layers self.num_features = prev_chs = int(embed_dim * 2 ** (self.num_stages - 1)) self.head_hidden_size = out_chs = feature_dim # 1280 self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=False) self.act = act_layer() self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(out_chs, num_classes, bias=True) if num_classes > 0 else nn.Identity() self._initialize_weights() def _initialize_weights(self): for name, m in self.named_modules(): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self) -> Set: return set() @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Any]: matcher = dict( stem=r'^patch_embed', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^conv_head', (99999,)), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes # cannot meaningfully change pooling of efficient head after creation self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.patch_embed(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) x = self.stages(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: x = self.global_pool(x) x = self.conv_head(x) x = self.act(x) x = self.flatten(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict: Dict[str, torch.Tensor], model: nn.Module) -> Dict[str, torch.Tensor]: # if 'avgpool_pre_head' in state_dict: # return state_dict # # out_dict = { # 'conv_head.weight': state_dict.pop('avgpool_pre_head.1.weight'), # 'classifier.weight': state_dict.pop('head.weight'), # 'classifier.bias': state_dict.pop('head.bias') # } # # stage_mapping = { # 'stages.1.': 'stages.1.downsample.', # 'stages.2.': 'stages.1.', # 'stages.3.': 'stages.2.downsample.', # 'stages.4.': 'stages.2.', # 'stages.5.': 'stages.3.downsample.', # 'stages.6.': 'stages.3.' # } # # for k, v in state_dict.items(): # for old_prefix, new_prefix in stage_mapping.items(): # if k.startswith(old_prefix): # k = k.replace(old_prefix, new_prefix) # break # out_dict[k] = v return state_dict def _cfg(url: str = '', **kwargs: Any) -> Dict[str, Any]: return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic', 'test_crop_pct': 0.9, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'classifier', 'paper_ids': 'arXiv:2303.03667', 'paper_name': "Run, Don't Walk: Chasing Higher FLOPS for Faster Neural Networks", 'origin_url': 'https://github.com/JierunChen/FasterNet', **kwargs } default_cfgs = generate_default_cfgs({ 'fasternet_t0.in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_t0-epoch.281-val_acc1.71.9180.pth', ), 'fasternet_t1.in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_t1-epoch.291-val_acc1.76.2180.pth', ), 'fasternet_t2.in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_t2-epoch.289-val_acc1.78.8860.pth', ), 'fasternet_s.in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_s-epoch.299-val_acc1.81.2840.pth', ), 'fasternet_m.in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_m-epoch.291-val_acc1.82.9620.pth', ), 'fasternet_l.in1k': _cfg( hf_hub_id='timm/', #url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_l-epoch.299-val_acc1.83.5060.pth', ), }) def _create_fasternet(variant: str, pretrained: bool = False, **kwargs: Any) -> FasterNet: model = build_model_with_cfg( FasterNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs, ) return model @register_model def fasternet_t0(pretrained: bool = False, **kwargs: Any) -> FasterNet: model_args = dict(embed_dim=40, depths=(1, 2, 8, 2), drop_path_rate=0.0, act_layer=nn.GELU) return 
_create_fasternet('fasternet_t0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fasternet_t1(pretrained: bool = False, **kwargs: Any) -> FasterNet: model_args = dict(embed_dim=64, depths=(1, 2, 8, 2), drop_path_rate=0.02, act_layer=nn.GELU) return _create_fasternet('fasternet_t1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fasternet_t2(pretrained: bool = False, **kwargs: Any) -> FasterNet: model_args = dict(embed_dim=96, depths=(1, 2, 8, 2), drop_path_rate=0.05) return _create_fasternet('fasternet_t2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fasternet_s(pretrained: bool = False, **kwargs: Any) -> FasterNet: model_args = dict(embed_dim=128, depths=(1, 2, 13, 2), drop_path_rate=0.1) return _create_fasternet('fasternet_s', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fasternet_m(pretrained: bool = False, **kwargs: Any) -> FasterNet: model_args = dict(embed_dim=144, depths=(3, 4, 18, 3), drop_path_rate=0.2) return _create_fasternet('fasternet_m', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def fasternet_l(pretrained: bool = False, **kwargs: Any) -> FasterNet: model_args = dict(embed_dim=192, depths=(3, 4, 18, 3), drop_path_rate=0.3) return _create_fasternet('fasternet_l', pretrained=pretrained, **dict(model_args, **kwargs))
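# --- Editor's sketch (not part of the original fasternet.py): a quick, hedged numerical
# check that the two Partial_conv3 forward paths defined above agree; 'slicing' is the
# in-place inference path, 'split_cat' the autograd-friendly training path. Guarded so it
# only runs when this file is executed directly.
if __name__ == '__main__':
    _m = Partial_conv3(dim=64, n_div=4, forward='split_cat').eval()
    _x = torch.randn(2, 64, 16, 16)
    with torch.no_grad():
        assert torch.allclose(_m.forward_slicing(_x), _m.forward_split_cat(_x), atol=1e-6)
    print('slicing and split_cat paths match')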
pytorch-image-models/timm/models/fasternet.py/0
{ "file_path": "pytorch-image-models/timm/models/fasternet.py", "repo_id": "pytorch-image-models", "token_count": 8752 }
243
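Editor's note: the fasternet.py text above registers each stage in feature_info and passes out_indices=(0, 1, 2, 3) with flatten_sequential=True to build_model_with_cfg, so the backbone can be used as a feature pyramid. A minimal usage sketch, assuming timm is installed and exposes the registered 'fasternet_t0' name:

import torch
import timm

backbone = timm.create_model('fasternet_t0', pretrained=False, features_only=True).eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    features = backbone(x)
for reduction, feat in zip(backbone.feature_info.reduction(), features):
    print(reduction, feat.shape)  # strides 4, 8, 16, 32 per the feature_info entries above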
""" Inception-V3 Originally from torchvision Inception3 model Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import trunc_normal_, create_classifier, Linear, ConvNormAct from ._builder import build_model_with_cfg from ._builder import resolve_pretrained_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionV3'] # model_registry will add each entrypoint fn to this class InceptionA(nn.Module): def __init__(self, in_channels, pool_features, conv_block=None): super(InceptionA, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch5x5 = self.branch5x5_1(x) branch5x5 = self.branch5x5_2(branch5x5) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionB(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionB, self).__init__() conv_block = conv_block or ConvNormAct self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3(x) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionC(nn.Module): def __init__(self, in_channels, channels_7x7, conv_block=None): super(InceptionC, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) c7 = channels_7x7 self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch_pool = 
conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch7x7 = self.branch7x7_1(x) branch7x7 = self.branch7x7_2(branch7x7) branch7x7 = self.branch7x7_3(branch7x7) branch7x7dbl = self.branch7x7dbl_1(x) branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionD(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionD, self).__init__() conv_block = conv_block or ConvNormAct self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3_1(x) branch3x3 = self.branch3x3_2(branch3x3) branch7x7x3 = self.branch7x7x3_1(x) branch7x7x3 = self.branch7x7x3_2(branch7x7x3) branch7x7x3 = self.branch7x7x3_3(branch7x7x3) branch7x7x3 = self.branch7x7x3_4(branch7x7x3) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch7x7x3, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionE(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionE, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch_pool = conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch3x3 = self.branch3x3_1(x) branch3x3 = [ self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3), ] branch3x3 = torch.cat(branch3x3, 1) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = [ self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl), ] branch3x3dbl = torch.cat(branch3x3dbl, 1) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionAux(nn.Module): def __init__(self, in_channels, num_classes, conv_block=None): super(InceptionAux, self).__init__() conv_block = conv_block or ConvNormAct self.conv0 = conv_block(in_channels, 128, kernel_size=1) self.conv1 = conv_block(128, 768, kernel_size=5) self.conv1.stddev = 0.01 self.fc = Linear(768, num_classes) 
self.fc.stddev = 0.001 def forward(self, x): # N x 768 x 17 x 17 x = F.avg_pool2d(x, kernel_size=5, stride=3) # N x 768 x 5 x 5 x = self.conv0(x) # N x 128 x 5 x 5 x = self.conv1(x) # N x 768 x 1 x 1 # Adaptive average pooling x = F.adaptive_avg_pool2d(x, (1, 1)) # N x 768 x 1 x 1 x = torch.flatten(x, 1) # N x 768 x = self.fc(x) # N x 1000 return x class InceptionV3(nn.Module): """Inception-V3 """ aux_logits: torch.jit.Final[bool] def __init__( self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=False, norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionV3, self).__init__() self.num_classes = num_classes self.aux_logits = aux_logits conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2) self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) self.Mixed_5b = InceptionA(192, pool_features=32, conv_block=conv_block) self.Mixed_5c = InceptionA(256, pool_features=64, conv_block=conv_block) self.Mixed_5d = InceptionA(288, pool_features=64, conv_block=conv_block) self.Mixed_6a = InceptionB(288, conv_block=conv_block) self.Mixed_6b = InceptionC(768, channels_7x7=128, conv_block=conv_block) self.Mixed_6c = InceptionC(768, channels_7x7=160, conv_block=conv_block) self.Mixed_6d = InceptionC(768, channels_7x7=160, conv_block=conv_block) self.Mixed_6e = InceptionC(768, channels_7x7=192, conv_block=conv_block) if aux_logits: self.AuxLogits = InceptionAux(768, num_classes, conv_block=conv_block) else: self.AuxLogits = None self.Mixed_7a = InceptionD(768, conv_block=conv_block) self.Mixed_7b = InceptionE(1280, conv_block=conv_block) self.Mixed_7c = InceptionE(2048, conv_block=conv_block) self.feature_info = [ dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), dict(num_chs=288, reduction=8, module='Mixed_5d'), dict(num_chs=768, reduction=16, module='Mixed_6e'), dict(num_chs=2048, reduction=32, module='Mixed_7c'), ] self.num_features = self.head_hidden_size = 2048 self.global_pool, self.head_drop, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): stddev = m.stddev if hasattr(m, 'stddev') else 0.1 trunc_normal_(m.weight, std=stddev) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('fc',)) def _matcher(name): if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]): return 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.fc def reset_classifier(self, 
num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_preaux(self, x): x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149 x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147 x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147 x = self.Pool1(x) # N x 64 x 73 x 73 x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73 x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71 x = self.Pool2(x) # N x 192 x 35 x 35 x = self.Mixed_5b(x) # N x 256 x 35 x 35 x = self.Mixed_5c(x) # N x 288 x 35 x 35 x = self.Mixed_5d(x) # N x 288 x 35 x 35 x = self.Mixed_6a(x) # N x 768 x 17 x 17 x = self.Mixed_6b(x) # N x 768 x 17 x 17 x = self.Mixed_6c(x) # N x 768 x 17 x 17 x = self.Mixed_6d(x) # N x 768 x 17 x 17 x = self.Mixed_6e(x) # N x 768 x 17 x 17 return x def forward_postaux(self, x): x = self.Mixed_7a(x) # N x 1280 x 8 x 8 x = self.Mixed_7b(x) # N x 2048 x 8 x 8 x = self.Mixed_7c(x) # N x 2048 x 8 x 8 return x def forward_features(self, x): x = self.forward_preaux(x) if self.aux_logits: aux = self.AuxLogits(x) x = self.forward_postaux(x) return x, aux x = self.forward_postaux(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) if pre_logits: return x x = self.fc(x) return x def forward(self, x): if self.aux_logits: x, aux = self.forward_features(x) x = self.forward_head(x) return x, aux x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_v3(variant, pretrained=False, **kwargs): pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None)) aux_logits = kwargs.get('aux_logits', False) has_aux_logits = False if pretrained_cfg: # only torchvision pretrained weights have aux logits has_aux_logits = pretrained_cfg.tag == 'tv_in1k' if aux_logits: assert not kwargs.pop('features_only', False) load_strict = has_aux_logits else: load_strict = not has_aux_logits return build_model_with_cfg( InceptionV3, variant, pretrained, pretrained_cfg=pretrained_cfg, pretrained_strict=load_strict, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ # original PyTorch weights, ported from Tensorflow but modified 'inception_v3.tv_in1k': _cfg( # NOTE checkpoint has aux logit layer weights hf_hub_id='timm/', url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'), # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) 'inception_v3.tf_in1k': _cfg(hf_hub_id='timm/'), # my port of Tensorflow adversarially trained Inception V3 from # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz 'inception_v3.tf_adv_in1k': _cfg(hf_hub_id='timm/'), # from gluon pretrained models, best performing in terms of accuracy/loss metrics # https://gluon-cv.mxnet.io/model_zoo/classification.html 'inception_v3.gluon_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults std=IMAGENET_DEFAULT_STD, # also works well with inception defaults ) }) @register_model def inception_v3(pretrained=False, **kwargs) -> InceptionV3: model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs) 
return model register_model_deprecations(__name__, { 'tf_inception_v3': 'inception_v3.tf_in1k', 'adv_inception_v3': 'inception_v3.tf_adv_in1k', 'gluon_inception_v3': 'inception_v3.gluon_in1k', })
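# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example assuming the standard timm factory API: `timm.create_model`
# resolves the 'inception_v3' entrypoint registered above, and the default cfg
# declares a (3, 299, 299) input. With aux_logits left at False (the default),
# forward() returns a single logits tensor.
#
#   import timm, torch
#   model = timm.create_model('inception_v3', pretrained=False).eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 299, 299))   # -> shape (1, 1000)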
pytorch-image-models/timm/models/inception_v3.py/0
{ "file_path": "pytorch-image-models/timm/models/inception_v3.py", "repo_id": "pytorch-image-models", "token_count": 8637 }
244
""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 Paper: `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets Status: * These models are a work in progress, experiments ongoing. * Pretrained weights for two models so far, more to come. * Model details updated to closer match official JAX code now that it's released * NF-ResNet, NF-RegNet-B, and NFNet-F models supported Hacked together by / copyright Ross Wightman, 2021. """ from collections import OrderedDict from dataclasses import dataclass, replace from functools import partial from typing import Any, Callable, Dict, Optional, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame, \ get_act_layer, get_act_fn, get_attn, make_divisible from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['NormFreeNet', 'NfCfg'] # model_registry will add each entrypoint fn to this @dataclass class NfCfg: """Configuration for Normalization-Free Networks.""" depths: Tuple[int, int, int, int] channels: Tuple[int, int, int, int] alpha: float = 0.2 stem_type: str = '3x3' stem_chs: Optional[int] = None group_size: Optional[int] = None attn_layer: Optional[str] = None attn_kwargs: Optional[Dict[str, Any]] = None attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used width_factor: float = 1.0 bottle_ratio: float = 0.5 num_features: int = 0 # num out_channels for final conv, no final_conv if 0 ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models gamma_in_act: bool = False same_padding: bool = False std_conv_eps: float = 1e-5 skipinit: bool = False # disabled by default, non-trivial performance impact zero_init_fc: bool = False act_layer: str = 'silu' class GammaAct(nn.Module): """Activation function with gamma scaling factor.""" def __init__(self, act_type: str = 'relu', gamma: float = 1.0, inplace: bool = False): """Initialize GammaAct. Args: act_type: Type of activation function. gamma: Scaling factor for activation output. inplace: Whether to perform activation in-place. """ super().__init__() self.act_fn = get_act_fn(act_type) self.gamma = gamma self.inplace = inplace def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Scaled activation output. """ return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) def act_with_gamma(act_type: str, gamma: float = 1.) -> Callable: """Create activation function factory with gamma scaling. Args: act_type: Type of activation function. gamma: Scaling factor for activation output. Returns: Activation function factory. 
""" def _create(inplace: bool = False) -> GammaAct: return GammaAct(act_type, gamma=gamma, inplace=inplace) return _create class DownsampleAvg(nn.Module): """AvgPool downsampling as in 'D' ResNet variants with dilation support.""" def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, conv_layer: Callable = ScaledStdConv2d, ): """Initialize DownsampleAvg. Args: in_chs: Input channels. out_chs: Output channels. stride: Stride for downsampling. dilation: Dilation rate. first_dilation: First dilation rate (unused). conv_layer: Convolution layer type. """ super(DownsampleAvg, self).__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = conv_layer(in_chs, out_chs, 1, stride=1) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Downsampled tensor. """ return self.conv(self.pool(x)) @register_notrace_module # reason: mul_ causes FX to drop a relevant node. https://github.com/pytorch/pytorch/issues/68301 class NormFreeBlock(nn.Module): """Normalization-Free pre-activation block. """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, stride: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, alpha: float = 1.0, beta: float = 1.0, bottle_ratio: float = 0.25, group_size: Optional[int] = None, ch_div: int = 1, reg: bool = True, extra_conv: bool = False, skipinit: bool = False, attn_layer: Optional[Callable] = None, attn_gain: float = 2.0, act_layer: Optional[Callable] = None, conv_layer: Callable = ScaledStdConv2d, drop_path_rate: float = 0., ): """Initialize NormFreeBlock. Args: in_chs: Input channels. out_chs: Output channels. stride: Stride for convolution. dilation: Dilation rate. first_dilation: First dilation rate. alpha: Alpha scaling factor for residual. beta: Beta scaling factor for pre-activation. bottle_ratio: Bottleneck ratio. group_size: Group convolution size. ch_div: Channel divisor for rounding. reg: Use RegNet-style configuration. extra_conv: Add extra 3x3 convolution. skipinit: Use skipinit initialization. attn_layer: Attention layer type. attn_gain: Attention gain factor. act_layer: Activation layer type. conv_layer: Convolution layer type. drop_path_rate: Stochastic depth drop rate. 
""" super().__init__() first_dilation = first_dilation or dilation out_chs = out_chs or in_chs # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) groups = 1 if not group_size else mid_chs // group_size if group_size and group_size % ch_div == 0: mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error self.alpha = alpha self.beta = beta self.attn_gain = attn_gain if in_chs != out_chs or stride != 1 or dilation != first_dilation: self.downsample = DownsampleAvg( in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer, ) else: self.downsample = None self.act1 = act_layer() self.conv1 = conv_layer(in_chs, mid_chs, 1) self.act2 = act_layer(inplace=True) self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) if extra_conv: self.act2b = act_layer(inplace=True) self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) else: self.act2b = None self.conv2b = None if reg and attn_layer is not None: self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 else: self.attn = None self.act3 = act_layer() self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) if not reg and attn_layer is not None: self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 else: self.attn_last = None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Output tensor. """ out = self.act1(x) * self.beta # shortcut branch shortcut = x if self.downsample is not None: shortcut = self.downsample(out) # residual branch out = self.conv1(out) out = self.conv2(self.act2(out)) if self.conv2b is not None: out = self.conv2b(self.act2b(out)) if self.attn is not None: out = self.attn_gain * self.attn(out) out = self.conv3(self.act3(out)) if self.attn_last is not None: out = self.attn_gain * self.attn_last(out) out = self.drop_path(out) if self.skipinit_gain is not None: out.mul_(self.skipinit_gain) out = out * self.alpha + shortcut return out def create_stem( in_chs: int, out_chs: int, stem_type: str = '', conv_layer: Optional[Callable] = None, act_layer: Optional[Callable] = None, preact_feature: bool = True, ) -> Tuple[nn.Sequential, int, Dict[str, Any]]: """Create stem module for NFNet models. Args: in_chs: Input channels. out_chs: Output channels. stem_type: Type of stem ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', etc.). conv_layer: Convolution layer type. act_layer: Activation layer type. preact_feature: Use pre-activation feature. Returns: Tuple of (stem_module, stem_stride, stem_feature_info). 
""" stem_stride = 2 stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') stem = OrderedDict() assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') if 'deep' in stem_type: if 'quad' in stem_type: # 4 deep conv stack as in NFNet-F models assert 'pool' not in stem_type stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) strides = (2, 1, 1, 2) stem_stride = 4 stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') else: if 'tiered' in stem_type: stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py else: stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets strides = (2, 1, 1) stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') last_idx = len(stem_chs) - 1 for i, (c, s) in enumerate(zip(stem_chs, strides)): stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) if i != last_idx: stem[f'act{i + 2}'] = act_layer(inplace=True) in_chs = c elif '3x3' in stem_type: # 3x3 stem conv as in RegNet stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) else: # 7x7 stem conv as in ResNet stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) if 'pool' in stem_type: stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) stem_stride = 4 return nn.Sequential(stem), stem_stride, stem_feature # from https://github.com/deepmind/deepmind-research/tree/master/nfnets _nonlin_gamma = dict( identity=1.0, celu=1.270926833152771, elu=1.2716004848480225, gelu=1.7015043497085571, leaky_relu=1.70590341091156, log_sigmoid=1.9193484783172607, log_softmax=1.0002083778381348, relu=1.7139588594436646, relu6=1.7131484746932983, selu=1.0008515119552612, sigmoid=4.803835391998291, silu=1.7881293296813965, softsign=2.338853120803833, softplus=1.9203323125839233, tanh=1.5939117670059204, ) class NormFreeNet(nn.Module): """ Normalization-Free Network As described in : `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 and `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and the (preact) ResNet models described earlier in the paper. There are a few differences: * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), this changes channel dim and param counts slightly from the paper models * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but apply it in each activation. This is slightly slower, numerically different, but matches official impl. * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput for what it is/does. Approx 8-10% throughput loss. """ def __init__( self, cfg: NfCfg, num_classes: int = 1000, in_chans: int = 3, global_pool: str = 'avg', output_stride: int = 32, drop_rate: float = 0., drop_path_rate: float = 0., **kwargs: Any, ): """ Args: cfg: Model architecture configuration. num_classes: Number of classifier classes. in_chans: Number of input channels. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). 
drop_rate: Dropout rate. drop_path_rate: Stochastic depth drop-path rate. **kwargs: Extra kwargs overlayed onto cfg. """ super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False cfg = replace(cfg, **kwargs) assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d if cfg.gamma_in_act: act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) else: act_layer = get_act_layer(cfg.act_layer) conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) self.stem, stem_stride, stem_feat = create_stem( in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer, ) self.feature_info = [stem_feat] drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] prev_chs = stem_chs net_stride = stem_stride dilation = 1 expected_var = 1.0 stages = [] for stage_idx, stage_depth in enumerate(cfg.depths): stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 blocks = [] for block_idx in range(cfg.depths[stage_idx]): first_block = block_idx == 0 and stage_idx == 0 out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) blocks += [NormFreeBlock( in_chs=prev_chs, out_chs=out_chs, alpha=cfg.alpha, beta=1. / expected_var ** 0.5, stride=stride if block_idx == 0 else 1, dilation=dilation, first_dilation=first_dilation, group_size=cfg.group_size, bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, ch_div=cfg.ch_div, reg=cfg.reg, extra_conv=cfg.extra_conv, skipinit=cfg.skipinit, attn_layer=attn_layer, attn_gain=cfg.attn_gain, act_layer=act_layer, conv_layer=conv_layer, drop_path_rate=drop_path_rates[stage_idx][block_idx], )] if block_idx == 0: expected_var = 1. # expected var is reset after first block of each stage expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance first_dilation = dilation prev_chs = out_chs self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] stages += [nn.Sequential(*blocks)] self.stages = nn.Sequential(*stages) if cfg.num_features: # The paper NFRegNet models have an EfficientNet-like final head convolution. 
self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) self.final_conv = conv_layer(prev_chs, self.num_features, 1) self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') else: self.num_features = prev_chs self.final_conv = nn.Identity() self.final_act = act_layer(inplace=cfg.num_features > 0) self.head_hidden_size = self.num_features self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) for n, m in self.named_modules(): if 'fc' in n and isinstance(m, nn.Linear): if cfg.zero_init_fc: nn.init.zeros_(m.weight) else: nn.init.normal_(m.weight, 0., .01) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Any]: """Group parameters for optimization.""" matcher = dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), (r'^final_conv', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing.""" self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: """Get the classifier head.""" return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: """Reset the classifier head. Args: num_classes: Number of classes for new classifier. global_pool: Global pooling type. """ self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x: torch.Tensor) -> torch.Tensor: """Forward pass through feature extraction layers. Args: x: Input tensor. Returns: Feature tensor. """ x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.final_conv(x) x = self.final_act(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through classifier head. Args: x: Input features. pre_logits: Return features before final linear layer. Returns: Classification logits or features. """ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Output logits. """ x = self.forward_features(x) x = self.forward_head(x) return x def _nfres_cfg( depths: Tuple[int, ...], channels: Tuple[int, ...] = (256, 512, 1024, 2048), group_size: Optional[int] = None, act_layer: str = 'relu', attn_layer: Optional[str] = None, attn_kwargs: Optional[Dict[str, Any]] = None, ) -> NfCfg: """Create NFNet ResNet configuration. Args: depths: Number of blocks in each stage. channels: Channel dimensions for each stage. group_size: Group convolution size. act_layer: Activation layer type. attn_layer: Attention layer type. attn_kwargs: Attention layer arguments. Returns: NFNet configuration. """ attn_kwargs = attn_kwargs or {} cfg = NfCfg( depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs, ) return cfg def _nfreg_cfg(depths: Tuple[int, ...], channels: Tuple[int, ...] = (48, 104, 208, 440)) -> NfCfg: """Create NFNet RegNet configuration. Args: depths: Number of blocks in each stage. 
channels: Channel dimensions for each stage. Returns: NFNet configuration. """ num_features = 1280 * channels[-1] // 440 attn_kwargs = dict(rd_ratio=0.5) cfg = NfCfg( depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs, ) return cfg def _nfnet_cfg( depths: Tuple[int, ...], channels: Tuple[int, ...] = (256, 512, 1536, 1536), group_size: int = 128, bottle_ratio: float = 0.5, feat_mult: float = 2., act_layer: str = 'gelu', attn_layer: str = 'se', attn_kwargs: Optional[Dict[str, Any]] = None, ) -> NfCfg: """Create NFNet configuration. Args: depths: Number of blocks in each stage. channels: Channel dimensions for each stage. group_size: Group convolution size. bottle_ratio: Bottleneck ratio. feat_mult: Feature multiplier for final layer. act_layer: Activation layer type. attn_layer: Attention layer type. attn_kwargs: Attention layer arguments. Returns: NFNet configuration. """ num_features = int(channels[-1] * feat_mult) attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) cfg = NfCfg( depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs, ) return cfg def _dm_nfnet_cfg( depths: Tuple[int, ...], channels: Tuple[int, ...] = (256, 512, 1536, 1536), act_layer: str = 'gelu', skipinit: bool = True, ) -> NfCfg: """Create DeepMind NFNet configuration. Args: depths: Number of blocks in each stage. channels: Channel dimensions for each stage. act_layer: Activation layer type. skipinit: Use skipinit initialization. Returns: NFNet configuration. 
""" cfg = NfCfg( depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128, bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit, num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5), ) return cfg model_cfgs = dict( # NFNet-F models w/ GELU compatible with DeepMind weights dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)), dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), # NFNet-F models w/ GELU nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), # Experimental 'light' versions of NFNet-F that are little leaner, w/ SiLU act nfnet_l0=_nfnet_cfg( depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), eca_nfnet_l0=_nfnet_cfg( depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l1=_nfnet_cfg( depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l2=_nfnet_cfg( depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), eca_nfnet_l3=_nfnet_cfg( depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), # EffNet influenced RegNet defs. # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8. 
nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), # ResNet (preact, D style deep stem/avg down) defs nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), test_nfnet=_nfnet_cfg( depths=(1, 1, 1, 1), channels=(32, 64, 96, 128), feat_mult=1.5, group_size=8, bottle_ratio=0.25, attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), ) def _create_normfreenet(variant: str, pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Create a NormFreeNet model. Args: variant: Model variant name. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: NormFreeNet model instance. """ model_cfg = model_cfgs[variant] feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg( NormFreeNet, variant, pretrained, model_cfg=model_cfg, feature_cfg=feature_cfg, **kwargs, ) def _dcfg(url: str = '', **kwargs: Any) -> Dict[str, Any]: """Create default configuration dictionary. Args: url: Model weight URL. **kwargs: Additional configuration options. Returns: Configuration dictionary. 
""" return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'dm_nfnet_f0.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9, crop_mode='squash'), 'dm_nfnet_f1.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91, crop_mode='squash'), 'dm_nfnet_f2.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92, crop_mode='squash'), 'dm_nfnet_f3.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94, crop_mode='squash'), 'dm_nfnet_f4.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951, crop_mode='squash'), 'dm_nfnet_f5.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954, crop_mode='squash'), 'dm_nfnet_f6.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956, crop_mode='squash'), 'nfnet_f0': _dcfg( url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), 'nfnet_f1': _dcfg( url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), 'nfnet_f2': _dcfg( url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), 'nfnet_f3': _dcfg( url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), 'nfnet_f4': _dcfg( url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), 'nfnet_f5': _dcfg( url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), 'nfnet_f6': _dcfg( url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), 'nfnet_f7': _dcfg( url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), 'nfnet_l0.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l0.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', pool_size=(7, 7), input_size=(3, 224, 224), 
test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l1.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'eca_nfnet_l2.ra3_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'eca_nfnet_l3': _dcfg( url='', pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), test_crop_pct=1.0), 'nf_regnet_b0': _dcfg( url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), 'nf_regnet_b1.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec 'nf_regnet_b2': _dcfg( url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), 'nf_regnet_b3': _dcfg( url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), 'nf_regnet_b4': _dcfg( url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), 'nf_regnet_b5': _dcfg( url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), 'nf_resnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_resnet50.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), 'nf_resnet101': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet101': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet101': _dcfg(url='', first_conv='stem.conv'), 'test_nfnet.r160_in1k': _dcfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, input_size=(3, 160, 160), pool_size=(5, 5)), }) @register_model def dm_nfnet_f0(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F0 (DeepMind weight compatible).""" return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f1(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F1 (DeepMind weight compatible).""" return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f2(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F2 (DeepMind weight compatible).""" return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f3(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F3 (DeepMind weight compatible).""" return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f4(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F4 (DeepMind weight compatible).""" return 
_create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f5(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F5 (DeepMind weight compatible).""" return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f6(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F6 (DeepMind weight compatible).""" return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f0(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F0.""" return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) @register_model def nfnet_f1(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F1.""" return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) @register_model def nfnet_f2(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F2.""" return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) @register_model def nfnet_f3(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F3.""" return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) @register_model def nfnet_f4(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F4.""" return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) @register_model def nfnet_f5(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F5.""" return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) @register_model def nfnet_f6(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F6.""" return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f7(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-F7.""" return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) @register_model def nfnet_l0(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """NFNet-L0b w/ SiLU. My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio """ return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l0(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """ECA-NFNet-L0 w/ SiLU. My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l1(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """ECA-NFNet-L1 w/ SiLU. My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l2(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """ECA-NFNet-L2 w/ SiLU. My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l3(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """ECA-NFNet-L3 w/ SiLU. My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b0(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free RegNet-B0. 
""" return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b1(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free RegNet-B1. """ return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b2(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free RegNet-B2. """ return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b3(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free RegNet-B3. """ return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b4(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free RegNet-B4. """ return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b5(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free RegNet-B5. """ return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) @register_model def nf_resnet26(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free ResNet-26. """ return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) @register_model def nf_resnet50(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free ResNet-50. """ return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) @register_model def nf_resnet101(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free ResNet-101. """ return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) @register_model def nf_seresnet26(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free SE-ResNet26.""" return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) @register_model def nf_seresnet50(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free SE-ResNet50.""" return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) @register_model def nf_seresnet101(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free SE-ResNet101.""" return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet26(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free ECA-ResNet26.""" return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet50(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free ECA-ResNet50.""" return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet101(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Normalization-Free ECA-ResNet101.""" return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs) @register_model def test_nfnet(pretrained: bool = False, **kwargs: Any) -> NormFreeNet: """Test NFNet model for experimentation.""" return _create_normfreenet('test_nfnet', pretrained=pretrained, **kwargs)
pytorch-image-models/timm/models/nfnet.py/0
{ "file_path": "pytorch-image-models/timm/models/nfnet.py", "repo_id": "pytorch-image-models", "token_count": 21054 }
245
""" Sequencer Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf """ # Copyright (c) 2022. Yuki Tatsunami # Licensed under the Apache License, Version 2.0 (the "License"); import math from functools import partial from itertools import accumulate from typing import Optional, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: if flax: # Flax defaults lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): stdv = 1.0 / math.sqrt(module.hidden_size) for weight in module.parameters(): nn.init.uniform_(weight, -stdv, stdv) elif hasattr(module, 'init_weights'): module.init_weights() class RNNIdentity(nn.Module): def __init__(self, *args, **kwargs): super(RNNIdentity, self).__init__() def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]: return x, None class RNN2dBase(nn.Module): def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = 2 * hidden_size if bidirectional else hidden_size self.union = union self.with_vertical = True self.with_horizontal = True self.with_fc = with_fc self.fc = None if with_fc: if union == "cat": self.fc = nn.Linear(2 * self.output_size, input_size) elif union == "add": self.fc = nn.Linear(self.output_size, input_size) elif union == "vertical": self.fc = nn.Linear(self.output_size, input_size) self.with_horizontal = False elif union == "horizontal": self.fc = nn.Linear(self.output_size, input_size) self.with_vertical = False else: raise ValueError("Unrecognized union: " + union) elif union == "cat": pass if 2 * self.output_size != input_size: raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.") elif union == "add": pass if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") elif union == "vertical": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_horizontal = False elif union == "horizontal": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_vertical = False else: raise ValueError("Unrecognized union: 
" + union) self.rnn_v = RNNIdentity() self.rnn_h = RNNIdentity() def forward(self, x): B, H, W, C = x.shape if self.with_vertical: v = x.permute(0, 2, 1, 3) v = v.reshape(-1, H, C) v, _ = self.rnn_v(v) v = v.reshape(B, W, H, -1) v = v.permute(0, 2, 1, 3) else: v = None if self.with_horizontal: h = x.reshape(-1, W, C) h, _ = self.rnn_h(h) h = h.reshape(B, H, W, -1) else: h = None if v is not None and h is not None: if self.union == "cat": x = torch.cat([v, h], dim=-1) else: x = v + h elif v is not None: x = v elif h is not None: x = h if self.fc is not None: x = self.fc(x) return x class LSTM2d(RNN2dBase): def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) if self.with_vertical: self.rnn_v = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) if self.with_horizontal: self.rnn_h = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) class Sequencer2dBlock(nn.Module): def __init__( self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() channels_dim = int(mlp_ratio * dim) self.norm1 = norm_layer(dim) self.rnn_tokens = rnn_layer( dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Shuffle(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: B, H, W, C = x.shape r = torch.randperm(H * W) x = x.reshape(B, -1, C) x = x[:, r, :].reshape(B, H, W, -1) return x class Downsample2d(nn.Module): def __init__(self, input_dim, output_dim, patch_size): super().__init__() self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.down(x) x = x.permute(0, 2, 3, 1) return x class Sequencer2dStage(nn.Module): def __init__( self, dim, dim_out, depth, patch_size, hidden_size, mlp_ratio, downsample=False, block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() if downsample: self.downsample = Downsample2d(dim, dim_out, patch_size) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): blocks.append(block_layer( dim_out, hidden_size, mlp_ratio=mlp_ratio, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop, drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path, )) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class Sequencer2d(nn.Module): def __init__( self, num_classes=1000, img_size=224, in_chans=3, 
global_pool='avg', layers=(4, 3, 8, 3), patch_sizes=(7, 2, 2, 1), embed_dims=(192, 384, 384, 384), hidden_sizes=(48, 96, 96, 96), mlp_ratios=(3.0, 3.0, 3.0, 3.0), block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_rnn_layers=1, bidirectional=True, union="cat", with_fc=True, drop_rate=0., drop_path_rate=0., nlhb=False, stem_norm=False, ): super().__init__() assert global_pool in ('', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = embed_dims[-1] # for consistency with other models self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC) self.output_fmt = 'NHWC' self.feature_info = [] self.stem = PatchEmbed( img_size=None, patch_size=patch_sizes[0], in_chans=in_chans, embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, flatten=False, output_fmt='NHWC', ) assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) reductions = list(accumulate(patch_sizes, lambda x, y: x * y)) stages = [] prev_dim = embed_dims[0] for i, _ in enumerate(embed_dims): stages += [Sequencer2dStage( prev_dim, embed_dims[i], depth=layers[i], downsample=i > 0, patch_size=patch_sizes[i], hidden_size=hidden_sizes[i], mlp_ratio=mlp_ratios[i], block_layer=block_layer, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_rnn_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop_rate, drop_path=drop_path_rate, )] prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.norm = norm_layer(embed_dims[-1]) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt, ) self.init_weights(nlhb=nlhb) def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0. 
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)', None), (r'^norm', (99999,)) ] if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^stages\.(\d+)\.downsample', (0,)), (r'^norm', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'stages.0.blocks.0.norm1.weight' in state_dict: return state_dict # already translated checkpoint if 'model' in state_dict: state_dict = state_dict['model'] import re out_dict = {} for k, v in state_dict.items(): k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k) k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_sequencer2d(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(3)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( Sequencer2d, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 8, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, ) model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 14, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[8, 8, 16, 4], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, 
union="cat", with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/sequencer.py/0
{ "file_path": "pytorch-image-models/timm/models/sequencer.py", "repo_id": "pytorch-image-models", "token_count": 9247 }
246
""" Relative Position Vision Transformer (ViT) in PyTorch NOTE: these models are experimental / WIP, expect changes Hacked together by / Copyright 2022, Ross Wightman """ import logging import math from functools import partial from typing import List, Optional, Tuple, Type, Union try: from typing import Literal except ImportError: from typing_extensions import Literal import torch import torch.nn as nn from torch.jit import Final from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn, LayerType from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint from ._registry import generate_default_cfgs, register_model from .vision_transformer import get_init_weights_vit __all__ = ['VisionTransformerRelPos'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class RelPosAttention(nn.Module): fused_attn: Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=False, qk_norm=False, rel_pos_cls=None, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) q = self.q_norm(q) k = self.k_norm(k) if self.fused_attn: if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos else: attn_bias = None x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class RelPosBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = RelPosAttention( dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() # NOTE: drop path for stochastic depth, we shall see if this is better than dropout 
here self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostRelPosBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.init_values = init_values self.attn = RelPosAttention( dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.init_weights() def init_weights(self): # NOTE this init overrides that base model init with specific changes for the block type if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class VisionTransformerRelPos(nn.Module): """ Vision Transformer w/ Relative Position Bias Differing from classic vit, this impl * uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed * defaults to no class token (can be enabled) * defaults to global avg pool for head (can be changed) * layer-scale (residual branch gain) enabled """ def __init__( self, img_size: Union[int, Tuple[int, int]] = 224, patch_size: Union[int, Tuple[int, int]] = 16, in_chans: int = 3, num_classes: int = 1000, global_pool: Literal['', 'avg', 'token', 'map'] = 'avg', embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4., qkv_bias: bool = True, qk_norm: bool = False, init_values: Optional[float] = 1e-6, class_token: bool = False, fc_norm: bool = False, rel_pos_type: str = 'mlp', rel_pos_dim: Optional[int] = None, shared_rel_pos: bool = False, drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., weight_init: Literal['skip', 'jax', 'moco', ''] = 'skip', fix_init: bool = False, embed_layer: Type[nn.Module] = PatchEmbed, norm_layer: Optional[LayerType] = None, act_layer: Optional[LayerType] = None, block_fn: Type[nn.Module] = RelPosBlock ): """ Args: img_size: input image size patch_size: patch size in_chans: number of input channels num_classes: number of classes for classification head global_pool: type of global pooling for final sequence (default: 'avg') embed_dim: embedding dimension depth: depth of transformer num_heads: number of attention heads mlp_ratio: ratio of mlp hidden dim to embedding dim qkv_bias: enable bias for qkv if True qk_norm: Enable normalization of query and 
key in attention init_values: layer-scale init values class_token: use class token (default: False) fc_norm: use pre classifier norm instead of pre-pool rel_pos_type: type of relative position shared_rel_pos: share relative pos across all blocks drop_rate: dropout rate proj_drop_rate: projection dropout rate attn_drop_rate: attention dropout rate drop_path_rate: stochastic depth rate weight_init: weight init scheme fix_init: apply weight initialization fix (scaling w/ layer index) embed_layer: patch embedding layer norm_layer: normalization layer act_layer: MLP activation layer """ super().__init__() assert global_pool in ('', 'avg', 'token') assert class_token or global_pool != 'token' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models self.num_prefix_tokens = 1 if class_token else 0 self.grad_checkpointing = False self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) feat_size = self.patch_embed.grid_size r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens) if rel_pos_type.startswith('mlp'): if rel_pos_dim: rel_pos_args['hidden_dim'] = rel_pos_dim if 'swin' in rel_pos_type: rel_pos_args['mode'] = 'swin' rel_pos_cls = partial(RelPosMlp, **rel_pos_args) else: rel_pos_cls = partial(RelPosBias, **rel_pos_args) self.shared_rel_pos = None if shared_rel_pos: self.shared_rel_pos = rel_pos_cls(num_heads=num_heads) # NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both... 
rel_pos_cls = None self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) self.feature_info = [ dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)] self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity() # Classifier Head self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if weight_init != 'skip': self.init_weights(weight_init) if fix_init: self.fix_init_weight() def init_weights(self, mode=''): assert mode in ('jax', 'moco', '') if self.cls_token is not None: nn.init.normal_(self.cls_token, std=1e-6) named_apply(get_init_weights_vit(mode), self) def fix_init_weight(self): def rescale(param, _layer_id): param.div_(math.sqrt(2.0 * _layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) @torch.jit.ignore def no_weight_decay(self): return {'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, return_prefix_tokens: bool = False, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence return_prefix_tokens: Return both prefix and spatial intermediate tokens norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.' 
reshape = output_fmt == 'NCHW' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # forward pass B, _, height, width = x.shape x = self.patch_embed(x) if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos) else: x = blk(x, shared_rel_pos=shared_rel_pos) if i in take_indices: # normalize intermediates with final norm layer if enabled intermediates.append(self.norm(x) if norm else x) # process intermediates if self.num_prefix_tokens: # split prefix (e.g. class, distill) and spatial feature tokens prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] if reshape: # reshape to BCHW output format H, W = self.patch_embed.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if not torch.jit.is_scripting() and return_prefix_tokens: # return_prefix not support in torchscript due to poor type handling intermediates = list(zip(intermediates, prefix_tokens)) if intermediates_only: return intermediates x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.fc_norm = nn.Identity() self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos) else: x = blk(x, shared_rel_pos=shared_rel_pos) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg( VisionTransformerRelPos, variant, pretrained, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth', hf_hub_id='timm/', input_size=(3, 256, 256)), 'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)), 'vit_relpos_small_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth', hf_hub_id='timm/'), 'vit_srelpos_small_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth', hf_hub_id='timm/'), 'vit_srelpos_medium_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_cls_224.untrained': _cfg(), 'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth', hf_hub_id='timm/'), 'vit_relpos_small_patch16_rpn_224.untrained': _cfg(), 'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_rpn_224.untrained': _cfg(), }) @register_model def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token """ model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no 
class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False, rel_pos_dim=384, shared_rel_pos=True) model = _create_vision_transformer_relpos( 'vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=512, shared_rel_pos=True) model = _create_vision_transformer_relpos( 'vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-M/16) w/ relative log-coord position, class token present """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=256, class_token=True, global_pool='token') model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, class_token=True, global_pool='token') model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled Leaving here for comparisons w/ a future re-train as it performs quite well. 
""" model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/vision_transformer_relpos.py/0
{ "file_path": "pytorch-image-models/timm/models/vision_transformer_relpos.py", "repo_id": "pytorch-image-models", "token_count": 13432 }
247
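A minimal usage sketch for the relative-position ViT variants registered in the file above, assuming a timm install that includes these `vit_relpos_*` registrations; the 10-class head and random input are illustrative only and not part of the original file.

# Usage sketch (assumption: timm with the vit_relpos_* registrations above is installed).
import torch
import timm

model = timm.create_model('vit_relpos_small_patch16_224', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)  # (1, 10); global avg pool over patch tokens feeds the head (no class token by default)
    # forward_intermediates() also returns per-block feature maps, reshaped to NCHW by default
    final, feats = model.forward_intermediates(x, indices=2)

print(logits.shape, [tuple(f.shape) for f in feats])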
""" AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 Code: https://github.com/clovaai/AdamP Copyright (c) 2020-present NAVER Corp. MIT license """ import torch import torch.nn.functional as F from torch.optim.optimizer import Optimizer import math def _channel_view(x) -> torch.Tensor: return x.reshape(x.size(0), -1) def _layer_view(x) -> torch.Tensor: return x.reshape(1, -1) def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): wd = 1. expand_size = (-1,) + (1,) * (len(p.shape) - 1) for view_func in [_channel_view, _layer_view]: param_view = view_func(p) grad_view = view_func(grad) cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() # FIXME this is a problem for PyTorch XLA if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) wd = wd_ratio return perturb, wd return perturb, wd class AdamP(Optimizer): def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False, ): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, delta=delta, wd_ratio=wd_ratio, nesterov=nesterov, ) super(AdamP, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad beta1, beta2 = group['betas'] nesterov = group['nesterov'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) # Adam exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) step_size = group['lr'] / bias_correction1 if nesterov: perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom else: perturb = exp_avg / denom # Projection wd_ratio = 1. if len(p.shape) > 1: perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) # Weight decay if group['weight_decay'] > 0: p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio) # Step p.add_(perturb, alpha=-step_size) return loss
pytorch-image-models/timm/optim/adamp.py/0
{ "file_path": "pytorch-image-models/timm/optim/adamp.py", "repo_id": "pytorch-image-models", "token_count": 2028 }
248
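For context, a short self-contained sketch of driving the AdamP optimizer defined above on a toy problem; the model and data are invented for illustration, and in practice the class is also reachable as timm.optim.AdamP.

# Toy training loop exercising AdamP (model/data invented for the example).
import torch
import torch.nn as nn
import torch.nn.functional as F

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 1))
optimizer = AdamP(model.parameters(), lr=1e-3, weight_decay=1e-2, wd_ratio=0.1, nesterov=True)

x, y = torch.randn(64, 16), torch.randn(64, 1)
for _ in range(10):
    optimizer.zero_grad()
    loss = F.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()  # the projection step only applies to params with ndim > 1 (the Linear weights here)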
"""RAdam Optimizer. Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 NOTE: This impl has been deprecated in favour of torch.optim.RAdam and remains as a reference """ import math import torch from torch.optim.optimizer import Optimizer class RAdamLegacy(Optimizer): """ PyTorch RAdam optimizer NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, ): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)] ) super(RAdamLegacy, self).__init__(params, defaults) def __setstate__(self, state): super(RAdamLegacy, self).__setstate__(state) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.float() if grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') p_fp32 = p.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_fp32) state['exp_avg_sq'] = torch.zeros_like(p_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) state['step'] += 1 buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: num_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma # more conservative since it's an approximated value if num_sma >= 5: step_size = group['lr'] * math.sqrt( (1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) else: step_size = group['lr'] / (1 - beta1 ** state['step']) buffered[2] = step_size if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) # more conservative since it's an approximated value if num_sma >= 5: denom = exp_avg_sq.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: p_fp32.add_(exp_avg, alpha=-step_size) p.copy_(p_fp32) return loss
pytorch-image-models/timm/optim/radam.py/0
{ "file_path": "pytorch-image-models/timm/optim/radam.py", "repo_id": "pytorch-image-models", "token_count": 2159 }
249
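A brief reference-style sketch of the deprecated RAdamLegacy optimizer above in use; as its docstring notes, torch.optim.RAdam is the recommended replacement, and the toy model and data below are illustrative only.

# Reference usage of RAdamLegacy (deprecated); prefer torch.optim.RAdam in new code.
import torch
import torch.nn as nn
import torch.nn.functional as F

model = nn.Linear(8, 2)
optimizer = RAdamLegacy(model.parameters(), lr=1e-3, weight_decay=1e-4)

x, y = torch.randn(32, 8), torch.randint(0, 2, (32,))
for _ in range(5):
    optimizer.zero_grad()
    loss = F.cross_entropy(model(x), y)
    loss.backward()
    optimizer.step()  # rectified (variance-adapted) update kicks in once num_sma >= 5; before that it falls back to an unadapted momentum step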
import fnmatch import re from collections import OrderedDict from typing import Union, Optional, List import torch class AttentionExtract(torch.nn.Module): # defaults should cover a significant number of timm models with attention maps. default_node_names = ['*attn.softmax'] default_module_names = ['*attn_drop'] def __init__( self, model: Union[torch.nn.Module], names: Optional[List[str]] = None, mode: str = 'eval', method: str = 'fx', hook_type: str = 'forward', use_regex: bool = False, ): """ Extract attention maps (or other activations) from a model by name. Args: model: Instantiated model to extract from. names: List of concrete or wildcard names to extract. Names are nodes for fx and modules for hooks. mode: 'train' or 'eval' model mode. method: 'fx' or 'hook' extraction method. hook_type: 'forward' or 'forward_pre' hooks used. use_regex: Use regex instead of fnmatch """ super().__init__() assert mode in ('train', 'eval') if mode == 'train': model = model.train() else: model = model.eval() assert method in ('fx', 'hook') if method == 'fx': # names are activation node names from timm.models._features_fx import get_graph_node_names, GraphExtractNet node_names = get_graph_node_names(model)[0 if mode == 'train' else 1] names = names or self.default_node_names if use_regex: regexes = [re.compile(r) for r in names] matched = [g for g in node_names if any([r.match(g) for r in regexes])] else: matched = [g for g in node_names if any([fnmatch.fnmatch(g, n) for n in names])] if not matched: raise RuntimeError(f'No node names found matching {names}.') self.model = GraphExtractNet(model, matched, return_dict=True) self.hooks = None else: # names are module names assert hook_type in ('forward', 'forward_pre') from timm.models._features import FeatureHooks module_names = [n for n, m in model.named_modules()] names = names or self.default_module_names if use_regex: regexes = [re.compile(r) for r in names] matched = [m for m in module_names if any([r.match(m) for r in regexes])] else: matched = [m for m in module_names if any([fnmatch.fnmatch(m, n) for n in names])] if not matched: raise RuntimeError(f'No module names found matching {names}.') self.model = model self.hooks = FeatureHooks(matched, model.named_modules(), default_hook_type=hook_type) self.names = matched self.mode = mode self.method = method def forward(self, x): if self.hooks is not None: self.model(x) output = self.hooks.get_output(device=x.device) else: output = self.model(x) return output
pytorch-image-models/timm/utils/attention_extract.py/0
{ "file_path": "pytorch-image-models/timm/utils/attention_extract.py", "repo_id": "pytorch-image-models", "token_count": 1467 }
250
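A hypothetical example of using the AttentionExtract helper above to pull attention maps from a timm ViT; the model name is only an example, and the set_fused_attn(False) call is an assumption about the installed timm version, made so that the default '*attn.softmax' nodes actually exist in the traced graph.

# Hypothetical usage: extract per-block attention maps from a ViT (model name illustrative).
import torch
import timm
from timm.layers import set_fused_attn

set_fused_attn(False)  # assumption: disable fused SDPA so explicit softmax nodes appear for fx matching
model = timm.create_model('vit_small_patch16_224', pretrained=False)
extractor = AttentionExtract(model, method='fx')  # or method='hook' with the default '*attn_drop' module names

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    maps = extractor(x)  # dict: matched node/module name -> attention tensor, typically (B, heads, N, N)

for name, attn in maps.items():
    print(name, tuple(attn.shape))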
#!/usr/bin/env python3 """ ImageNet Training Script This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet training results with some of the latest networks and training techniques. It favours canonical PyTorch and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. This script was started from an early version of the PyTorch ImageNet example (https://github.com/pytorch/examples/tree/master/imagenet) NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples (https://github.com/NVIDIA/apex/tree/master/examples/imagenet) Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) """ import argparse import copy import importlib import json import logging import os import time from collections import OrderedDict from contextlib import suppress from datetime import datetime from functools import partial import torch import torch.nn as nn import torchvision.utils import yaml from torch.nn.parallel import DistributedDataParallel as NativeDDP from timm import utils from timm.data import create_dataset, create_loader, create_naflex_loader, resolve_data_config, \ Mixup, FastCollateMixup, AugMixDataset from timm.layers import convert_splitbn_model, convert_sync_batchnorm, set_fast_norm from timm.loss import JsdCrossEntropy, SoftTargetCrossEntropy, BinaryCrossEntropy, LabelSmoothingCrossEntropy from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, model_parameters from timm.optim import create_optimizer_v2, optimizer_kwargs from timm.scheduler import create_scheduler_v2, scheduler_kwargs from timm.utils import ApexScaler, NativeScaler try: from apex import amp from apex.parallel import DistributedDataParallel as ApexDDP from apex.parallel import convert_syncbn_model has_apex = True except ImportError: has_apex = False try: import wandb has_wandb = True except ImportError: has_wandb = False try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') _logger = logging.getLogger('train') # The first arg parser parses out only the --config argument, this argument is used to # load a yaml file containing key-values that override the defaults for the main parser below config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', help='YAML config file specifying default arguments') parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') # Dataset parameters group = parser.add_argument_group('Dataset parameters') # Keep this argument outside the dataset group because it is positional. 
parser.add_argument('data', nargs='?', metavar='DIR', const=None, help='path to dataset (positional is *deprecated*, use --data-dir)') group.add_argument('--data-dir', metavar='DIR', help='path to dataset (root dir)') group.add_argument('--dataset', metavar='NAME', default='', help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)') group.add_argument('--train-split', metavar='NAME', default='train', help='dataset train split (default: train)') group.add_argument('--val-split', metavar='NAME', default='validation', help='dataset validation split (default: validation)') group.add_argument('--train-num-samples', default=None, type=int, metavar='N', help='Manually specify num samples in train split, for IterableDatasets.') group.add_argument('--val-num-samples', default=None, type=int, metavar='N', help='Manually specify num samples in validation split, for IterableDatasets.') group.add_argument('--dataset-download', action='store_true', default=False, help='Allow download of dataset for torch/ and tfds/ datasets that support it.') group.add_argument('--class-map', default='', type=str, metavar='FILENAME', help='path to class to idx mapping file (default: "")') group.add_argument('--input-img-mode', default=None, type=str, help='Dataset image conversion mode for input images.') group.add_argument('--input-key', default=None, type=str, help='Dataset key for input images.') group.add_argument('--target-key', default=None, type=str, help='Dataset key for target labels.') group.add_argument('--dataset-trust-remote-code', action='store_true', default=False, help='Allow huggingface dataset import to execute code downloaded from the dataset\'s repo.') # Model parameters group = parser.add_argument_group('Model parameters') group.add_argument('--model', default='resnet50', type=str, metavar='MODEL', help='Name of model to train (default: "resnet50")') group.add_argument('--pretrained', action='store_true', default=False, help='Start with pretrained version of specified network (if avail)') group.add_argument('--pretrained-path', default=None, type=str, help='Load this checkpoint as if they were the pretrained weights (with adaptation).') group.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', help='Load this checkpoint into model after initialization (default: none)') group.add_argument('--resume', default='', type=str, metavar='PATH', help='Resume full model and optimizer state from checkpoint (default: none)') group.add_argument('--no-resume-opt', action='store_true', default=False, help='prevent resume of optimizer state when resuming model') group.add_argument('--num-classes', type=int, default=None, metavar='N', help='number of label classes (Model default if None)') group.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') group.add_argument('--img-size', type=int, default=None, metavar='N', help='Image size (default: None => model default)') group.add_argument('--in-chans', type=int, default=None, metavar='N', help='Image input channels (default: None => 3)') group.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N', help='Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty') group.add_argument('--crop-pct', default=None, type=float, metavar='N', help='Input image center crop percent (for validation only)') group.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') group.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of dataset') group.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') group.add_argument('-b', '--batch-size', type=int, default=128, metavar='N', help='Input batch size for training (default: 128)') group.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N', help='Validation batch size override (default: None)') group.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') group.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')") group.add_argument('--grad-accum-steps', type=int, default=1, metavar='N', help='The number of steps to accumulate gradients (default: 1)') group.add_argument('--grad-checkpointing', action='store_true', default=False, help='Enable gradient checkpointing through model blocks/stages') group.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') group.add_argument('--model-kwargs', nargs='*', default={}, action=utils.ParseKwargs) group.add_argument('--head-init-scale', default=None, type=float, help='Head initialization scale') group.add_argument('--head-init-bias', default=None, type=float, help='Head initialization bias value') group.add_argument('--torchcompile-mode', type=str, default=None, help="torch.compile mode (default: None).") # scripting / codegen scripting_group = group.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true', help='torch.jit.script the full model') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help="Enable compilation w/ specified backend (default: inductor).") # Device & distributed group = parser.add_argument_group('Device parameters') group.add_argument('--device', default='cuda', type=str, help="Device (accelerator) to use.") group.add_argument('--amp', action='store_true', default=False, help='use NVIDIA Apex AMP or Native AMP for mixed precision training') group.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16)') group.add_argument('--amp-impl', default='native', type=str, help='AMP impl to use, "native" or "apex" (default: native)') group.add_argument('--model-dtype', default=None, type=str, help='Model dtype override (non-AMP) (default: float32)') group.add_argument('--no-ddp-bb', action='store_true', default=False, help='Force broadcast buffers for native DDP to off.') group.add_argument('--synchronize-step', action='store_true', default=False, help='torch.cuda.synchronize() end of each step') group.add_argument("--local_rank", default=0, type=int) group.add_argument('--device-modules', default=None, type=str, nargs='+', help="Python imports for device backend modules.") # Optimizer parameters group = parser.add_argument_group('Optimizer parameters') group.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd")') 
group.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') group.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') group.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') group.add_argument('--weight-decay', type=float, default=2e-5, help='weight decay (default: 2e-5)') group.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') group.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. One of ("norm", "value", "agc")') group.add_argument('--layer-decay', type=float, default=None, help='layer-wise learning rate decay (default: None)') group.add_argument('--layer-decay-min-scale', type=float, default=0, help='layer-wise lr decay minimum scale clamp (default: 0)') group.add_argument('--layer-decay-no-opt-scale', type=float, default=None, help='layer-wise lr decay no optimization scale (default: None)') group.add_argument('--opt-kwargs', nargs='*', default={}, action=utils.ParseKwargs) # Learning rate schedule parameters group = parser.add_argument_group('Learning rate schedule parameters') group.add_argument('--sched', type=str, default='cosine', metavar='SCHEDULER', help='LR scheduler (default: "cosine"') group.add_argument('--sched-on-updates', action='store_true', default=False, help='Apply LR scheduler step on update instead of epoch end.') group.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate, overrides lr-base if set (default: None)') group.add_argument('--lr-base', type=float, default=0.1, metavar='LR', help='base learning rate: lr = lr_base * global_batch_size / base_size') group.add_argument('--lr-base-size', type=int, default=256, metavar='DIV', help='base learning rate batch size (divisor, default: 256).') group.add_argument('--lr-base-scale', type=str, default='', metavar='SCALE', help='base learning rate vs batch_size scaling ("linear", "sqrt", based on opt if empty)') group.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages') group.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)') group.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)') group.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT', help='learning rate cycle len multiplier (default: 1.0)') group.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT', help='amount to decay each learning rate cycle (default: 0.5)') group.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N', help='learning rate cycle limit, cycles enabled if > 1') group.add_argument('--lr-k-decay', type=float, default=1.0, help='learning rate k-decay for cosine/poly (default: 1.0)') group.add_argument('--warmup-lr', type=float, default=1e-5, metavar='LR', help='warmup learning rate (default: 1e-5)') group.add_argument('--min-lr', type=float, default=0, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (default: 0)') group.add_argument('--epochs', type=int, default=300, metavar='N', help='number of epochs to train (default: 300)') group.add_argument('--epoch-repeats', type=float, default=0., 
metavar='N', help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).') group.add_argument('--start-epoch', default=None, type=int, metavar='N', help='manual epoch number (useful on restarts)') group.add_argument('--decay-milestones', default=[90, 180, 270], type=int, nargs='+', metavar="MILESTONES", help='list of decay epoch indices for multistep lr. must be increasing') group.add_argument('--decay-epochs', type=float, default=90, metavar='N', help='epoch interval to decay LR') group.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') group.add_argument('--warmup-prefix', action='store_true', default=False, help='Exclude warmup period from decay schedule.'), group.add_argument('--cooldown-epochs', type=int, default=0, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends') group.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10)') group.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)') # Augmentation & regularization parameters group = parser.add_argument_group('Augmentation and regularization parameters') group.add_argument('--no-aug', action='store_true', default=False, help='Disable all training augmentation, override other train aug args') group.add_argument('--train-crop-mode', type=str, default=None, help='Crop-mode in train'), group.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT', help='Random resize scale (default: 0.08 1.0)') group.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO', help='Random resize aspect ratio (default: 0.75 1.33)') group.add_argument('--hflip', type=float, default=0.5, help='Horizontal flip training aug probability') group.add_argument('--vflip', type=float, default=0., help='Vertical flip training aug probability') group.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') group.add_argument('--color-jitter-prob', type=float, default=None, metavar='PCT', help='Probability of applying any color jitter.') group.add_argument('--grayscale-prob', type=float, default=None, metavar='PCT', help='Probability of applying random grayscale conversion.') group.add_argument('--gaussian-blur-prob', type=float, default=None, metavar='PCT', help='Probability of applying gaussian blur.') group.add_argument('--aa', type=str, default=None, metavar='NAME', help='Use AutoAugment policy. "v0" or "original". (default: None)'), group.add_argument('--aug-repeats', type=float, default=0, help='Number of augmentation repetitions (distributed training only) (default: 0)') group.add_argument('--aug-splits', type=int, default=0, help='Number of augmentation splits (default: 0, valid: 0 or >=2)') group.add_argument('--jsd-loss', action='store_true', default=False, help='Enable Jensen-Shannon Divergence + CE loss. 
Use with `--aug-splits`.') group.add_argument('--bce-loss', action='store_true', default=False, help='Enable BCE loss w/ Mixup/CutMix use.') group.add_argument('--bce-sum', action='store_true', default=False, help='Sum over classes when using BCE loss.') group.add_argument('--bce-target-thresh', type=float, default=None, help='Threshold for binarizing softened BCE targets (default: None, disabled).') group.add_argument('--bce-pos-weight', type=float, default=None, help='Positive weighting for BCE loss.') group.add_argument('--reprob', type=float, default=0., metavar='PCT', help='Random erase prob (default: 0.)') group.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') group.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') group.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') group.add_argument('--mixup', type=float, default=0.0, help='mixup alpha, mixup enabled if > 0. (default: 0.)') group.add_argument('--cutmix', type=float, default=0.0, help='cutmix alpha, cutmix enabled if > 0. (default: 0.)') group.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') group.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') group.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') group.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') group.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', help='Turn off mixup after this epoch, disabled if 0 (default: 0)') group.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') group.add_argument('--train-interpolation', type=str, default='random', help='Training interpolation (random, bilinear, bicubic default: "random")') group.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') group.add_argument('--drop-connect', type=float, default=None, metavar='PCT', help='Drop connect rate, DEPRECATED, use drop-path (default: None)') group.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') group.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') # Batch norm parameters (only works with gen_efficientnet based models currently) group = parser.add_argument_group('Batch norm parameters', 'Only works with gen_efficientnet based models currently.') group.add_argument('--bn-momentum', type=float, default=None, help='BatchNorm momentum override (if not None)') group.add_argument('--bn-eps', type=float, default=None, help='BatchNorm epsilon override (if not None)') group.add_argument('--sync-bn', action='store_true', help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') group.add_argument('--dist-bn', type=str, default='reduce', help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")') group.add_argument('--split-bn', action='store_true', help='Enable separate BN layers per augmentation split.') # Model Exponential Moving Average group = parser.add_argument_group('Model exponential moving average parameters') 
group.add_argument('--model-ema', action='store_true', default=False, help='Enable tracking moving average of model weights.') group.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.') group.add_argument('--model-ema-decay', type=float, default=0.9998, help='Decay factor for model weights moving average (default: 0.9998)') group.add_argument('--model-ema-warmup', action='store_true', help='Enable warmup for model EMA decay.') # Misc group = parser.add_argument_group('Miscellaneous parameters') group.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 42)') group.add_argument('--worker-seeding', type=str, default='all', help='worker seed mode (default: all)') group.add_argument('--log-interval', type=int, default=50, metavar='N', help='how many batches to wait before logging training status') group.add_argument('--recovery-interval', type=int, default=0, metavar='N', help='how many batches to wait before writing recovery checkpoint') group.add_argument('--checkpoint-hist', type=int, default=10, metavar='N', help='number of checkpoints to keep (default: 10)') group.add_argument('-j', '--workers', type=int, default=4, metavar='N', help='how many training processes to use (default: 4)') group.add_argument('--save-images', action='store_true', default=False, help='save images of input batches every log interval for debugging') group.add_argument('--pin-mem', action='store_true', default=False, help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') group.add_argument('--no-prefetcher', action='store_true', default=False, help='disable fast prefetcher') group.add_argument('--output', default='', type=str, metavar='PATH', help='path to output folder (default: none, current dir)') group.add_argument('--experiment', default='', type=str, metavar='NAME', help='name of train experiment, name of sub-folder for output') group.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC', help='Best metric (default: "top1"') group.add_argument('--tta', type=int, default=0, metavar='N', help='Test/inference time augmentation (oversampling) factor. 
0=None (default: 0)') group.add_argument('--use-multi-epochs-loader', action='store_true', default=False, help='use the multi-epochs-loader to save time at the beginning of every epoch') group.add_argument('--log-wandb', action='store_true', default=False, help='log training and validation metrics to wandb') group.add_argument('--wandb-project', default=None, type=str, help='wandb project name') group.add_argument('--wandb-tags', default=[], type=str, nargs='+', help='wandb tags') group.add_argument('--wandb-resume-id', default='', type=str, metavar='ID', help='If resuming a run, the id of the run in wandb') # NaFlex scheduled loader arguments group.add_argument('--naflex-loader', action='store_true', default=False, help='Use NaFlex loader (Requires NaFlex compatible model)') group.add_argument('--naflex-train-seq-lens', type=int, nargs='+', default=[128, 256, 576, 784, 1024], help='Sequence lengths to use for NaFlex loader') group.add_argument('--naflex-max-seq-len', type=int, default=576, help='Fixed maximum sequence length for NaFlex loader (validation)') group.add_argument('--naflex-patch-sizes', type=int, nargs='+', default=None, help='List of patch sizes for variable patch size training (e.g., 8 12 16 24 32)') group.add_argument('--naflex-patch-size-probs', type=float, nargs='+', default=None, help='Probabilities for each patch size (must sum to 1.0, uniform if not specified)') group.add_argument('--naflex-loss-scale', default='linear', type=str, help='Scale loss (gradient) by batch_size ("none", "sqrt", or "linear")') def _parse_args(): # Do we have a config file to parse? args_config, remaining = config_parser.parse_known_args() if args_config.config: with open(args_config.config, 'r') as f: cfg = yaml.safe_load(f) parser.set_defaults(**cfg) # The main arg parser parses the rest of the args, the usual # defaults will have been overridden if config file specified. args = parser.parse_args(remaining) # Cache the args as a text string to save them in the output dir later args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) return args, args_text def main(): utils.setup_default_logging() args, args_text = _parse_args() if args.device_modules: for module in args.device_modules: importlib.import_module(module) if torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True args.prefetcher = not args.no_prefetcher args.grad_accum_steps = max(1, args.grad_accum_steps) device = utils.init_distributed_device(args) if args.distributed: _logger.info( 'Training in distributed mode with multiple processes, 1 device per process.' f'Process {args.rank}, total {args.world_size}, device {args.device}.') else: _logger.info(f'Training with a single process on 1 device ({args.device}).') assert args.rank >= 0 model_dtype = None if args.model_dtype: assert args.model_dtype in ('float32', 'float16', 'bfloat16') model_dtype = getattr(torch, args.model_dtype) if model_dtype == torch.float16: _logger.warning('float16 is not recommended for training, for half precision bfloat16 is recommended.') # resolve AMP arguments based on PyTorch / Apex availability use_amp = None amp_dtype = torch.float16 if args.amp: assert model_dtype is None or model_dtype == torch.float32, 'float32 model dtype must be used with AMP' if args.amp_impl == 'apex': assert has_apex, 'AMP impl specified as APEX but APEX is not installed.' 
use_amp = 'apex' assert args.amp_dtype == 'float16' else: use_amp = 'native' assert args.amp_dtype in ('float16', 'bfloat16') if args.amp_dtype == 'bfloat16': amp_dtype = torch.bfloat16 utils.random_seed(args.seed, args.rank) if args.fuser: utils.set_jit_fuser(args.fuser) if args.fast_norm: set_fast_norm() in_chans = 3 if args.in_chans is not None: in_chans = args.in_chans elif args.input_size is not None: in_chans = args.input_size[0] factory_kwargs = {} if args.pretrained_path: # merge with pretrained_cfg of model, 'file' has priority over 'url' and 'hf_hub'. factory_kwargs['pretrained_cfg_overlay'] = dict( file=args.pretrained_path, num_classes=-1, # force head adaptation ) model = create_model( args.model, pretrained=args.pretrained, in_chans=in_chans, num_classes=args.num_classes, drop_rate=args.drop, drop_path_rate=args.drop_path, drop_block_rate=args.drop_block, global_pool=args.gp, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps, scriptable=args.torchscript, checkpoint_path=args.initial_checkpoint, **factory_kwargs, **args.model_kwargs, ) if args.head_init_scale is not None: with torch.no_grad(): model.get_classifier().weight.mul_(args.head_init_scale) model.get_classifier().bias.mul_(args.head_init_scale) if args.head_init_bias is not None: nn.init.constant_(model.get_classifier().bias, args.head_init_bias) if args.num_classes is None: assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly if args.grad_checkpointing: model.set_grad_checkpointing(enable=True) if utils.is_primary(args): _logger.info( f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}') data_config = resolve_data_config(vars(args), model=model, verbose=utils.is_primary(args)) # setup augmentation batch splits for contrastive loss or split bn num_aug_splits = 0 if args.aug_splits > 0: assert args.aug_splits > 1, 'A split of 1 makes no sense' num_aug_splits = args.aug_splits # enable split bn (separate bn stats per batch-portion) if args.split_bn: assert num_aug_splits > 1 or args.resplit model = convert_splitbn_model(model, max(num_aug_splits, 2)) # move model to GPU, enable channels last layout if set model.to(device=device, dtype=model_dtype) # FIXME move model device & dtype into create_model if args.channels_last: model.to(memory_format=torch.channels_last) # setup synchronized BatchNorm for distributed training if args.distributed and args.sync_bn: args.dist_bn = '' # disable dist_bn when sync BN active assert not args.split_bn if has_apex and use_amp == 'apex': # Apex SyncBN used with Apex AMP # WARNING this won't currently work with models using BatchNormAct2d model = convert_syncbn_model(model) else: model = convert_sync_batchnorm(model) if utils.is_primary(args): _logger.info( 'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using ' 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.') model_patch_size = None if args.naflex_loader: # NaFlexVit models have embeds.patch_size. Needs to be extracted here before mutating the model. 
model_patch_size = getattr(getattr(model, "embeds", None), "patch_size", None) if args.torchscript: assert not args.torchcompile assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model' assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model' model = torch.jit.script(model) if not args.lr: global_batch_size = args.batch_size * args.world_size * args.grad_accum_steps batch_ratio = global_batch_size / args.lr_base_size if not args.lr_base_scale: on = args.opt.lower() args.lr_base_scale = 'sqrt' if any([o in on for o in ('ada', 'lamb')]) else 'linear' if args.lr_base_scale == 'sqrt': batch_ratio = batch_ratio ** 0.5 args.lr = args.lr_base * batch_ratio if utils.is_primary(args): _logger.info( f'Learning rate ({args.lr}) calculated from base learning rate ({args.lr_base}) ' f'and effective global batch size ({global_batch_size}) with {args.lr_base_scale} scaling.') optimizer = create_optimizer_v2( model, **optimizer_kwargs(cfg=args), **args.opt_kwargs, ) if utils.is_primary(args): defaults = copy.deepcopy(optimizer.defaults) defaults['weight_decay'] = args.weight_decay # this isn't stored in optimizer.defaults defaults = ', '.join([f'{k}: {v}' for k, v in defaults.items()]) logging.info( f'Created {type(optimizer).__name__} ({args.opt}) optimizer: {defaults}' ) # setup automatic mixed-precision (AMP) loss scaling and op casting amp_autocast = suppress # do nothing loss_scaler = None if use_amp == 'apex': assert device.type == 'cuda' model, optimizer = amp.initialize(model, optimizer, opt_level='O1') loss_scaler = ApexScaler() if utils.is_primary(args): _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.') elif use_amp == 'native': amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype) if device.type in ('cuda',) and amp_dtype == torch.float16: # loss scaler only used for float16 (half) dtype, bfloat16 does not need it loss_scaler = NativeScaler(device=device.type) if utils.is_primary(args): _logger.info('Using native Torch AMP. Training in mixed precision.') else: if utils.is_primary(args): _logger.info(f'AMP not enabled. 
Training in {model_dtype or torch.float32}.') # optionally resume from a checkpoint resume_epoch = None if args.resume: resume_epoch = resume_checkpoint( model, args.resume, optimizer=None if args.no_resume_opt else optimizer, loss_scaler=None if args.no_resume_opt else loss_scaler, log_info=utils.is_primary(args), ) # setup exponential moving average of model weights, SWA could be used here too model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before DDP wrapper model_ema = utils.ModelEmaV3( model, decay=args.model_ema_decay, use_warmup=args.model_ema_warmup, device='cpu' if args.model_ema_force_cpu else None, ) if args.resume: load_checkpoint(model_ema.module, args.resume, use_ema=True) if args.torchcompile: model_ema = torch.compile( model_ema, backend=args.torchcompile, mode=args.torchcompile_mode, ) # setup distributed training if args.distributed: if has_apex and use_amp == 'apex': # Apex DDP preferred unless native amp is activated if utils.is_primary(args): _logger.info("Using NVIDIA APEX DistributedDataParallel.") model = ApexDDP(model, delay_allreduce=True) else: if utils.is_primary(args): _logger.info("Using native Torch DistributedDataParallel.") model = NativeDDP(model, device_ids=[device], broadcast_buffers=not args.no_ddp_bb) # NOTE: EMA model does not need to be wrapped by DDP if args.torchcompile: # torch compile should be done after DDP assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.' model = torch.compile(model, backend=args.torchcompile, mode=args.torchcompile_mode) # create the train and eval datasets if args.data and not args.data_dir: args.data_dir = args.data if args.input_img_mode is None: input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L' else: input_img_mode = args.input_img_mode dataset_train = create_dataset( args.dataset, root=args.data_dir, split=args.train_split, is_training=True, class_map=args.class_map, download=args.dataset_download, batch_size=args.batch_size, seed=args.seed, repeats=args.epoch_repeats, input_img_mode=input_img_mode, input_key=args.input_key, target_key=args.target_key, num_samples=args.train_num_samples, trust_remote_code=args.dataset_trust_remote_code, ) dataset_eval = None if args.val_split: dataset_eval = create_dataset( args.dataset, root=args.data_dir, split=args.val_split, is_training=False, class_map=args.class_map, download=args.dataset_download, batch_size=args.batch_size, input_img_mode=input_img_mode, input_key=args.input_key, target_key=args.target_key, num_samples=args.val_num_samples, trust_remote_code=args.dataset_trust_remote_code, ) # create data loaders w/ augmentation pipeline train_interpolation = args.train_interpolation if args.no_aug or not train_interpolation: train_interpolation = data_config['interpolation'] # Check if we should use the NaFlex scheduled loader common_loader_kwargs = dict( mean=data_config['mean'], std=data_config['std'], pin_memory=args.pin_mem, img_dtype=model_dtype or torch.float32, device=device, distributed=args.distributed, use_prefetcher=args.prefetcher, ) train_loader_kwargs = dict( batch_size=args.batch_size, is_training=True, no_aug=args.no_aug, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, re_split=args.resplit, train_crop_mode=args.train_crop_mode, scale=args.scale, ratio=args.ratio, hflip=args.hflip, vflip=args.vflip, color_jitter=args.color_jitter, color_jitter_prob=args.color_jitter_prob, grayscale_prob=args.grayscale_prob, 
gaussian_blur_prob=args.gaussian_blur_prob, auto_augment=args.aa, num_aug_repeats=args.aug_repeats, num_aug_splits=num_aug_splits, interpolation=train_interpolation, num_workers=args.workers, worker_seeding=args.worker_seeding, ) mixup_fn = None mixup_args = {} mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: mixup_args = dict( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.num_classes ) naflex_mode = False if args.naflex_loader: if utils.is_primary(args): _logger.info('Using NaFlex loader') assert num_aug_splits <= 1, 'Augmentation splits not supported in NaFlex mode' naflex_mixup_fn = None if mixup_active: from timm.data import NaFlexMixup mixup_args.pop('mode') # not supported mixup_args.pop('cutmix_minmax') # not supported naflex_mixup_fn = NaFlexMixup(**mixup_args) # Check if we have model's patch size for NaFlex mode if model_patch_size is None: # Fallback to default model_patch_size = (16, 16) if utils.is_primary(args): _logger.warning(f'Could not determine model patch size, using default: {model_patch_size}') # Configure patch sizes for NaFlex loader patch_loader_kwargs = {} if args.naflex_patch_sizes: # Variable patch size mode patch_loader_kwargs['patch_size_choices'] = args.naflex_patch_sizes if args.naflex_patch_size_probs: if len(args.naflex_patch_size_probs) != len(args.naflex_patch_sizes): parser.error('--naflex-patch-size-probs must have same length as --naflex-patch-sizes') patch_loader_kwargs['patch_size_choice_probs'] = args.naflex_patch_size_probs if utils.is_primary(args): _logger.info(f'Using variable patch sizes: {args.naflex_patch_sizes}') else: # Single patch size mode - use model's patch size patch_loader_kwargs['patch_size'] = model_patch_size if utils.is_primary(args): _logger.info(f'Using model patch size: {model_patch_size}') naflex_mode = True loader_train = create_naflex_loader( dataset=dataset_train, train_seq_lens=args.naflex_train_seq_lens, mixup_fn=naflex_mixup_fn, rank=args.rank, world_size=args.world_size, **patch_loader_kwargs, **common_loader_kwargs, **train_loader_kwargs, ) else: # setup mixup / cutmix collate_fn = None if mixup_active: if args.prefetcher: assert not num_aug_splits # collate conflict (need to support de-interleaving in collate mixup) collate_fn = FastCollateMixup(**mixup_args) else: mixup_fn = Mixup(**mixup_args) # wrap dataset in AugMix helper if num_aug_splits > 1: dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits) # Use standard loader loader_train = create_loader( dataset_train, input_size=data_config['input_size'], collate_fn=collate_fn, use_multi_epochs_loader=args.use_multi_epochs_loader, **common_loader_kwargs, **train_loader_kwargs, ) loader_eval = None if args.val_split: assert dataset_eval is not None eval_workers = args.workers if args.distributed and ('tfds' in args.dataset or 'wds' in args.dataset): # FIXME reduces validation padding issues when using TFDS, WDS w/ workers and distributed training eval_workers = min(2, args.workers) eval_loader_kwargs = dict( batch_size=args.validation_batch_size or args.batch_size, is_training=False, interpolation=data_config['interpolation'], num_workers=eval_workers, crop_pct=data_config['crop_pct'], ) if args.naflex_loader: # Use largest sequence length for validation loader_eval = create_naflex_loader( dataset=dataset_eval, 
patch_size=model_patch_size, # Use model's native patch size (already determined above) max_seq_len=args.naflex_max_seq_len, **common_loader_kwargs, **eval_loader_kwargs ) else: # Use standard loader loader_eval = create_loader( dataset_eval, input_size=data_config['input_size'], **common_loader_kwargs, **eval_loader_kwargs, ) # setup loss function if args.jsd_loss: assert num_aug_splits > 1 # JSD only valid with aug splits set train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing) elif mixup_active: # smoothing is handled with mixup target transform which outputs sparse, soft targets if args.bce_loss: train_loss_fn = BinaryCrossEntropy( target_threshold=args.bce_target_thresh, sum_classes=args.bce_sum, pos_weight=args.bce_pos_weight, ) else: train_loss_fn = SoftTargetCrossEntropy() elif args.smoothing: if args.bce_loss: train_loss_fn = BinaryCrossEntropy( smoothing=args.smoothing, target_threshold=args.bce_target_thresh, sum_classes=args.bce_sum, pos_weight=args.bce_pos_weight, ) else: train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: train_loss_fn = nn.CrossEntropyLoss() train_loss_fn = train_loss_fn.to(device=device) validate_loss_fn = nn.CrossEntropyLoss().to(device=device) # setup checkpoint saver and eval metric tracking eval_metric = args.eval_metric if loader_eval is not None else 'loss' decreasing_metric = eval_metric == 'loss' best_metric = None best_epoch = None saver = None output_dir = None if utils.is_primary(args): if args.experiment: exp_name = args.experiment else: exp_name = '-'.join([ datetime.now().strftime("%Y%m%d-%H%M%S"), safe_model_name(args.model), str(data_config['input_size'][-1]) ]) output_dir = utils.get_outdir(args.output if args.output else './output/train', exp_name) saver = utils.CheckpointSaver( model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler, checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing_metric, max_history=args.checkpoint_hist ) with open(os.path.join(output_dir, 'args.yaml'), 'w') as f: f.write(args_text) if args.log_wandb: if has_wandb: assert not args.wandb_resume_id or args.resume wandb.init( project=args.wandb_project, name=exp_name, config=args, tags=args.wandb_tags, resume="must" if args.wandb_resume_id else None, id=args.wandb_resume_id if args.wandb_resume_id else None, ) else: _logger.warning( "You've requested to log metrics to wandb but package not found. " "Metrics not being logged to wandb, try `pip install wandb`") # setup learning rate schedule and starting epoch updates_per_epoch = (len(loader_train) + args.grad_accum_steps - 1) // args.grad_accum_steps lr_scheduler, num_epochs = create_scheduler_v2( optimizer, **scheduler_kwargs(args, decreasing_metric=decreasing_metric), updates_per_epoch=updates_per_epoch, ) start_epoch = 0 if args.start_epoch is not None: # a specified start_epoch will always override the resume epoch start_epoch = args.start_epoch elif resume_epoch is not None: start_epoch = resume_epoch if lr_scheduler is not None and start_epoch > 0: if args.sched_on_updates: lr_scheduler.step_update(start_epoch * updates_per_epoch) else: lr_scheduler.step(start_epoch) if utils.is_primary(args): if args.warmup_prefix: sched_explain = '(warmup_epochs + epochs + cooldown_epochs). Warmup added to total when warmup_prefix=True' else: sched_explain = '(epochs + cooldown_epochs). Warmup within epochs when warmup_prefix=False' _logger.info( f'Scheduled epochs: {num_epochs} {sched_explain}. 
' f'LR stepped per {"epoch" if lr_scheduler.t_in_epochs else "update"}.') results = [] try: for epoch in range(start_epoch, num_epochs): if hasattr(dataset_train, 'set_epoch'): dataset_train.set_epoch(epoch) elif args.distributed and hasattr(loader_train.sampler, 'set_epoch'): loader_train.sampler.set_epoch(epoch) train_metrics = train_one_epoch( epoch, model, loader_train, optimizer, train_loss_fn, args, device=device, lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_dtype=model_dtype, model_ema=model_ema, mixup_fn=mixup_fn, num_updates_total=num_epochs * updates_per_epoch, naflex_mode=naflex_mode, ) if args.distributed and args.dist_bn in ('broadcast', 'reduce'): if utils.is_primary(args): _logger.info("Distributing BatchNorm running means and vars") utils.distribute_bn(model, args.world_size, args.dist_bn == 'reduce') if loader_eval is not None: eval_metrics = validate( model, loader_eval, validate_loss_fn, args, device=device, amp_autocast=amp_autocast, model_dtype=model_dtype, ) if model_ema is not None and not args.model_ema_force_cpu: if args.distributed and args.dist_bn in ('broadcast', 'reduce'): utils.distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce') ema_eval_metrics = validate( model_ema, loader_eval, validate_loss_fn, args, device=device, amp_autocast=amp_autocast, log_suffix=' (EMA)', ) eval_metrics = ema_eval_metrics else: eval_metrics = None if output_dir is not None: lrs = [param_group['lr'] for param_group in optimizer.param_groups] utils.update_summary( epoch, train_metrics, eval_metrics, filename=os.path.join(output_dir, 'summary.csv'), lr=sum(lrs) / len(lrs), write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb, ) if eval_metrics is not None: latest_metric = eval_metrics[eval_metric] else: latest_metric = train_metrics[eval_metric] if saver is not None: # save proper checkpoint with eval metric best_metric, best_epoch = saver.save_checkpoint(epoch, metric=latest_metric) if lr_scheduler is not None: # step LR for next epoch lr_scheduler.step(epoch + 1, latest_metric) latest_results = { 'epoch': epoch, 'train': train_metrics, } if eval_metrics is not None: latest_results['validation'] = eval_metrics results.append(latest_results) except KeyboardInterrupt: pass if best_metric is not None: # log best metric as tracked by checkpoint saver _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) if utils.is_primary(args): # for parsable results display, dump top-10 summaries to avoid excess console spam display_results = sorted( results, key=lambda x: x.get('validation', x.get('train')).get(eval_metric, 0), reverse=decreasing_metric, ) print(f'--result\n{json.dumps(display_results[-10:], indent=4)}') def train_one_epoch( epoch, model, loader, optimizer, loss_fn, args, device=torch.device('cuda'), lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_dtype=None, model_ema=None, mixup_fn=None, num_updates_total=None, naflex_mode=False, ): if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: if args.prefetcher and loader.mixup_enabled: loader.mixup_enabled = False elif mixup_fn is not None: mixup_fn.mixup_enabled = False second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order has_no_sync = hasattr(model, "no_sync") update_time_m = utils.AverageMeter() data_time_m = utils.AverageMeter() losses_m = utils.AverageMeter() model.train() accum_steps = args.grad_accum_steps 
last_accum_steps = len(loader) % accum_steps updates_per_epoch = (len(loader) + accum_steps - 1) // accum_steps num_updates = epoch * updates_per_epoch last_batch_idx = len(loader) - 1 last_batch_idx_to_accum = len(loader) - last_accum_steps data_start_time = update_start_time = time.time() optimizer.zero_grad() update_sample_count = 0 for batch_idx, (input, target) in enumerate(loader): last_batch = batch_idx == last_batch_idx need_update = last_batch or (batch_idx + 1) % accum_steps == 0 update_idx = batch_idx // accum_steps if batch_idx >= last_batch_idx_to_accum: accum_steps = last_accum_steps if not args.prefetcher: input, target = input.to(device=device, dtype=model_dtype), target.to(device=device) if mixup_fn is not None: input, target = mixup_fn(input, target) if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) # multiply by accum steps to get equivalent for full update data_time_m.update(accum_steps * (time.time() - data_start_time)) def _forward(): with amp_autocast(): output = model(input) _loss = loss_fn(output, target) if accum_steps > 1: _loss /= accum_steps return _loss def _backward(_loss): if loss_scaler is not None: loss_scaler( _loss, optimizer, clip_grad=args.clip_grad, clip_mode=args.clip_mode, parameters=model_parameters(model, exclude_head='agc' in args.clip_mode), create_graph=second_order, need_update=need_update, ) else: _loss.backward(create_graph=second_order) if need_update: if args.clip_grad is not None: utils.dispatch_clip_grad( model_parameters(model, exclude_head='agc' in args.clip_mode), value=args.clip_grad, mode=args.clip_mode, ) optimizer.step() if naflex_mode: assert isinstance(input, dict) batch_size = input['patches'].shape[0] # scale gradient vs the minimum batch size (for max seq len) if not args.naflex_loss_scale or args.naflex_loss_scale == 'none': local_scale = 1.0 else: local_scale = (batch_size / args.batch_size) if local_scale == 'sqrt': local_scale = local_scale ** 0.5 if args.distributed: # scale gradient btw distributed ranks, each one can have different batch size global_batch_size = utils.reduce_tensor( torch.tensor(batch_size, device=device, dtype=torch.float32), 1 # SUM ) dist_scale = args.world_size * batch_size / global_batch_size else: dist_scale = None global_batch_size = batch_size if has_no_sync and not need_update: with model.no_sync(): loss = _forward() scaled_loss = local_scale * loss if dist_scale is not None: scaled_loss *= dist_scale _backward(scaled_loss) else: loss = _forward() scaled_loss = local_scale * loss if dist_scale is not None: scaled_loss *= dist_scale _backward(scaled_loss) else: global_batch_size = batch_size = input.shape[0] if args.distributed: global_batch_size *= args.world_size if has_no_sync and not need_update: with model.no_sync(): loss = _forward() _backward(loss) else: loss = _forward() _backward(loss) losses_m.update(loss.item() * accum_steps, batch_size) update_sample_count += global_batch_size if not need_update: data_start_time = time.time() continue num_updates += 1 optimizer.zero_grad() if model_ema is not None: model_ema.update(model, step=num_updates) if args.synchronize_step: if device.type == 'cuda': torch.cuda.synchronize() elif device.type == 'npu': torch.npu.synchronize() time_now = time.time() update_time_m.update(time.time() - update_start_time) update_start_time = time_now if update_idx % args.log_interval == 0: lrl = [param_group['lr'] for param_group in optimizer.param_groups] lr = sum(lrl) / len(lrl) loss_avg, loss_now = losses_m.avg, losses_m.val if 
args.distributed: # synchronize current step and avg loss, each process keeps its own running avg loss_avg = utils.reduce_tensor(loss.new([loss_avg]), args.world_size).item() loss_now = utils.reduce_tensor(loss.new([loss_now]), args.world_size).item() if utils.is_primary(args): _logger.info( f'Train: {epoch} [{update_idx:>4d}/{updates_per_epoch} ' f'({100. * (update_idx + 1) / updates_per_epoch:>3.0f}%)] ' f'Loss: {loss_now:#.3g} ({loss_avg:#.3g}) ' f'Time: {update_time_m.val:.3f}s, {update_sample_count / update_time_m.val:>7.2f}/s ' f'({update_time_m.avg:.3f}s, {update_sample_count / update_time_m.avg:>7.2f}/s) ' f'LR: {lr:.3e} ' f'Data: {data_time_m.val:.3f} ({data_time_m.avg:.3f})' ) if args.save_images and output_dir: torchvision.utils.save_image( input, os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx), padding=0, normalize=True ) if saver is not None and args.recovery_interval and ( (update_idx + 1) % args.recovery_interval == 0): saver.save_recovery(epoch, batch_idx=update_idx) if lr_scheduler is not None: lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) update_sample_count = 0 data_start_time = time.time() # end for if hasattr(optimizer, 'sync_lookahead'): optimizer.sync_lookahead() loss_avg = losses_m.avg if args.distributed: # synchronize avg loss, each process keeps its own running avg loss_avg = torch.tensor([loss_avg], device=device, dtype=torch.float32) loss_avg = utils.reduce_tensor(loss_avg, args.world_size).item() return OrderedDict([('loss', loss_avg)]) def validate( model, loader, loss_fn, args, device=torch.device('cuda'), amp_autocast=suppress, model_dtype=None, log_suffix='' ): batch_time_m = utils.AverageMeter() losses_m = utils.AverageMeter() top1_m = utils.AverageMeter() top5_m = utils.AverageMeter() model.eval() end = time.time() last_idx = len(loader) - 1 with torch.inference_mode(): for batch_idx, (input, target) in enumerate(loader): last_batch = batch_idx == last_idx if not args.prefetcher: input = input.to(device=device, dtype=model_dtype) target = target.to(device=device) if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) with amp_autocast(): output = model(input) if isinstance(output, (tuple, list)): output = output[0] # augmentation reduction reduce_factor = args.tta if reduce_factor > 1: output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) target = target[0:target.size(0):reduce_factor] loss = loss_fn(output, target) acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) if args.distributed: reduced_loss = utils.reduce_tensor(loss.data, args.world_size) acc1 = utils.reduce_tensor(acc1, args.world_size) acc5 = utils.reduce_tensor(acc5, args.world_size) else: reduced_loss = loss.data if device.type == 'cuda': torch.cuda.synchronize() elif device.type == "npu": torch.npu.synchronize() batch_size = output.shape[0] losses_m.update(reduced_loss.item(), batch_size) top1_m.update(acc1.item(), batch_size) top5_m.update(acc5.item(), batch_size) batch_time_m.update(time.time() - end) end = time.time() if utils.is_primary(args) and (last_batch or batch_idx % args.log_interval == 0): log_name = 'Test' + log_suffix _logger.info( f'{log_name}: [{batch_idx:>4d}/{last_idx}] ' f'Time: {batch_time_m.val:.3f} ({batch_time_m.avg:.3f}) ' f'Loss: {losses_m.val:>7.3f} ({losses_m.avg:>6.3f}) ' f'Acc@1: {top1_m.val:>7.3f} ({top1_m.avg:>7.3f}) ' f'Acc@5: {top5_m.val:>7.3f} ({top5_m.avg:>7.3f})' ) metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)]) return metrics 
if __name__ == '__main__': main()
pytorch-image-models/train.py/0
{ "file_path": "pytorch-image-models/train.py", "repo_id": "pytorch-image-models", "token_count": 29207 }
251
# Built-in Tools Ready-to-use tool implementations provided by the `smolagents` library. These built-in tools are concrete implementations of the [`Tool`] base class, each designed for specific tasks such as web searching, Python code execution, webpage retrieval, and user interaction. You can use these tools directly in your agents without having to implement the underlying functionality yourself. Each tool handles a particular capability and follows a consistent interface, making it easy to compose them into powerful agent workflows. The built-in tools can be categorized by their primary functions: - **Information Retrieval**: Search and retrieve information from the web and specific knowledge sources. - [`ApiWebSearchTool`] - [`DuckDuckGoSearchTool`] - [`GoogleSearchTool`] - [`WebSearchTool`] - [`WikipediaSearchTool`] - **Web Interaction**: Fetch and process content from specific web pages. - [`VisitWebpageTool`] - **Code Execution**: Dynamic execution of Python code for computational tasks. - [`PythonInterpreterTool`] - **User Interaction**: Enable Human-in-the-Loop collaboration between agents and users. - [`UserInputTool`]: Collect input from users. - **Speech Processing**: Convert audio to textual data. - [`SpeechToTextTool`] - **Workflow Control**: Manage and direct the flow of agent operations. - [`FinalAnswerTool`]: Conclude agent workflow with final response. ## ApiWebSearchTool [[autodoc]] smolagents.default_tools.ApiWebSearchTool ## DuckDuckGoSearchTool [[autodoc]] smolagents.default_tools.DuckDuckGoSearchTool ## FinalAnswerTool [[autodoc]] smolagents.default_tools.FinalAnswerTool ## GoogleSearchTool [[autodoc]] smolagents.default_tools.GoogleSearchTool ## PythonInterpreterTool [[autodoc]] smolagents.default_tools.PythonInterpreterTool ## SpeechToTextTool [[autodoc]] smolagents.default_tools.SpeechToTextTool ## UserInputTool [[autodoc]] smolagents.default_tools.UserInputTool ## VisitWebpageTool [[autodoc]] smolagents.default_tools.VisitWebpageTool ## WebSearchTool [[autodoc]] smolagents.default_tools.WebSearchTool ## WikipediaSearchTool [[autodoc]] smolagents.default_tools.WikipediaSearchTool
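## Usage Example

As a minimal sketch (assuming a standard `smolagents` installation and an `InferenceClientModel` backend, which you may need to swap for your own model class), built-in tools are passed directly to an agent like any other [`Tool`] instance:

```python
from smolagents import CodeAgent, InferenceClientModel, VisitWebpageTool, WebSearchTool

# Compose two of the built-in tools into an agent; any tool listed above
# can be swapped in the same way.
agent = CodeAgent(
    tools=[WebSearchTool(), VisitWebpageTool()],
    model=InferenceClientModel(),
)

agent.run("Find the smolagents GitHub repository and summarize its README.")
```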
smolagents/docs/source/en/reference/default_tools.md/0
{ "file_path": "smolagents/docs/source/en/reference/default_tools.md", "repo_id": "smolagents", "token_count": 618 }
252
# `smolagents` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/license_to_call.png" width=100%/> </div> यह लाइब्रेरी पावरफुल एजेंट्स बनाने के लिए सबसे सरल फ्रेमवर्क है! वैसे, "एजेंट्स" हैं क्या? हम अपनी परिभाषा [इस पेज पर](conceptual_guides/intro_agents) प्रदान करते हैं, जहाँ आपको यह भी पता चलेगा कि इन्हें कब उपयोग करें या न करें (स्पॉइलर: आप अक्सर एजेंट्स के बिना बेहतर काम कर सकते हैं)। यह लाइब्रेरी प्रदान करती है: ✨ **सरलता**: Agents का लॉजिक लगभग एक हजार लाइन्स ऑफ़ कोड में समाहित है। हमने रॉ कोड के ऊपर एब्स्ट्रैक्शन को न्यूनतम आकार में रखा है! 🌐 **सभी LLM के लिए सपोर्ट**: यह हब पर होस्ट किए गए मॉडल्स को उनके `transformers` वर्जन में या हमारे इन्फरेंस API के माध्यम से सपोर्ट करता है, साथ ही OpenAI, Anthropic से भी... किसी भी LLM से एजेंट को पावर करना वास्तव में आसान है। 🧑‍💻 **कोड Agents के लिए फर्स्ट-क्लास सपोर्ट**, यानी ऐसे एजेंट्स जो अपनी एक्शन्स को कोड में लिखते हैं (कोड लिखने के लिए उपयोग किए जाने वाले एजेंट्स के विपरीत), [यहाँ और पढ़ें](tutorials/secure_code_execution)। 🤗 **हब इंटीग्रेशन**: आप टूल्स को हब पर शेयर और लोड कर सकते हैं, और आगे और भी बहुत कुछ आने वाला है! ! <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./guided_tour" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">गाइडेड टूर</div> <p class="text-gray-700">बेसिक्स सीखें और एजेंट्स का उपयोग करने में परिचित हों। यदि आप पहली बार एजेंट्स का उपयोग कर रहे हैं तो यहाँ से शुरू करें!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./examples/text_to_sql" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">हाउ-टू गाइड्स</div> <p class="text-gray-700">एक विशिष्ट लक्ष्य प्राप्त करने में मदद के लिए गाइड: SQL क्वेरी जनरेट और टेस्ट करने के लिए एजेंट बनाएं!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual_guides/intro_agents" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">कॉन्सेप्चुअल गाइड्स</div> <p class="text-gray-700">महत्वपूर्ण विषयों की बेहतर समझ बनाने के लिए उच्च-स्तरीय व्याख्याएं।</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/building_good_agents" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">ट्यूटोरियल्स</div> <p class="text-gray-700">एजेंट्स बनाने के महत्वपूर्ण पहलुओं को कवर करने वाले क्ट्यूटोरियल्स।</p> </a> </div> </div>
smolagents/docs/source/hi/index.md/0
{ "file_path": "smolagents/docs/source/hi/index.md", "repo_id": "smolagents", "token_count": 2728 }
253
# 编排 multi-agent 系统 🤖🤝🤖 [[open-in-colab]] 此notebook将构建一个 **multi-agent 网络浏览器:一个有多个代理协作,使用网络进行搜索解决问题的代理系统** `ManagedAgent` 对象将封装这些管理网络搜索的agent,形成一个简单的层次结构: ``` +----------------+ | Manager agent | +----------------+ | _______________|______________ | | Code interpreter +--------------------------------+ tool | Managed agent | | +------------------+ | | | Web Search agent | | | +------------------+ | | | | | | Web Search tool | | | Visit webpage tool | +--------------------------------+ ``` 我们来一起构建这个系统。运行下列代码以安装依赖包: ``` !pip install smolagents[toolkit] --upgrade -q ``` 我们需要登录Hugging Face Hub以调用HF的Inference API: ``` from huggingface_hub import login login() ``` ⚡️ HF的Inference API 可以快速轻松地运行任何开源模型,因此我们的agent将使用HF的Inference API 中的`InferenceClientModel`类来调用 [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct)模型。 _Note:_ 基于多参数和部署模型的 Inference API 可能在没有预先通知的情况下更新或替换模型。了解更多信息,请参阅[这里](https://huggingface.co/docs/api-inference/supported-models)。 ```py model_id = "Qwen/Qwen2.5-Coder-32B-Instruct" ``` ## 🔍 创建网络搜索工具 虽然我们可以使用已经存在的 [`WebSearchTool`] 工具作为谷歌搜索的平替进行网页浏览,然后我们也需要能够查看`WebSearchTool`找到的页面。为此,我 们可以直接导入库的内置 `VisitWebpageTool`。但是我们将重新构建它以了解其工作原理。 我们将使用`markdownify` 来从头构建我们的`VisitWebpageTool`工具。 ```py import re import requests from markdownify import markdownify from requests.exceptions import RequestException from smolagents import tool @tool def visit_webpage(url: str) -> str: """Visits a webpage at the given URL and returns its content as a markdown string. Args: url: The URL of the webpage to visit. Returns: The content of the webpage converted to Markdown, or an error message if the request fails. """ try: # Send a GET request to the URL response = requests.get(url) response.raise_for_status() # Raise an exception for bad status codes # Convert the HTML content to Markdown markdown_content = markdownify(response.text).strip() # Remove multiple line breaks markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content) return markdown_content except RequestException as e: return f"Error fetching the webpage: {str(e)}" except Exception as e: return f"An unexpected error occurred: {str(e)}" ``` 现在我们初始化这个工具并测试它! ```py print(visit_webpage("https://en.wikipedia.org/wiki/Hugging_Face")[:500]) ``` ## 构建我们的 multi-agent 系统 🤖🤝🤖 现在我们有了所有工具`search`和`visit_webpage`,我们可以使用它们来创建web agent。 我们该选取什么样的配置来构建这个agent呢? - 网页浏览是一个单线程任务,不需要并行工具调用,因此JSON工具调用对于这个任务非常有效。因此我们选择`ToolCallingAgent`。 - 有时候网页搜索需要探索许多页面才能找到正确答案,所以我们更喜欢将 `max_steps` 增加到10。 ```py from smolagents import ( CodeAgent, ToolCallingAgent, InferenceClientModel, ManagedAgent, WebSearchTool, LiteLLMModel, ) model = InferenceClientModel(model_id=model_id) web_agent = ToolCallingAgent( tools=[WebSearchTool(), visit_webpage], model=model, max_steps=10, name="search", description="Runs web searches for you. Give it your query as an argument.", ) ``` 请注意,我们为这个代理赋予了 name(名称)和 description(描述)属性,这些是必需属性,以便让管理代理能够调用此代理。 然后,我们创建一个管理代理,在初始化时,将受管代理作为 managed_agents 参数传递给它。 由于这个代理的任务是进行规划和思考,高级推理能力会很有帮助,因此 CodeAgent(代码代理)将是最佳选择。 此外,我们要提出一个涉及当前年份并需要进行额外数据计算的问题:所以让我们添加 additional_authorized_imports=["time", "numpy", "pandas"],以防代理需要用到这些包。 ```py manager_agent = CodeAgent( tools=[], model=model, managed_agents=[web_agent], additional_authorized_imports=["time", "numpy", "pandas"], ) ``` 可以了!现在让我们运行我们的系统!我们选择一个需要一些计算和研究的问题: ```py answer = manager_agent.run("If LLM training continues to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? 
What would that correspond to, compared to some countries? Please provide a source for any numbers used.") ``` 我们用这个report 来回答这个问题: ``` Based on current growth projections and energy consumption estimates, if LLM trainings continue to scale up at the current rhythm until 2030: 1. The electric power required to power the biggest training runs by 2030 would be approximately 303.74 GW, which translates to about 2,660,762 GWh/year. 1. Comparing this to countries' electricity consumption: - It would be equivalent to about 34% of China's total electricity consumption. - It would exceed the total electricity consumption of India (184%), Russia (267%), and Japan (291%). - It would be nearly 9 times the electricity consumption of countries like Italy or Mexico. 2. Source of numbers: - The initial estimate of 5 GW for future LLM training comes from AWS CEO Matt Garman. - The growth projection used a CAGR of 79.80% from market research by Springs. - Country electricity consumption data is from the U.S. Energy Information Administration, primarily for the year 2021. ``` 如果[scaling hypothesis](https://gwern.net/scaling-hypothesis)持续成立的话,我们需要一些庞大的动力配置。我们的agent成功地协作解决了这个任务!✅ 💡 你可以轻松地将这个编排扩展到更多的agent:一个执行代码,一个进行网页搜索,一个处理文件加载⋯⋯
smolagents/docs/source/zh/examples/multiagents.md/0
{ "file_path": "smolagents/docs/source/zh/examples/multiagents.md", "repo_id": "smolagents", "token_count": 3419 }
254
# Async Applications with Agents This example demonstrates how to use a `CodeAgent` from the `smolagents` library in an asynchronous Starlette web application. The agent is executed in a background thread using `anyio.to_thread.run_sync`, allowing you to integrate synchronous agent logic into an async web server. ## Key Concepts - **Starlette**: A lightweight ASGI framework for building async web apps. - **anyio.to_thread.run_sync**: Runs blocking (sync) code in a thread, so it doesn't block the async event loop. - **CodeAgent**: An agent from the `smolagents` library that can be used to solve tasks programmatically. ## How it works - The Starlette app exposes a `/run-agent` endpoint that accepts a JSON payload with a `task` string. - When a request is received, the agent is run in a background thread using `anyio.to_thread.run_sync`. - The result is returned as a JSON response. ## Implementation Note **Why use a background thread?** `CodeAgent.run()` executes Python code synchronously, which would block Starlette's async event loop if called directly. By offloading this synchronous operation to a separate thread with `anyio.to_thread.run_sync`, we maintain the application's responsiveness while the agent processes requests, ensuring optimal performance in high-concurrency scenarios. ## Usage 1. **Install dependencies**: ```bash pip install smolagents starlette anyio uvicorn ``` 2. **Run the app**: ```bash uvicorn async_codeagent_starlette.main:app --reload ``` 3. **Test the endpoint**: ```bash curl -X POST http://localhost:8000/run-agent -H 'Content-Type: application/json' -d '{"task": "What is 2+2?"}' ``` ## Files - `main.py`: Main Starlette application with async endpoint using CodeAgent. - `README.md`: This file. --- This example is designed to be clear and didactic for users new to async Python and agent integration.
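## Minimal `main.py` sketch

The snippet below is a minimal sketch of the pattern described above, not a verbatim copy of the example's `main.py`; the empty tool list and the default `InferenceClientModel` are assumptions to adapt to your own setup.

```python
import anyio.to_thread
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route

from smolagents import CodeAgent, InferenceClientModel

# Create the agent once at startup; CodeAgent.run() itself is synchronous.
agent = CodeAgent(tools=[], model=InferenceClientModel())


async def run_agent(request: Request) -> JSONResponse:
    payload = await request.json()
    task = payload.get("task", "")
    # Offload the blocking agent call to a worker thread so the event loop stays responsive.
    result = await anyio.to_thread.run_sync(agent.run, task)
    return JSONResponse({"result": str(result)})


app = Starlette(routes=[Route("/run-agent", run_agent, methods=["POST"])])
```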
smolagents/examples/async_agent/README.md/0
{ "file_path": "smolagents/examples/async_agent/README.md", "repo_id": "smolagents", "token_count": 525 }
255
# Shamelessly stolen from Microsoft Autogen team: thanks to them for this great resource! # https://github.com/microsoft/autogen/blob/gaia_multiagent_v01_march_1st/autogen/browser_utils.py import copy from smolagents.models import MessageRole, Model def prepare_response(original_task: str, inner_messages, reformulation_model: Model) -> str: messages = [ { "role": MessageRole.SYSTEM, "content": [ { "type": "text", "text": f"""Earlier you were asked the following: {original_task} Your team then worked diligently to address that request. Read below a transcript of that conversation:""", } ], } ] # The first message just repeats the question, so remove it # if len(inner_messages) > 1: # del inner_messages[0] # copy them to this context try: for message in inner_messages: if not message.content: continue message = copy.deepcopy(message) message.role = MessageRole.USER messages.append(message) except Exception: messages += [{"role": MessageRole.ASSISTANT, "content": str(inner_messages)}] # ask for the final answer messages.append( { "role": MessageRole.USER, "content": [ { "type": "text", "text": f""" Read the above conversation and output a FINAL ANSWER to the question. The question is repeated here for convenience: {original_task} To output the final answer, use the following template: FINAL ANSWER: [YOUR FINAL ANSWER] Your FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. ADDITIONALLY, your FINAL ANSWER MUST adhere to any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.) If you are asked for a number, express it numerically (i.e., with digits rather than words), don't use commas, and DO NOT INCLUDE UNITS such as $ or USD or percent signs unless specified otherwise. If you are asked for a string, don't use articles or abbreviations (e.g. for cities), unless specified otherwise. Don't output any final sentence punctuation such as '.', '!', or '?'. If you are asked for a comma separated list, apply the above rules depending on whether the elements are numbers or strings. If you are unable to determine the final answer, output 'FINAL ANSWER: Unable to determine' """, } ], } ) response = reformulation_model(messages).content final_answer = response.split("FINAL ANSWER: ")[-1].strip() print("> Reformulated answer: ", final_answer) # if "unable to determine" in final_answer.lower(): # messages.append({"role": MessageRole.ASSISTANT, "content": response }) # messages.append({"role": MessageRole.USER, "content": [{"type": "text", "text": """ # I understand that a definitive answer could not be determined. Please make a well-informed EDUCATED GUESS based on the conversation. # To output the educated guess, use the following template: EDUCATED GUESS: [YOUR EDUCATED GUESS] # Your EDUCATED GUESS should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. DO NOT OUTPUT 'I don't know', 'Unable to determine', etc. # ADDITIONALLY, your EDUCATED GUESS MUST adhere to any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.) # If you are asked for a number, express it numerically (i.e., with digits rather than words), don't use commas, and don't include units such as $ or percent signs unless specified otherwise. # If you are asked for a string, don't use articles or abbreviations (e.g. cit for cities), unless specified otherwise. 
Don't output any final sentence punctuation such as '.', '!', or '?'. # If you are asked for a comma separated list, apply the above rules depending on whether the elements are numbers or strings. # """.strip()}]}) # response = model(messages).content # print("\n>>>Making an educated guess.\n", response) # final_answer = response.split("EDUCATED GUESS: ")[-1].strip() return final_answer
smolagents/examples/open_deep_research/scripts/reformulator.py/0
{ "file_path": "smolagents/examples/open_deep_research/scripts/reformulator.py", "repo_id": "smolagents", "token_count": 1510 }
256
from sqlalchemy import ( Column, Float, Integer, MetaData, String, Table, create_engine, insert, inspect, text, ) engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() # create city SQL table table_name = "receipts" receipts = Table( table_name, metadata_obj, Column("receipt_id", Integer, primary_key=True), Column("customer_name", String(16), primary_key=True), Column("price", Float), Column("tip", Float), ) metadata_obj.create_all(engine) rows = [ {"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20}, {"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24}, {"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43}, {"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00}, ] for row in rows: stmt = insert(receipts).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) inspector = inspect(engine) columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")] table_description = "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info]) print(table_description) from smolagents import tool @tool def sql_engine(query: str) -> str: """ Allows you to perform SQL queries on the table. Returns a string representation of the result. The table is named 'receipts'. Its description is as follows: Columns: - receipt_id: INTEGER - customer_name: VARCHAR(16) - price: FLOAT - tip: FLOAT Args: query: The query to perform. This should be correct SQL. """ output = "" with engine.connect() as con: rows = con.execute(text(query)) for row in rows: output += "\n" + str(row) return output from smolagents import CodeAgent, InferenceClientModel agent = CodeAgent( tools=[sql_engine], model=InferenceClientModel(model_id="meta-llama/Meta-Llama-3.1-8B-Instruct"), ) agent.run("Can you give me the name of the client who got the most expensive receipt?")
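# Note: with the rows inserted above, a correct run should answer "Woodrow Wilson"
# (receipt 3, price 53.43), typically via a query along the lines of:
#   SELECT customer_name, price FROM receipts ORDER BY price DESC LIMIT 1
# The exact SQL the agent generates may vary from run to run.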
smolagents/examples/text_to_sql.py/0
{ "file_path": "smolagents/examples/text_to_sql.py", "repo_id": "smolagents", "token_count": 860 }
257
system_prompt: |- You are an expert assistant who can solve any task using tool calls. You will be given a task to solve as best you can. To do so, you have been given access to some tools. The tool call you write is an action: after the tool is executed, you will get the result of the tool call as an "observation". This Action/Observation can repeat N times, you should take several steps when needed. You can use the result of the previous action as input for the next action. The observation will always be a string: it can represent a file, like "image_1.jpg". Then you can use it as input for the next action. You can do it for instance as follows: Observation: "image_1.jpg" Action: { "name": "image_transformer", "arguments": {"image": "image_1.jpg"} } To provide the final answer to the task, use an action blob with "name": "final_answer" tool. It is the only way to complete the task, else you will be stuck on a loop. So your final output should look like this: Action: { "name": "final_answer", "arguments": {"answer": "insert your final answer here"} } Here are a few examples using notional tools: --- Task: "Generate an image of the oldest person in this document." Action: { "name": "document_qa", "arguments": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"} } Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland." Action: { "name": "image_generator", "arguments": {"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."} } Observation: "image.png" Action: { "name": "final_answer", "arguments": "image.png" } --- Task: "What is the result of the following operation: 5 + 3 + 1294.678?" Action: { "name": "python_interpreter", "arguments": {"code": "5 + 3 + 1294.678"} } Observation: 1302.678 Action: { "name": "final_answer", "arguments": "1302.678" } --- Task: "Which city has the highest population , Guangzhou or Shanghai?" Action: { "name": "web_search", "arguments": "Population Guangzhou" } Observation: ['Guangzhou has a population of 15 million inhabitants as of 2021.'] Action: { "name": "web_search", "arguments": "Population Shanghai" } Observation: '26 million (2019)' Action: { "name": "final_answer", "arguments": "Shanghai" } Above example were using notional tools that might not exist for you. You only have access to these tools: {%- for tool in tools.values() %} - {{ tool.to_tool_calling_prompt() }} {%- endfor %} {%- if managed_agents and managed_agents.values() | list %} You can also give tasks to team members. Calling a team member works similarly to calling a tool: provide the task description as the 'task' argument. Since this team member is a real human, be as detailed and verbose as necessary in your task description. You can also include any relevant variables or context using the 'additional_args' argument. Here is a list of the team members that you can call: {%- for agent in managed_agents.values() %} - {{ agent.name }}: {{ agent.description }} - Takes inputs: {{agent.inputs}} - Returns an output of type: {{agent.output_type}} {%- endfor %} {%- endif %} {%- if custom_instructions %} {{custom_instructions}} {%- endif %} Here are the rules you should always follow to solve your task: 1. ALWAYS provide a tool call, else you will fail. 2. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead. 3. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself. 
If no tool call is needed, use final_answer tool to return your answer. 4. Never re-do a tool call that you previously did with the exact same parameters. Now Begin! planning: initial_plan : |- You are a world expert at analyzing a situation to derive facts, and plan accordingly towards solving a task. Below I will present you a task. You will need to 1. build a survey of facts known or needed to solve the task, then 2. make a plan of action to solve the task. ## 1. Facts survey You will build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need. These "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings: ### 1.1. Facts given in the task List here the specific facts given in the task that could help you (there might be nothing here). ### 1.2. Facts to look up List here any facts that we may need to look up. Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here. ### 1.3. Facts to derive List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation. Don't make any assumptions. For each item, provide a thorough reasoning. Do not add anything else on top of three headings above. ## 2. Plan Then for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '<end_plan>' tag and stop there. You can leverage these tools: {%- for tool in tools.values() %} - {{ tool.to_tool_calling_prompt() }} {%- endfor %} {%- if managed_agents and managed_agents.values() | list %} You can also give tasks to team members. Calling a team member works similarly to calling a tool: provide the task description as the 'task' argument. Since this team member is a real human, be as detailed and verbose as necessary in your task description. You can also include any relevant variables or context using the 'additional_args' argument. Here is a list of the team members that you can call: {%- for agent in managed_agents.values() %} - {{ agent.name }}: {{ agent.description }} - Takes inputs: {{agent.inputs}} - Returns an output of type: {{agent.output_type}} {%- endfor %} {%- endif %} --- Now begin! Here is your task: ``` {{task}} ``` First in part 1, write the facts survey, then in part 2, write your plan. update_plan_pre_messages: |- You are a world expert at analyzing a situation, and plan accordingly towards solving a task. You have been given the following task: ``` {{task}} ``` Below you will find a history of attempts made to solve this task. You will first have to produce a survey of known and unknown facts, then propose a step-by-step high-level plan to solve the task. If the previous tries so far have met some success, your updated plan can build on these results. If you are stalled, you can make a completely new plan starting from scratch. Find the task and history below: update_plan_post_messages: |- Now write your updated facts below, taking into account the above history: ## 1. Updated facts survey ### 1.1. Facts given in the task ### 1.2. Facts that we have learned ### 1.3. Facts still to look up ### 1.4. 
Facts still to derive Then write a step-by-step high-level plan to solve the task above. ## 2. Plan ### 2. 1. ... Etc. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Beware that you have {remaining_steps} steps remaining. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '<end_plan>' tag and stop there. You can leverage these tools: {%- for tool in tools.values() %} - {{ tool.to_tool_calling_prompt() }} {%- endfor %} {%- if managed_agents and managed_agents.values() | list %} You can also give tasks to team members. Calling a team member works similarly to calling a tool: provide the task description as the 'task' argument. Since this team member is a real human, be as detailed and verbose as necessary in your task description. You can also include any relevant variables or context using the 'additional_args' argument. Here is a list of the team members that you can call: {%- for agent in managed_agents.values() %} - {{ agent.name }}: {{ agent.description }} - Takes inputs: {{agent.inputs}} - Returns an output of type: {{agent.output_type}} {%- endfor %} {%- endif %} Now write your new plan below. managed_agent: task: |- You're a helpful agent named '{{name}}'. You have been submitted this task by your manager. --- Task: {{task}} --- You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer. Your final_answer WILL HAVE to contain these parts: ### 1. Task outcome (short version): ### 2. Task outcome (extremely detailed version): ### 3. Additional context (if relevant): Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost. And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback. report: |- Here is the final answer from your managed agent '{{name}}': {{final_answer}} final_answer: pre_messages: |- An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory: post_messages: |- Based on the above, please provide an answer to the following user task: {{task}}
smolagents/src/smolagents/prompts/toolcalling_agent.yaml/0
{ "file_path": "smolagents/src/smolagents/prompts/toolcalling_agent.yaml", "repo_id": "smolagents", "token_count": 3070 }
258
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import pytest from smolagents._function_type_hints_utils import DocstringParsingException, get_imports, get_json_schema @pytest.fixture def valid_func(): """A well-formed function with docstring, type hints, and return block.""" def multiply(x: int, y: float) -> float: """ Multiplies two numbers. Args: x: The first number. y: The second number. Returns: Product of x and y. """ return x * y return multiply @pytest.fixture def no_docstring_func(): """Function with no docstring.""" def sample(x: int): return x return sample @pytest.fixture def missing_arg_doc_func(): """Function with docstring but missing an argument description.""" def add(x: int, y: int): """ Adds two numbers. Args: x: The first number. """ return x + y return add @pytest.fixture def bad_return_func(): """Function docstring with missing return description (allowed).""" def do_nothing(x: str | None = None): """ Does nothing. Args: x: Some optional string. """ pass return do_nothing @pytest.fixture def complex_types_func(): def process_data(items: list[str], config: dict[str, float], point: tuple[int, int]) -> dict: """ Process some data. Args: items: List of items to process. config: Configuration parameters. point: A position as (x,y). Returns: Processed data result. """ return {"result": True} return process_data @pytest.fixture def optional_types_func(): def process_with_optional(required_arg: str, optional_arg: int | None = None) -> str: """ Process with optional argument. Args: required_arg: A required string argument. optional_arg: An optional integer argument. Returns: Processing result. """ return "processed" return process_with_optional @pytest.fixture def enum_choices_func(): def select_color(color: str) -> str: """ Select a color. Args: color: The color to select (choices: ["red", "green", "blue"]) Returns: Selected color. """ return color return select_color @pytest.fixture def union_types_func(): def process_union(value: int | str) -> bool | str: """ Process a value that can be either int or string. Args: value: An integer or string value. Returns: Processing result. """ return True if isinstance(value, int) else "string result" return process_union @pytest.fixture def nested_types_func(): def process_nested_data(data: list[dict[str, Any]]) -> list[str]: """ Process nested data structure. Args: data: List of dictionaries to process. Returns: List of processed results. """ return ["result"] return process_nested_data @pytest.fixture def typed_docstring_func(): def calculate(x: int, y: float) -> float: """ Calculate something. Args: x (int): An integer parameter with type in docstring. y (float): A float parameter with type in docstring. Returns: float: The calculated result. """ return x * y return calculate @pytest.fixture def mismatched_types_func(): def convert(value: int) -> str: """ Convert a value. Args: value (str): A string value (type mismatch with hint). 
Returns: int: Converted value (type mismatch with hint). """ return str(value) return convert @pytest.fixture def complex_docstring_types_func(): def process(data: dict[str, list[int]]) -> list[dict[str, Any]]: """ Process complex data. Args: data (Dict[str, List[int]]): Nested structure with types. Returns: List[Dict[str, Any]]: Processed results with types. """ return [{"result": sum(v) for k, v in data.items()}] return process @pytest.fixture def keywords_in_description_func(): def process(value: str) -> str: """ Function with Args: or Returns: keywords in its description. Args: value: A string value. Returns: str: Processed value. """ return value.upper() return process class TestGetJsonSchema: def test_get_json_schema_example(self): def fn(x: int, y: tuple[str, str, float] | None = None) -> None: """ Test function Args: x: The first input y: The second input """ pass schema = get_json_schema(fn) expected_schema = { "name": "fn", "description": "Test function", "parameters": { "type": "object", "properties": { "x": {"type": "integer", "description": "The first input"}, "y": { "type": "array", "description": "The second input", "nullable": True, "prefixItems": [{"type": "string"}, {"type": "string"}, {"type": "number"}], }, }, "required": ["x"], }, "return": {"type": "null"}, } assert schema["function"]["parameters"]["properties"]["y"] == expected_schema["parameters"]["properties"]["y"] assert schema["function"] == expected_schema @pytest.mark.parametrize( "fixture_name,should_fail", [ ("valid_func", False), # ('no_docstring_func', True), # ('missing_arg_doc_func', True), ("bad_return_func", False), ], ) def test_get_json_schema(self, request, fixture_name, should_fail): func = request.getfixturevalue(fixture_name) schema = get_json_schema(func) assert schema["type"] == "function" assert "function" in schema assert "parameters" in schema["function"] @pytest.mark.parametrize( "fixture_name,should_fail", [ # ('valid_func', False), ("no_docstring_func", True), ("missing_arg_doc_func", True), # ('bad_return_func', False), ], ) def test_get_json_schema_raises(self, request, fixture_name, should_fail): func = request.getfixturevalue(fixture_name) with pytest.raises(DocstringParsingException): get_json_schema(func) @pytest.mark.parametrize( "fixture_name,expected_properties", [ ("valid_func", {"x": "integer", "y": "number"}), ("bad_return_func", {"x": "string"}), ], ) def test_property_types(self, request, fixture_name, expected_properties): """Test that property types are correctly mapped.""" func = request.getfixturevalue(fixture_name) schema = get_json_schema(func) properties = schema["function"]["parameters"]["properties"] for prop_name, expected_type in expected_properties.items(): assert properties[prop_name]["type"] == expected_type def test_schema_basic_structure(self, valid_func): """Test that basic schema structure is correct.""" schema = get_json_schema(valid_func) # Check schema type assert schema["type"] == "function" assert "function" in schema # Check function schema function_schema = schema["function"] assert function_schema["name"] == "multiply" assert "description" in function_schema assert function_schema["description"] == "Multiplies two numbers." 
# Check parameters schema assert "parameters" in function_schema params = function_schema["parameters"] assert params["type"] == "object" assert "properties" in params assert "required" in params assert set(params["required"]) == {"x", "y"} properties = params["properties"] assert properties["x"]["type"] == "integer" assert properties["y"]["type"] == "number" # Check return schema assert "return" in function_schema return_schema = function_schema["return"] assert return_schema["type"] == "number" assert return_schema["description"] == "Product of x and y." def test_complex_types(self, complex_types_func): """Test schema generation for complex types.""" schema = get_json_schema(complex_types_func) properties = schema["function"]["parameters"]["properties"] # Check list type assert properties["items"]["type"] == "array" # Check dict type assert properties["config"]["type"] == "object" # Check tuple type assert properties["point"]["type"] == "array" assert len(properties["point"]["prefixItems"]) == 2 assert properties["point"]["prefixItems"][0]["type"] == "integer" assert properties["point"]["prefixItems"][1]["type"] == "integer" def test_optional_types(self, optional_types_func): """Test schema generation for optional arguments.""" schema = get_json_schema(optional_types_func) params = schema["function"]["parameters"] # Required argument should be in required list assert "required_arg" in params["required"] # Optional argument should not be in required list assert "optional_arg" not in params["required"] # Optional argument should be nullable assert params["properties"]["optional_arg"]["nullable"] is True assert params["properties"]["optional_arg"]["type"] == "integer" def test_enum_choices(self, enum_choices_func): """Test schema generation for enum choices in docstring.""" schema = get_json_schema(enum_choices_func) color_prop = schema["function"]["parameters"]["properties"]["color"] assert "enum" in color_prop assert color_prop["enum"] == ["red", "green", "blue"] def test_union_types(self, union_types_func): """Test schema generation for union types.""" schema = get_json_schema(union_types_func) value_prop = schema["function"]["parameters"]["properties"]["value"] return_prop = schema["function"]["return"] # Check union in parameter assert len(value_prop["type"]) == 2 # Check union in return type: should be converted to "any" assert return_prop["type"] == "any" def test_nested_types(self, nested_types_func): """Test schema generation for nested complex types.""" schema = get_json_schema(nested_types_func) data_prop = schema["function"]["parameters"]["properties"]["data"] assert data_prop["type"] == "array" def test_typed_docstring_parsing(self, typed_docstring_func): """Test parsing of docstrings with type annotations.""" schema = get_json_schema(typed_docstring_func) # Type hints should take precedence over docstring types assert schema["function"]["parameters"]["properties"]["x"]["type"] == "integer" assert schema["function"]["parameters"]["properties"]["y"]["type"] == "number" # Description should be extracted correctly assert ( schema["function"]["parameters"]["properties"]["x"]["description"] == "An integer parameter with type in docstring." ) assert ( schema["function"]["parameters"]["properties"]["y"]["description"] == "A float parameter with type in docstring." ) # Return type and description should be correct assert schema["function"]["return"]["type"] == "number" assert schema["function"]["return"]["description"] == "The calculated result." 
def test_mismatched_docstring_types(self, mismatched_types_func): """Test that type hints take precedence over docstring types when they conflict.""" schema = get_json_schema(mismatched_types_func) # Type hints should take precedence over docstring types assert schema["function"]["parameters"]["properties"]["value"]["type"] == "integer" # Return type from type hint should be used, not docstring assert schema["function"]["return"]["type"] == "string" def test_complex_docstring_types(self, complex_docstring_types_func): """Test parsing of complex type annotations in docstrings.""" schema = get_json_schema(complex_docstring_types_func) # Check that complex nested type is parsed correctly from type hints data_prop = schema["function"]["parameters"]["properties"]["data"] assert data_prop["type"] == "object" # Check return type return_prop = schema["function"]["return"] assert return_prop["type"] == "array" # Description should include the type information from docstring assert data_prop["description"] == "Nested structure with types." assert return_prop["description"] == "Processed results with types." @pytest.mark.parametrize( "fixture_name,expected_description", [ ("typed_docstring_func", "An integer parameter with type in docstring."), ("complex_docstring_types_func", "Nested structure with types."), ], ) def test_type_in_description_handling(self, request, fixture_name, expected_description): """Test that type information in docstrings is preserved in description.""" func = request.getfixturevalue(fixture_name) schema = get_json_schema(func) # First parameter description should contain the expected text first_param_name = list(schema["function"]["parameters"]["properties"].keys())[0] assert schema["function"]["parameters"]["properties"][first_param_name]["description"] == expected_description def test_with_special_words_in_description_func(self, keywords_in_description_func): schema = get_json_schema(keywords_in_description_func) assert schema["function"]["description"] == "Function with Args: or Returns: keywords in its description." class TestGetCode: @pytest.mark.parametrize( "code, expected", [ ( """ import numpy import pandas """, ["numpy", "pandas"], ), # From imports ( """ from torch import nn from transformers import AutoModel """, ["torch", "transformers"], ), # Mixed case with nested imports ( """ import numpy as np from torch.nn import Linear import os.path """, ["numpy", "torch", "os"], ), # Try/except block (should be filtered) ( """ try: import torch except ImportError: pass import numpy """, ["numpy"], ), # Flash attention block (should be filtered) ( """ if is_flash_attn_2_available(): from flash_attn import flash_attn_func import transformers """, ["transformers"], ), # Relative imports (should be excluded) ( """ from .utils import helper from ..models import transformer """, [], ), ], ) def test_get_imports(self, code: str, expected: list[str]): assert sorted(get_imports(code)) == sorted(expected)
smolagents/tests/test_function_type_hints_utils.py/0
{ "file_path": "smolagents/tests/test_function_type_hints_utils.py", "repo_id": "smolagents", "token_count": 7061 }
259
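The assertions in the test record above pin down the schema shape that get_json_schema is expected to produce for a simple typed function. The following is a minimal sketch, not copied from the repository: the fixture body and the import path are assumptions for illustration, and the commented dictionary only mirrors what the assertions above check.

# Minimal sketch of a fixture-like function and the schema shape implied by
# the assertions above. The import path is an assumption; adjust it to where
# get_json_schema actually lives in your smolagents checkout.
from smolagents._function_type_hints_utils import get_json_schema  # assumed path


def multiply(x: int, y: float) -> float:
    """
    Multiply two numbers.

    Args:
        x: The first factor.
        y: The second factor.

    Returns:
        Product of x and y.
    """
    return x * y


schema = get_json_schema(multiply)
# Rough expected shape, per the assertions above:
# {
#   "type": "function",
#   "function": {
#     "name": "multiply",
#     "description": "Multiply two numbers.",
#     "parameters": {
#       "type": "object",
#       "properties": {
#         "x": {"type": "integer", "description": "The first factor."},
#         "y": {"type": "number", "description": "The second factor."},
#       },
#       "required": ["x", "y"],
#     },
#     "return": {"type": "number", "description": "Product of x and y."},
#   },
# }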
aml
target
server/transformers
server/flash-attention
cmake-build-debug/
cmake-build-release/
Dockerfile*
text-generation-inference/.dockerignore/0
{ "file_path": "text-generation-inference/.dockerignore", "repo_id": "text-generation-inference", "token_count": 37 }
260
mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) mkfile_dir := $(dir $(mkfile_path)) root_dir := ${mkfile_dir}/../.. HABANA_VERSION := 1.21.0 PYTORCH_VERSION := 2.6.0 .PHONY: image run-local-dev-container install-dependencies install-server install-router install-launcher local-dev-install image: docker build --ulimit nofile=4096 -t tgi-gaudi -f ${root_dir}/Dockerfile_gaudi ${root_dir} --build-arg HABANA_VERSION=$(HABANA_VERSION) --build-arg PYTORCH_VERSION=$(PYTORCH_VERSION) run-local-dev-container: docker run -it \ --runtime=habana \ --ipc=host \ --cap-add=sys_nice \ --net=host \ -e HABANA_VISIBLE_DEVICES=all \ -e OMPI_MCA_btl_vader_single_copy_mechanism=none \ -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \ -e HF_TOKEN=`cat /home/ubuntu/.cache/huggingface/token` \ -e LOG_LEVEL=debug \ -e PORT=8080 \ -v /home/ubuntu/.cache/huggingface:/data \ -v $(PWD):/text-generation-inference \ -w /text-generation-inference \ vault.habana.ai/gaudi-docker/$(HABANA_VERSION)/ubuntu22.04/habanalabs/pytorch-installer-$(PYTORCH_VERSION):latest install-dependencies: pip install git+https://github.com/HabanaAI/DeepSpeed.git@$(HABANA_VERSION) pip install outlines~=0.0.34 curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y install-server: make -C ${root_dir}/backends/gaudi/server install PROTO_PATH=../../../proto/v3 install-router: make -C ${root_dir} install-router install-launcher: make -C ${root_dir} install-launcher # use source to load the rust in path local-dev-install: install-dependencies bash -c 'source "$$HOME/.cargo/env" && \ make install-server && \ make install-router && \ make install-launcher' # In order to run the integration tests, you need to first build the image (make -C backends/gaudi image) run-integration-tests: DOCKER_VOLUME=${root_dir}/data \ HF_TOKEN=`cat ${HOME}/.cache/huggingface/token` \ pytest --durations=0 -s -vv ${root_dir}/integration-tests --gaudi run-integration-tests-with-all-models: DOCKER_VOLUME=${root_dir}/data \ HF_TOKEN=`cat ${HOME}/.cache/huggingface/token` \ pytest --durations=0 -s -vv ${root_dir}/integration-tests --gaudi --gaudi-all-models # This is used to capture the expected outputs for the integration tests offering an easy way to add more models to the integration tests capture-expected-outputs-for-integration-tests: pip install -U pip uv DOCKER_VOLUME=${root_dir}/data \ HF_TOKEN=`cat ${HOME}/.cache/huggingface/token` \ uv run pytest --durations=0 -sv ${root_dir}/backends/gaudi/server/integration-tests/capture_expected_outputs.py
text-generation-inference/backends/gaudi/Makefile/0
{ "file_path": "text-generation-inference/backends/gaudi/Makefile", "repo_id": "text-generation-inference", "token_count": 1023 }
261
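The Makefile above sets up a local Gaudi development container with the server port set to 8080 (PORT=8080 in run-local-dev-container). Once a text-generation-inference server built through these targets is running, a minimal smoke test from Python might look like the sketch below; the host, prompt, and generation parameters are illustrative assumptions rather than anything defined in the Makefile itself.

# Smoke-test sketch against a locally running TGI instance.
# Assumes the server is listening on localhost:8080; adjust as needed.
import requests

resp = requests.post(
    "http://localhost:8080/generate",
    json={
        "inputs": "What is Deep Learning?",
        "parameters": {"max_new_tokens": 32},
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["generated_text"])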
[tool.poetry] name = "text-generation-server" version = "2.0.4" description = "Text Generation Inference Python gRPC Server" authors = ["Olivier Dehaene <olivier@huggingface.co>"] [tool.poetry.scripts] text-generation-server = 'text_generation_server.cli:app' [tool.poetry.dependencies] python = ">=3.9,<3.13" protobuf = "^5.0" grpcio = "^1.71.1" grpcio-status = "*" grpcio-reflection = "*" grpc-interceptor = "^0.15.0" typer = "^0.15.0" loguru = "^0.7.3" opentelemetry-api = "^1.32.0" opentelemetry-exporter-otlp = "^1.32.0" opentelemetry-instrumentation-grpc = "^0.53b0" hf-transfer = "^0.1.9" sentencepiece = "^0.2.0" peft = "^0.15" transformers = "^4.52.4" numpy = "^1.26" accelerate = "^1.7.0" outlines= { version = "^0.0.36", optional = true } prometheus-client = "^0.21.1" py-cpuinfo = "^9.0.0" [tool.poetry.group.dev.dependencies] grpcio-tools = "*" pytest = "^8.3.5" [tool.pytest.ini_options] markers = ["private: marks tests as requiring an admin hf token (deselect with '-m \"not private\"')"] [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry.requires-plugins] poetry-plugin-export = ">=1.8"
text-generation-inference/backends/gaudi/server/pyproject.toml/0
{ "file_path": "text-generation-inference/backends/gaudi/server/pyproject.toml", "repo_id": "text-generation-inference", "token_count": 524 }
262