Dataset columns:
  text: string, lengths 7 to 1.24M
  id: string, lengths 14 to 166
  metadata: dict
  __index_level_0__: int64, values 0 to 519
# Copyright 2022 The Music Spectrogram Diffusion Authors.
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ....configuration_utils import ConfigMixin, register_to_config
from ....models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py", "repo_id": "diffusers", "token_count": 1254 }
140
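For context, below is a minimal sketch of how the notes encoder above could be instantiated and run on a dummy batch. The hyperparameter values are illustrative placeholders rather than the configuration of any released spectrogram-diffusion checkpoint, and the import path is an assumption based on the file location shown in the record.

```py
import torch

# Assumed import path, matching the file path in the record above.
from diffusers.pipelines.deprecated.spectrogram_diffusion.notes_encoder import (
    SpectrogramNotesEncoder,
)

# Illustrative hyperparameters only; real checkpoints define their own config.
encoder = SpectrogramNotesEncoder(
    max_length=256,
    vocab_size=1536,
    d_model=128,
    dropout_rate=0.1,
    num_layers=2,
    num_heads=4,
    d_kv=32,
    d_ff=256,
    feed_forward_proj="gated-gelu",
)
encoder.eval()

tokens = torch.randint(0, 1536, (1, 256))    # (batch, seq_len) note tokens
mask = torch.ones(1, 256, dtype=torch.long)  # 1 = real token, 0 = padding

with torch.no_grad():
    encodings, out_mask = encoder(tokens, mask)

print(encodings.shape)  # torch.Size([1, 256, 128])
```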
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import torch import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer from ....image_processor import VaeImageProcessor from ....models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_text_unet import UNetFlatConditionModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Versatile Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: vqvae ([`VQModel`]): Vector-quantized (VQ) model to encode and decode images to and from latent representations. bert ([`LDMBertModel`]): Text-encoder model based on [`~transformers.BERT`]. tokenizer ([`~transformers.BertTokenizer`]): A `BertTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" model_cpu_offload_seq = "bert->unet->vqvae" tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModelWithProjection image_unet: UNet2DConditionModel text_unet: UNetFlatConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers _optional_components = ["text_unet"] def __init__( self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, image_unet: UNet2DConditionModel, text_unet: UNetFlatConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) if self.text_unet is not None: self._swap_unet_attention_blocks() def _swap_unet_attention_blocks(self): """ Swap the `Transformer2DModel` blocks between the image and text UNets """ for name, module in self.image_unet.named_modules(): if isinstance(module, Transformer2DModel): parent_name, index = name.rsplit(".", 1) index = int(index) self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index] = ( self.text_unet.get_submodule(parent_name)[index], self.image_unet.get_submodule(parent_name)[index], ) def remove_unused_weights(self): self.register_modules(text_unet=None) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
""" def normalize_embeddings(encoder_output): embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) embeds_pooled = encoder_output.text_embeds embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) return embeds batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = normalize_embeddings(prompt_embeds) # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionTextToImagePipeline >>> import torch >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe.remove_unused_weights() >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0] >>> image.save("./astronaut.png") ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py", "repo_id": "diffusers", "token_count": 9869 }
141
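The text-to-image call above runs the UNet on a doubled latent batch and then recombines the unconditional and text-conditioned noise predictions with the guidance weight `w` from the Imagen paper. The sketch below reproduces just that recombination step on stand-in tensors; the shapes and values are arbitrary and purely illustrative.

```py
import torch

guidance_scale = 7.5  # w in equation (2) of the Imagen paper

# Stand-in for the UNet output on the doubled latent batch:
# first half = unconditional prediction, second half = text-conditioned prediction.
noise_pred = torch.randn(2, 4, 64, 64)

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# With guidance_scale == 1 this reduces to the plain text-conditioned prediction,
# i.e. no extra classifier-free guidance is applied.
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```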
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior") >>> pipe_prior.to("cuda") >>> prompt = "red cat, 4k photo" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1") >>> pipe.to("cuda") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save("cat.png") ``` """ def get_new_h_w(h, w, scale_factor=8): new_h = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 new_w = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class KandinskyPipeline(DiffusionPipeline): """ Pipeline for text-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`MultilingualCLIP`]): Frozen text-encoder. tokenizer ([`XLMRobertaTokenizer`]): Tokenizer of class scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. 
""" model_cpu_offload_seq = "text_encoder->unet->movq" def __init__( self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel, ): super().__init__() self.register_modules( text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, ): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids.to(device) text_mask = text_inputs.attention_mask.to(device) prompt_embeds, text_encoder_hidden_states = self.text_encoder( input_ids=text_input_ids, attention_mask=text_mask ) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) uncond_text_input_ids = uncond_input.input_ids.to(device) uncond_text_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_encoder_hidden_states, text_mask @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image_embeds: Union[torch.Tensor, List[torch.Tensor]], negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image_embeds (`torch.Tensor` or `List[torch.Tensor]`): The clip image embeddings for text prompt, that will be used to condition the image generation. negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): The clip image embeddings for negative text prompt, will be used to condition the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). 
`guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( dtype=prompt_embeds.dtype, device=device ) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps_tensor = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels height, width = get_new_h_w(height, width, self.movq_scale_factor) # create initial latent latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler, ) for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} noise_pred = self.unet( sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, 
added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if do_classifier_free_guidance: noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) _, variance_pred_text = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not ( hasattr(self.scheduler.config, "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, generator=generator, ).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] self.maybe_free_model_hooks() if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") if output_type in ["np", "pil"]: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py", "repo_id": "diffusers", "token_count": 7937 }
142
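The `get_new_h_w` helper defined near the top of the Kandinsky pipeline above rounds the requested image size up to the next multiple of `scale_factor**2` and returns the matching latent resolution. The standalone copy below, with a few sample inputs, makes that rounding concrete; the sample resolutions are arbitrary.

```py
def get_new_h_w(h, w, scale_factor=8):
    # Ceil-divide by scale_factor**2, then multiply by scale_factor:
    # the result is the latent height/width for a pixel size rounded up
    # to the next multiple of scale_factor**2.
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


print(get_new_h_w(768, 768))  # (96, 96)   768 is already a multiple of 64
print(get_new_h_w(512, 512))  # (64, 64)
print(get_new_h_w(500, 500))  # (64, 64)   500 is rounded up to 512 first
```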
#!/usr/bin/env python3
import argparse
import fnmatch

from safetensors.torch import load_file

from diffusers import Kandinsky3UNet


MAPPING = {
    "to_time_embed.1": "time_embedding.linear_1",
    "to_time_embed.3": "time_embedding.linear_2",
    "in_layer": "conv_in",
    "out_layer.0": "conv_norm_out",
    "out_layer.2": "conv_out",
    "down_samples": "down_blocks",
    "up_samples": "up_blocks",
    "projection_lin": "encoder_hid_proj.projection_linear",
    "projection_ln": "encoder_hid_proj.projection_norm",
    "feature_pooling": "add_time_condition",
    "to_query": "to_q",
    "to_key": "to_k",
    "to_value": "to_v",
    "output_layer": "to_out.0",
    "self_attention_block": "attentions.0",
}

DYNAMIC_MAP = {
    "resnet_attn_blocks.*.0": "resnets_in.*",
    "resnet_attn_blocks.*.1": ("attentions.*", 1),
    "resnet_attn_blocks.*.2": "resnets_out.*",
}
# MAPPING = {}


def convert_state_dict(unet_state_dict):
    """
    Convert the state dict of a U-Net model to match the key format expected by the Kandinsky3UNet model.

    Args:
        unet_state_dict (dict): State dict of the original U-Net model.

    Returns:
        dict: The converted state dictionary.
    """
    # Example of renaming logic (this will vary based on your model's architecture)
    converted_state_dict = {}
    for key in unet_state_dict:
        new_key = key
        for pattern, new_pattern in MAPPING.items():
            new_key = new_key.replace(pattern, new_pattern)

        for dyn_pattern, dyn_new_pattern in DYNAMIC_MAP.items():
            has_matched = False
            if fnmatch.fnmatch(new_key, f"*.{dyn_pattern}.*") and not has_matched:
                star = int(new_key.split(dyn_pattern.split(".")[0])[-1].split(".")[1])

                if isinstance(dyn_new_pattern, tuple):
                    new_star = star + dyn_new_pattern[-1]
                    dyn_new_pattern = dyn_new_pattern[0]
                else:
                    new_star = star

                pattern = dyn_pattern.replace("*", str(star))
                new_pattern = dyn_new_pattern.replace("*", str(new_star))

                new_key = new_key.replace(pattern, new_pattern)
                has_matched = True

        converted_state_dict[new_key] = unet_state_dict[key]

    return converted_state_dict


def main(model_path, output_path):
    # Load your original U-Net model
    unet_state_dict = load_file(model_path)

    # Initialize your Kandinsky3UNet model
    config = {}

    # Convert the state dict
    converted_state_dict = convert_state_dict(unet_state_dict)

    unet = Kandinsky3UNet(config)
    unet.load_state_dict(converted_state_dict)

    unet.save_pretrained(output_path)
    print(f"Converted model saved to {output_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert U-Net PyTorch model to Kandinsky3UNet format")
    parser.add_argument("--model_path", type=str, required=True, help="Path to the original U-Net PyTorch model")
    parser.add_argument("--output_path", type=str, required=True, help="Path to save the converted model")

    args = parser.parse_args()
    main(args.model_path, args.output_path)
diffusers/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py", "repo_id": "diffusers", "token_count": 1403 }
143
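To make the renaming rules in the conversion script above concrete, the sketch below pushes two invented keys through `convert_state_dict`. The keys only need to contain substrings from `MAPPING`, so they are illustrative and not taken from a real Kandinsky 3 checkpoint.

```py
import torch

# Invented keys that exercise the static MAPPING table.
fake_state_dict = {
    "to_time_embed.1.weight": torch.zeros(1),  # -> time_embedding.linear_1.weight
    "in_layer.weight": torch.zeros(1),         # -> conv_in.weight
}

converted = convert_state_dict(fake_state_dict)
print(sorted(converted))
# ['conv_in.weight', 'time_embedding.linear_1.weight']
```

From the command line, the script itself would be invoked with `--model_path` pointing at a safetensors checkpoint (it is loaded with `safetensors.torch.load_file`) and `--output_path` at the directory to save the converted model; both paths are user-supplied.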
# Copyright 2024 the Latte Team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html import inspect import re import urllib.parse as ul from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Tuple, Union import torch from transformers import T5EncoderModel, T5Tokenizer from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...models import AutoencoderKL, LatteTransformer3DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( BACKENDS_MAPPING, BaseOutput, is_bs4_available, is_ftfy_available, logging, replace_example_docstring, ) from ...utils.torch_utils import is_compiled_module, randn_tensor from ...video_processor import VideoProcessor logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import LattePipeline >>> from diffusers.utils import export_to_gif >>> # You can replace the checkpoint id with "maxin-cn/Latte-1" too. >>> pipe = LattePipeline.from_pretrained("maxin-cn/Latte-1", torch_dtype=torch.float16) >>> # Enable memory optimizations. >>> pipe.enable_model_cpu_offload() >>> prompt = "A small cactus with a happy face in the Sahara desert." >>> videos = pipe(prompt).frames[0] >>> export_to_gif(videos, "latte.gif") ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps @dataclass class LattePipelineOutput(BaseOutput): frames: torch.Tensor class LattePipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Latte. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. text_encoder ([`T5EncoderModel`]): Frozen text-encoder. Latte uses [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. tokenizer (`T5Tokenizer`): Tokenizer of class [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). transformer ([`LatteTransformer3DModel`]): A text conditioned `LatteTransformer3DModel` to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `transformer` to denoise the encoded video latents. 
""" bad_punct_regex = re.compile(r"[#®•©™&@·º½¾¿¡§~\)\(\]\[\}\{\|\\/\\*]{1,}") _optional_components = ["tokenizer", "text_encoder"] model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", ] def __init__( self, tokenizer: T5Tokenizer, text_encoder: T5EncoderModel, vae: AutoencoderKL, transformer: LatteTransformer3DModel, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) # Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/utils.py def mask_text_embeddings(self, emb, mask): if emb.shape[0] == 1: keep_index = mask.sum().item() return emb[:, :, :keep_index, :], keep_index # 1, 120, 4096 -> 1 7 4096 else: masked_feature = emb * mask[:, None, :, None] # 1 120 4096 return masked_feature, emb.shape[2] # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool = True, negative_prompt: str = "", num_images_per_prompt: int = 1, device: Optional[torch.device] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, clean_caption: bool = False, mask_feature: bool = True, dtype=None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt not to guide the video generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For Latte, this should be "". do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): whether to use classifier free guidance or not num_images_per_prompt (`int`, *optional*, defaults to 1): number of video that should be generated per prompt device: (`torch.device`, *optional*): torch device to place the resulting embeddings on prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. For Latte, it's should be the embeddings of the "" string. clean_caption (bool, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. mask_feature: (bool, defaults to `True`): If `True`, the function will mask the text embeddings. 
""" embeds_initially_provided = prompt_embeds is not None and negative_prompt_embeds is not None if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = 120 if prompt_embeds is None: prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {max_length} tokens: {removed_text}" ) attention_mask = text_inputs.attention_mask.to(device) prompt_embeds_attention_mask = attention_mask prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds_attention_mask = torch.ones_like(prompt_embeds) if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds_attention_mask = prompt_embeds_attention_mask.view(bs_embed, -1) prompt_embeds_attention_mask = prompt_embeds_attention_mask.repeat(num_images_per_prompt, 1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes else: negative_prompt_embeds = None # Perform additional masking. 
if mask_feature and not embeds_initially_provided: prompt_embeds = prompt_embeds.unsqueeze(1) masked_prompt_embeds, keep_indices = self.mask_text_embeddings(prompt_embeds, prompt_embeds_attention_mask) masked_prompt_embeds = masked_prompt_embeds.squeeze(1) masked_negative_prompt_embeds = ( negative_prompt_embeds[:, :keep_indices, :] if negative_prompt_embeds is not None else None ) return masked_prompt_embeds, masked_negative_prompt_embeds return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub("<person>", "person", caption) # urls: caption = re.sub( r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls caption = re.sub( r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls # html: caption = BeautifulSoup(caption, features="html.parser").text # @<nickname> caption = re.sub(r"@[\w\d]+\b", "", caption) # 31C0—31EF CJK Strokes # 31F0—31FF Katakana Phonetic Extensions # 3200—32FF Enclosed CJK Letters and Months # 3300—33FF CJK Compatibility # 3400—4DBF CJK Unified Ideographs Extension A # 4DC0—4DFF Yijing Hexagram Symbols # 4E00—9FFF CJK Unified Ideographs caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) caption = re.sub(r"[\u3200-\u32ff]+", "", caption) caption = re.sub(r"[\u3300-\u33ff]+", "", caption) caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) ####################################################### # все виды тире / all types of dash --> "-" caption = re.sub( r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa "-", caption, ) # кавычки к одному стандарту caption = re.sub(r"[`´«»“”¨]", '"', caption) caption = re.sub(r"[‘’]", "'", caption) # &quot; caption = re.sub(r"&quot;?", "", caption) # &amp caption = re.sub(r"&amp", "", caption) # ip adresses: caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) # article ids: caption = re.sub(r"\d:\d\d\s+$", "", caption) # \n caption = re.sub(r"\\n", " ", caption) # "#123" caption = re.sub(r"#\d{1,3}\b", "", caption) # "#12345.." caption = re.sub(r"#\d{5,}\b", "", caption) # "123456.." caption = re.sub(r"\b\d{6,}\b", "", caption) # filenames: caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) # caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT caption = re.sub(r"\s+\.\s+", r" ", caption) # " . 
" # this-is-my-cute-cat / this_is_my_cute_cat regex2 = re.compile(r"(?:\-|\_)") if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, " ", caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) caption = re.sub(r"\bpage\s+\d+\b", "", caption) caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) caption = re.sub(r"\b\s+\:\s+", r": ", caption) caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) caption = re.sub(r"\s+", " ", caption) caption.strip() caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) caption = re.sub(r"^\.\S+$", "", caption) return caption.strip() # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): shape = ( batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: str = "", num_inference_steps: int = 50, timesteps: Optional[List[int]] = None, guidance_scale: float = 7.5, num_images_per_prompt: int = 1, video_length: int = 16, height: int = 512, width: int = 512, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: str = "pil", return_dict: bool = True, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], clean_caption: bool = True, mask_feature: bool = True, enable_temporal_attentions: bool = True, decode_chunk_size: Optional[int] = None, ) -> Union[LattePipelineOutput, Tuple]: """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the video generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality video at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 7.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate videos that are closely linked to the text `prompt`, usually at the expense of lower video quality. video_length (`int`, *optional*, defaults to 16): The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds num_images_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. height (`int`, *optional*, defaults to self.unet.config.sample_size): The height in pixels of the generated video. width (`int`, *optional*, defaults to self.unet.config.sample_size): The width in pixels of the generated video. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. For Latte this negative prompt should be "". If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate video. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A callback function or a list of callback functions to be called at the end of each denoising step. callback_on_step_end_tensor_inputs (`List[str]`, *optional*): A list of tensor inputs that should be passed to the callback function. If not defined, all tensor inputs will be passed. clean_caption (`bool`, *optional*, defaults to `True`): Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from the raw prompt. mask_feature (`bool` defaults to `True`): If set to `True`, the text embeddings will be masked. enable_temporal_attentions (`bool`, *optional*, defaults to `True`): Whether to enable temporal attentions decode_chunk_size (`int`, *optional*): The number of frames to decode at a time. Higher chunk size leads to better temporal consistency at the expense of more memory usage. By default, the decoder decodes all frames at once for maximal quality. For lower memory usage, reduce `decode_chunk_size`. Examples: Returns: [`~pipelines.latte.pipeline_latte.LattePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.latte.pipeline_latte.LattePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. Default decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else video_length # 1. Check inputs. Raise error if not correct height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor self.check_inputs( prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds, negative_prompt_embeds, ) self._guidance_scale = guidance_scale self._interrupt = False # 2. 
Default height and width to transformer if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clean_caption=clean_caption, mask_feature=mask_feature, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) self._num_timesteps = len(timesteps) # 5. Prepare latents. latent_channels = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, video_length, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) current_timestep = t if not torch.is_tensor(current_timestep): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = latent_model_input.device.type == "mps" if isinstance(current_timestep, float): dtype = torch.float32 if is_mps else torch.float64 else: dtype = torch.int32 if is_mps else torch.int64 current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) elif len(current_timestep.shape) == 0: current_timestep = current_timestep[None].to(latent_model_input.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML current_timestep = current_timestep.expand(latent_model_input.shape[0]) # predict noise model_output noise_pred = self.transformer( latent_model_input, encoder_hidden_states=prompt_embeds, timestep=current_timestep, enable_temporal_attentions=enable_temporal_attentions, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # use learned sigma? 
if not ( hasattr(self.scheduler.config, "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): noise_pred = noise_pred.chunk(2, dim=1)[0] # compute previous video: x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == "latents": video = self.decode_latents(latents, video_length, decode_chunk_size=14) video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return LattePipelineOutput(frames=video) # Similar to diffusers.pipelines.stable_video_diffusion.pipeline_stable_video_diffusion.decode_latents def decode_latents(self, latents: torch.Tensor, video_length: int, decode_chunk_size: int = 14): # [batch, channels, frames, height, width] -> [batch*frames, channels, height, width] latents = latents.permute(0, 2, 1, 3, 4).flatten(0, 1) latents = 1 / self.vae.config.scaling_factor * latents forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward accepts_num_frames = "num_frames" in set(inspect.signature(forward_vae_fn).parameters.keys()) # decode decode_chunk_size frames at a time to avoid OOM frames = [] for i in range(0, latents.shape[0], decode_chunk_size): num_frames_in = latents[i : i + decode_chunk_size].shape[0] decode_kwargs = {} if accepts_num_frames: # we only pass num_frames_in if it's expected decode_kwargs["num_frames"] = num_frames_in frame = self.vae.decode(latents[i : i + decode_chunk_size], **decode_kwargs).sample frames.append(frame) frames = torch.cat(frames, dim=0) # [batch*frames, channels, height, width] -> [batch, channels, frames, height, width] frames = frames.reshape(-1, video_length, *frames.shape[1:]).permute(0, 2, 1, 3, 4) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 frames = frames.float() return frames
diffusers/src/diffusers/pipelines/latte/pipeline_latte.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/latte/pipeline_latte.py", "repo_id": "diffusers", "token_count": 18698 }
144
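A minimal usage sketch for the Latte pipeline file above. The checkpoint id, the CUDA device, and the example prompt are assumptions for illustration; the call arguments mirror the `__call__` signature shown in the file, and `.frames[0]` follows from `LattePipelineOutput(frames=video)`.

import torch
from diffusers import LattePipeline
from diffusers.utils import export_to_gif

# Checkpoint id is assumed for illustration; substitute whichever Latte checkpoint you use.
pipe = LattePipeline.from_pretrained("maxin-cn/Latte-1", torch_dtype=torch.float16).to("cuda")

# guidance_scale > 1 enables classifier-free guidance; video_length defaults to 16 frames.
frames = pipe(
    prompt="a small cactus with a happy face in the Sahara desert",
    video_length=16,
    num_inference_steps=50,
    guidance_scale=7.5,
).frames[0]

export_to_gif(frames, "latte_sample.gif")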
# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from math import pi from typing import Optional import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin from ...utils import BaseOutput, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class StableAudioPositionalEmbedding(nn.Module): """Used for continuous time""" def __init__(self, dim: int): super().__init__() assert (dim % 2) == 0 half_dim = dim // 2 self.weights = nn.Parameter(torch.randn(half_dim)) def forward(self, times: torch.Tensor) -> torch.Tensor: times = times[..., None] freqs = times * self.weights[None] * 2 * pi fouriered = torch.cat((freqs.sin(), freqs.cos()), dim=-1) fouriered = torch.cat((times, fouriered), dim=-1) return fouriered @dataclass class StableAudioProjectionModelOutput(BaseOutput): """ Args: Class for StableAudio projection layer's outputs. text_hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states obtained by linearly projecting the hidden-states for the text encoder. seconds_start_hidden_states (`torch.Tensor` of shape `(batch_size, 1, hidden_size)`, *optional*): Sequence of hidden-states obtained by linearly projecting the audio start hidden states. seconds_end_hidden_states (`torch.Tensor` of shape `(batch_size, 1, hidden_size)`, *optional*): Sequence of hidden-states obtained by linearly projecting the audio end hidden states. """ text_hidden_states: Optional[torch.Tensor] = None seconds_start_hidden_states: Optional[torch.Tensor] = None seconds_end_hidden_states: Optional[torch.Tensor] = None class StableAudioNumberConditioner(nn.Module): """ A simple linear projection model to map numbers to a latent space. Args: number_embedding_dim (`int`): Dimensionality of the number embeddings. min_value (`int`): The minimum value of the seconds number conditioning modules. max_value (`int`): The maximum value of the seconds number conditioning modules internal_dim (`int`): Dimensionality of the intermediate number hidden states. 
""" def __init__( self, number_embedding_dim, min_value, max_value, internal_dim: Optional[int] = 256, ): super().__init__() self.time_positional_embedding = nn.Sequential( StableAudioPositionalEmbedding(internal_dim), nn.Linear(in_features=internal_dim + 1, out_features=number_embedding_dim), ) self.number_embedding_dim = number_embedding_dim self.min_value = min_value self.max_value = max_value def forward( self, floats: torch.Tensor, ): floats = floats.clamp(self.min_value, self.max_value) normalized_floats = (floats - self.min_value) / (self.max_value - self.min_value) # Cast floats to same type as embedder embedder_dtype = next(self.time_positional_embedding.parameters()).dtype normalized_floats = normalized_floats.to(embedder_dtype) embedding = self.time_positional_embedding(normalized_floats) float_embeds = embedding.view(-1, 1, self.number_embedding_dim) return float_embeds class StableAudioProjectionModel(ModelMixin, ConfigMixin): """ A simple linear projection model to map the conditioning values to a shared latent space. Args: text_encoder_dim (`int`): Dimensionality of the text embeddings from the text encoder (T5). conditioning_dim (`int`): Dimensionality of the output conditioning tensors. min_value (`int`): The minimum value of the seconds number conditioning modules. max_value (`int`): The maximum value of the seconds number conditioning modules """ @register_to_config def __init__(self, text_encoder_dim, conditioning_dim, min_value, max_value): super().__init__() self.text_projection = ( nn.Identity() if conditioning_dim == text_encoder_dim else nn.Linear(text_encoder_dim, conditioning_dim) ) self.start_number_conditioner = StableAudioNumberConditioner(conditioning_dim, min_value, max_value) self.end_number_conditioner = StableAudioNumberConditioner(conditioning_dim, min_value, max_value) def forward( self, text_hidden_states: Optional[torch.Tensor] = None, start_seconds: Optional[torch.Tensor] = None, end_seconds: Optional[torch.Tensor] = None, ): text_hidden_states = ( text_hidden_states if text_hidden_states is None else self.text_projection(text_hidden_states) ) seconds_start_hidden_states = ( start_seconds if start_seconds is None else self.start_number_conditioner(start_seconds) ) seconds_end_hidden_states = end_seconds if end_seconds is None else self.end_number_conditioner(end_seconds) return StableAudioProjectionModelOutput( text_hidden_states=text_hidden_states, seconds_start_hidden_states=seconds_start_hidden_states, seconds_end_hidden_states=seconds_end_hidden_states, )
diffusers/src/diffusers/pipelines/stable_audio/modeling_stable_audio.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_audio/modeling_stable_audio.py", "repo_id": "diffusers", "token_count": 2312 }
145
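A toy-scale sanity check for the projection model above, run with dummy tensors; all dimensions and second values here are arbitrary assumptions chosen only to make the expected output shapes visible.

import torch
from diffusers.pipelines.stable_audio.modeling_stable_audio import StableAudioProjectionModel

# Toy sizes (assumptions): 32-dim text states projected to a 64-dim conditioning space,
# with start/end times conditioned between 0 and 512 seconds.
model = StableAudioProjectionModel(text_encoder_dim=32, conditioning_dim=64, min_value=0, max_value=512)

text_hidden_states = torch.randn(2, 10, 32)   # (batch, seq_len, text_encoder_dim)
start_seconds = torch.tensor([0.0, 0.0])      # one float per batch item
end_seconds = torch.tensor([30.0, 47.5])

out = model(text_hidden_states=text_hidden_states, start_seconds=start_seconds, end_seconds=end_seconds)
print(out.text_hidden_states.shape)           # torch.Size([2, 10, 64])
print(out.seconds_start_hidden_states.shape)  # torch.Size([2, 1, 64])
print(out.seconds_end_hidden_states.shape)    # torch.Size([2, 1, 64])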
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTokenizer from ...configuration_utils import FrozenDict from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers from ...utils import deprecate, logging from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel from ..pipeline_utils import DiffusionPipeline from . import StableDiffusionPipelineOutput logger = logging.get_logger(__name__) def preprocess(image): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): w, h = image[0].size w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 32 image = [np.array(i.resize((w, h)))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class OnnxStableDiffusionUpscalePipeline(DiffusionPipeline): vae: OnnxRuntimeModel text_encoder: OnnxRuntimeModel tokenizer: CLIPTokenizer unet: OnnxRuntimeModel low_res_scheduler: DDPMScheduler scheduler: KarrasDiffusionSchedulers safety_checker: OnnxRuntimeModel feature_extractor: CLIPImageProcessor _optional_components = ["safety_checker", "feature_extractor"] _is_onnx = True def __init__( self, vae: OnnxRuntimeModel, text_encoder: OnnxRuntimeModel, tokenizer: Any, unet: OnnxRuntimeModel, low_res_scheduler: DDPMScheduler, scheduler: KarrasDiffusionSchedulers, safety_checker: Optional[OnnxRuntimeModel] = None, feature_extractor: Optional[CLIPImageProcessor] = None, max_noise_level: int = 350, num_latent_channels=4, num_unet_input_channels=7, requires_safety_checker: bool = True, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." 
" `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, low_res_scheduler=low_res_scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.register_to_config( max_noise_level=max_noise_level, num_latent_channels=num_latent_channels, num_unet_input_channels=num_unet_input_channels, ) def check_inputs( self, prompt: Union[str, List[str]], image, noise_level, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, np.ndarray) and not isinstance(image, list) ): raise ValueError( f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}" ) # verify batch size of prompt and image are same if image is a list or tensor or numpy array if isinstance(image, (list, np.ndarray)): if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if isinstance(image, list): image_batch_size = len(image) else: image_batch_size = image.shape[0] if batch_size != image_batch_size: raise ValueError( f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." " Please make sure that passed `prompt` matches the batch size of `image`." ) # check noise level if noise_level > self.config.max_noise_level: raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None): shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = generator.randn(*shape).astype(dtype) elif latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") return latents def decode_latents(self, latents): latents = 1 / 0.08333 * latents image = self.vae(latent_sample=latents)[0] image = np.clip(image / 2 + 0.5, 0, 1) image = image.transpose((0, 2, 3, 1)) return image def _encode_prompt( self, prompt: Union[str, List[str]], num_images_per_prompt: Optional[int], do_classifier_free_guidance: bool, negative_prompt: Optional[str], prompt_embeds: Optional[np.ndarray] = None, negative_prompt_embeds: Optional[np.ndarray] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`): prompt to be encoded num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`np.ndarray`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
""" if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids if not np.array_equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] * batch_size elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="np", ) negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] if do_classifier_free_guidance: negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def __call__( self, prompt: Union[str, List[str]], image: Union[np.ndarray, PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 20, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[np.random.RandomState, List[np.random.RandomState]]] = None, latents: Optional[np.ndarray] = None, prompt_embeds: Optional[np.ndarray] = None, negative_prompt_embeds: Optional[np.ndarray] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, np.ndarray], None]] = None, callback_steps: Optional[int] = 1, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`np.ndarray` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. 
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. noise_level (`float`, defaults to 0.2): Deteremines the amount of noise to add to the initial image before performing upscaling. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`np.random.RandomState`, *optional*): A np.random.RandomState to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`np.ndarray`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`np.ndarray`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 1. 
Check inputs self.check_inputs( prompt, image, noise_level, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if generator is None: generator = np.random # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds = self._encode_prompt( prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) latents_dtype = prompt_embeds.dtype image = preprocess(image).cpu().numpy() height, width = image.shape[2:] latents = self.prepare_latents( batch_size * num_images_per_prompt, self.config.num_latent_channels, height, width, latents_dtype, generator, ) image = image.astype(latents_dtype) self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps # Scale the initial noise by the standard deviation required by the scheduler latents = latents * np.float64(self.scheduler.init_noise_sigma) # 5. Add noise to image noise_level = np.array([noise_level]).astype(np.int64) noise = generator.randn(*image.shape).astype(latents_dtype) image = self.low_res_scheduler.add_noise( torch.from_numpy(image), torch.from_numpy(noise), torch.from_numpy(noise_level) ) image = image.numpy() batch_multiplier = 2 if do_classifier_free_guidance else 1 image = np.concatenate([image] * batch_multiplier * num_images_per_prompt) noise_level = np.concatenate([noise_level] * image.shape[0]) # 7. Check that sizes of image and latents match num_channels_image = image.shape[1] if self.config.num_latent_channels + num_channels_image != self.config.num_unet_input_channels: raise ValueError( "Incorrect configuration settings! The config of `pipeline.unet` expects" f" {self.config.num_unet_input_channels} but received `num_channels_latents`: {self.config.num_latent_channels} +" f" `num_channels_image`: {num_channels_image} " f" = {self.config.num_latent_channels + num_channels_image}. Please verify the config of" " `pipeline.unet` or your `image` input." ) # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta timestep_dtype = next( (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" ) timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] # 9. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = np.concatenate([latent_model_input, image], axis=1) # timestep to tensor timestep = np.array([t], dtype=timestep_dtype) # predict the noise residual noise_pred = self.unet( sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, class_labels=noise_level, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs ).prev_sample latents = latents.numpy() # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 10. Post-processing image = self.decode_latents(latents) if self.safety_checker is not None: safety_checker_input = self.feature_extractor( self.numpy_to_pil(image), return_tensors="np" ).pixel_values.astype(image.dtype) images, has_nsfw_concept = [], [] for i in range(image.shape[0]): image_i, has_nsfw_concept_i = self.safety_checker( clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] ) images.append(image_i) has_nsfw_concept.append(has_nsfw_concept_i[0]) image = np.concatenate(images) else: has_nsfw_concept = None if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py", "repo_id": "diffusers", "token_count": 12557 }
146
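The `preprocess` helper in the file above floors image sizes to a multiple of 64 (the in-code comment still says 32) and rescales pixels to [-1, 1] in NCHW order. The sketch below mirrors that arithmetic locally on a dummy image so the input contract is easy to see; it avoids importing the module itself, which would pull in onnxruntime.

import numpy as np
import PIL.Image
import torch

# Dummy 200x170 RGB image; the size and color are arbitrary assumptions.
image = PIL.Image.new("RGB", (200, 170), color=(128, 64, 32))

# Mirror of the module's `preprocess`: floor width/height to a multiple of 64,
# scale pixel values to [-1, 1], and move channels to NCHW.
w, h = (x - x % 64 for x in image.size)
arr = np.array(image.resize((w, h)))[None, :].astype(np.float32) / 255.0
arr = 2.0 * arr.transpose(0, 3, 1, 2) - 1.0
tensor = torch.from_numpy(arr)

print(tensor.shape)                # torch.Size([1, 3, 128, 192])
print(tensor.min(), tensor.max())  # both within [-1.0, 1.0]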
from dataclasses import dataclass
from typing import List, Union

import numpy as np
import PIL.Image

from ...utils import BaseOutput


@dataclass
class StableDiffusion3PipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion 3 pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size`, or a numpy array of shape `(batch_size, height,
            width, num_channels)`, representing the denoised images of the diffusion pipeline.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
diffusers/src/diffusers/pipelines/stable_diffusion_3/pipeline_output.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_3/pipeline_output.py", "repo_id": "diffusers", "token_count": 218 }
147
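Because `StableDiffusion3PipelineOutput` subclasses `BaseOutput`, the `images` field can be read as an attribute, by key, or through tuple conversion. A small sketch with a dummy numpy batch (the array contents are placeholders):

import numpy as np
from diffusers.pipelines.stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput

# Dummy batch of two 8x8 RGB "images".
images = np.zeros((2, 8, 8, 3), dtype=np.float32)
out = StableDiffusion3PipelineOutput(images=images)

assert out.images is out["images"]      # attribute and dict-style access agree
assert out.to_tuple()[0] is out.images  # tuple conversion preserves field order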
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, BaseOutput, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure.update( { "pipeline_stable_video_diffusion": [ "StableVideoDiffusionPipeline", "StableVideoDiffusionPipelineOutput", ], } ) if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_video_diffusion import ( StableVideoDiffusionPipeline, StableVideoDiffusionPipelineOutput, ) else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/stable_video_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_video_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 664 }
148
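The `_LazyModule` setup above defers the heavy imports until first attribute access and registers dummy objects when torch or transformers is unavailable. In practice it exposes the two names like any regular module:

# Resolved lazily on first access; requires torch and transformers to be installed,
# otherwise the dummy placeholder objects raise an informative error when used.
from diffusers.pipelines.stable_video_diffusion import (
    StableVideoDiffusionPipeline,
    StableVideoDiffusionPipelineOutput,
)

print(StableVideoDiffusionPipeline.__name__)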
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPT2Config, GPT2LMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin # Modified from ClipCaptionModel in https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): """ Text decoder model for a image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is used to generate text from the UniDiffuser image-text embedding. Parameters: prefix_length (`int`): Max number of prefix tokens that will be supplied to the model. prefix_inner_dim (`int`): The hidden size of the incoming prefix embeddings. For UniDiffuser, this would be the hidden dim of the CLIP text encoder. prefix_hidden_dim (`int`, *optional*): Hidden dim of the MLP if we encode the prefix. vocab_size (`int`, *optional*, defaults to 50257): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`]. n_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_embd (`int`, *optional*, defaults to 768): Dimensionality of the embeddings and hidden states. n_layer (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. n_inner (`int`, *optional*, defaults to None): Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd activation_function (`str`, *optional*, defaults to `"gelu"`): Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. resid_pdrop (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_attn_weights (`bool`, *optional*, defaults to `True`): Scale attention weights by dividing by sqrt(hidden_size).. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`): Whether to additionally scale attention weights by `1 / layer_idx + 1`. reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`): Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention dot-product/softmax to float() when training with mixed precision. 
""" _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, # Start of GPT2 config args n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ): super().__init__() self.prefix_length = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) self.prefix_inner_dim = prefix_inner_dim self.prefix_hidden_dim = prefix_hidden_dim self.encode_prefix = ( nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim) if self.prefix_hidden_dim is not None else nn.Identity() ) self.decode_prefix = ( nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity() ) gpt_config = GPT2Config( vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) self.transformer = GPT2LMHeadModel(gpt_config) def forward( self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ): """ Args: input_ids (`torch.Tensor` of shape `(N, max_seq_len)`): Text tokens to use for inference. prefix_embeds (`torch.Tensor` of shape `(N, prefix_length, 768)`): Prefix embedding to preprend to the embedded tokens. attention_mask (`torch.Tensor` of shape `(N, prefix_length + max_seq_len, 768)`, *optional*): Attention mask for the prefix embedding. labels (`torch.Tensor`, *optional*): Labels to use for language modeling. """ embedding_text = self.transformer.transformer.wte(input_ids) hidden = self.encode_prefix(prefix_embeds) prefix_embeds = self.decode_prefix(hidden) embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1) if labels is not None: dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device) labels = torch.cat((dummy_token, input_ids), dim=1) out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask) if self.prefix_hidden_dim is not None: return out, hidden else: return out def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor: return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device) def encode(self, prefix): return self.encode_prefix(prefix) @torch.no_grad() def generate_captions(self, features, eos_token_id, device): """ Generate captions given text embedding features. Returns list[L]. Args: features (`torch.Tensor` of shape `(B, L, D)`): Text embedding features to generate captions from. 
eos_token_id (`int`): The token ID of the EOS token for the text decoder model. device: Device to perform text generation on. Returns: `List[str]`: A list of strings generated from the decoder model. """ features = torch.split(features, 1, dim=0) generated_tokens = [] generated_seq_lengths = [] for feature in features: feature = self.decode_prefix(feature.to(device)) # back to the clip feature # Only support beam search for now output_tokens, seq_lengths = self.generate_beam( input_embeds=feature, device=device, eos_token_id=eos_token_id ) generated_tokens.append(output_tokens[0]) generated_seq_lengths.append(seq_lengths[0]) generated_tokens = torch.stack(generated_tokens) generated_seq_lengths = torch.stack(generated_seq_lengths) return generated_tokens, generated_seq_lengths @torch.no_grad() def generate_beam( self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None, ): """ Generates text using the given tokenizer and text prompt or token embedding via beam search. This implementation is based on the beam search implementation from the [original UniDiffuser code](https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py#L89). Args: eos_token_id (`int`, *optional*): The token ID of the EOS token for the text decoder model. input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): Tokenizer indices of input sequence tokens in the vocabulary. One of `input_ids` and `input_embeds` must be supplied. input_embeds (`torch.Tensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): An embedded representation to directly pass to the transformer as a prefix for beam search. One of `input_ids` and `input_embeds` must be supplied. device: The device to perform beam search on. beam_size (`int`, *optional*, defaults to `5`): The number of best states to store during beam search. entry_length (`int`, *optional*, defaults to `67`): The number of iterations to run beam search. temperature (`float`, *optional*, defaults to 1.0): The temperature to use when performing the softmax over logits from the decoding model. Returns: `Tuple(torch.Tensor, torch.Tensor)`: A tuple of tensors where the first element is a tensor of generated token sequences sorted by score in descending order, and the second element is the sequence lengths corresponding to those sequences. """ # Generates text until stop_token is reached using beam search with the desired beam size. 
stop_token_index = eos_token_id tokens = None scores = None seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int) is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool) if input_embeds is not None: generated = input_embeds else: generated = self.transformer.transformer.wte(input_ids) for i in range(entry_length): outputs = self.transformer(inputs_embeds=generated) logits = outputs.logits logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) logits = logits.softmax(-1).log() if scores is None: scores, next_tokens = logits.topk(beam_size, -1) generated = generated.expand(beam_size, *generated.shape[1:]) next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0) if tokens is None: tokens = next_tokens else: tokens = tokens.expand(beam_size, *tokens.shape[1:]) tokens = torch.cat((tokens, next_tokens), dim=1) else: logits[is_stopped] = -float(np.inf) logits[is_stopped, 0] = 0 scores_sum = scores[:, None] + logits seq_lengths[~is_stopped] += 1 scores_sum_average = scores_sum / seq_lengths[:, None] scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1) next_tokens_source = next_tokens // scores_sum.shape[1] seq_lengths = seq_lengths[next_tokens_source] next_tokens = next_tokens % scores_sum.shape[1] next_tokens = next_tokens.unsqueeze(1) tokens = tokens[next_tokens_source] tokens = torch.cat((tokens, next_tokens), dim=1) generated = generated[next_tokens_source] scores = scores_sum_average * seq_lengths is_stopped = is_stopped[next_tokens_source] next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1) generated = torch.cat((generated, next_token_embed), dim=1) is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze() if is_stopped.all(): break scores = scores / seq_lengths order = scores.argsort(descending=True) # tokens tensors are already padded to max_seq_length output_texts = [tokens[i] for i in order] output_texts = torch.stack(output_texts, dim=0) seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype) return output_texts, seq_lengths
diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py", "repo_id": "diffusers", "token_count": 6303 }
149
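# Illustrative sketch (not part of the diffusers sources above): a minimal, self-contained toy of
# the length-normalised beam-search bookkeeping that `generate_beam` implements. The `toy_logits`
# function is a stand-in for the GPT-2 text decoder so the loop runs without pretrained weights;
# all names below are illustrative and not part of the library API.
import torch

torch.manual_seed(0)

vocab_size, beam_size, max_steps, eos_token_id = 50, 3, 10, 0


def toy_logits(tokens: torch.Tensor) -> torch.Tensor:
    # Stand-in for the text decoder: one row of next-token logits per active beam.
    return torch.randn(tokens.shape[0], vocab_size)


tokens = torch.full((1, 1), 1, dtype=torch.long)  # a single BOS-like start token
scores = torch.zeros(1)                           # cumulative log-probabilities per beam
seq_lengths = torch.ones(1)
is_stopped = torch.zeros(1, dtype=torch.bool)

for _ in range(max_steps):
    logprobs = toy_logits(tokens).log_softmax(-1)
    logprobs[is_stopped] = -float("inf")          # finished beams contribute no new candidates...
    logprobs[is_stopped, 0] = 0.0                 # ...except a score-neutral padding token
    total = scores[:, None] + logprobs
    seq_lengths[~is_stopped] += 1
    # Rank all (beam, token) continuations by average (length-normalised) log-probability.
    avg_scores, flat_idx = (total / seq_lengths[:, None]).view(-1).topk(beam_size)
    beam_idx = flat_idx // vocab_size
    next_tok = (flat_idx % vocab_size).unsqueeze(1)
    tokens = torch.cat((tokens[beam_idx], next_tok), dim=1)
    seq_lengths = seq_lengths[beam_idx]
    scores = avg_scores * seq_lengths             # keep the un-normalised sums, as `generate_beam` does
    is_stopped = is_stopped[beam_idx] | next_tok.squeeze(1).eq(eos_token_id)
    if is_stopped.all():
        break

print(tokens)
print(scores / seq_lengths)  # final length-normalised score for each kept beam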
# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ...configuration_utils import ConfigMixin, register_to_config
from ...utils.torch_utils import randn_tensor
from ..scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    `ScoreSdeVpScheduler` is a variance preserving stochastic differential equation (SDE) scheduler.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the
    generic methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 2000):
            The number of diffusion steps to train the model.
        beta_min (`float`, defaults to 0.1):
            The minimum value of the continuous beta schedule `beta(t)`.
        beta_max (`float`, defaults to 20):
            The maximum value of the continuous beta schedule `beta(t)`.
        sampling_eps (`float`, defaults to 1e-3):
            The end value of sampling where timesteps decrease progressively from 1 to epsilon.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """
        Sets the continuous timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        """
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            score (`torch.Tensor`):
                The output of the score model for the current sample and timestep.
            x (`torch.Tensor`):
                The current sample along the reverse diffusion chain.
            t (`torch.Tensor`):
                The current continuous timestep in the diffusion chain.
            generator (`torch.Generator`, *optional*):
                A random number generator.
""" if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) std = std.flatten() while len(std.shape) < len(score.shape): std = std.unsqueeze(-1) score = -score / std # compute dt = -1.0 / len(self.timesteps) beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) beta_t = beta_t.flatten() while len(beta_t.shape) < len(x.shape): beta_t = beta_t.unsqueeze(-1) drift = -0.5 * beta_t * x diffusion = torch.sqrt(beta_t) drift = drift - diffusion**2 * score x_mean = x + drift * dt # add noise noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype) x = x_mean + diffusion * math.sqrt(-dt) * noise return x, x_mean def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py", "repo_id": "diffusers", "token_count": 1693 }
150
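# Illustrative usage sketch (not part of the file above): driving `ScoreSdeVpScheduler` with a
# stand-in score model so the loop runs without pretrained weights. The import path mirrors the
# file location shown above and may differ in other diffusers versions.
import torch

from diffusers.schedulers.deprecated.scheduling_sde_vp import ScoreSdeVpScheduler

scheduler = ScoreSdeVpScheduler()  # num_train_timesteps=2000, beta_min=0.1, beta_max=20
scheduler.set_timesteps(num_inference_steps=10)

x = torch.randn(1, 3, 32, 32)  # start from Gaussian noise
generator = torch.Generator().manual_seed(0)

for t in scheduler.timesteps:
    score = torch.randn_like(x)  # stand-in for score_model(x, t)
    x, x_mean = scheduler.step_pred(score, x, t, generator=generator)

# `x_mean` is the noise-free mean of the final reverse-SDE step; `x` additionally carries the
# Brownian noise injected at that step.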
# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr def rescale_zero_terminal_snr(betas): """ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.Tensor`): the betas that the scheduler is being initialized with. Returns: `torch.Tensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. 
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): """ `DPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. solver_order (`int`, defaults to 2): The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++"`. algorithm_type (`str`, defaults to `dpmsolver++`): Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) paper, and the `dpmsolver++` type implements the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. solver_type (`str`, defaults to `midpoint`): Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. lower_order_final (`bool`, defaults to `True`): Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. euler_at_final (`bool`, defaults to `False`): Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference steps, but sometimes may result in blurring. 
use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. use_lu_lambdas (`bool`, *optional*, defaults to `False`): Whether to use the uniform-logSNR for step sizes proposed by Lu's DPM-Solver in the noise schedule during the sampling process. If `True`, the sigmas and time steps are determined according to a sequence of `lambda(t)`. final_sigmas_type (`str`, defaults to `"zero"`): The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. lambda_min_clipped (`float`, defaults to `-inf`): Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the cosine (`squaredcos_cap_v2`) noise schedule. variance_type (`str`, *optional*): Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output contains the predicted Gaussian variance. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "dpmsolver++", solver_type: str = "midpoint", lower_order_final: bool = True, euler_at_final: bool = False, use_karras_sigmas: Optional[bool] = False, use_lu_lambdas: Optional[bool] = False, final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" lambda_min_clipped: float = -float("inf"), variance_type: Optional[str] = None, timestep_spacing: str = "linspace", steps_offset: int = 0, rescale_betas_zero_snr: bool = False, ): if algorithm_type in ["dpmsolver", "sde-dpmsolver"]: deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message) if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) if rescale_betas_zero_snr: # Close to 0 without being 0 so first sigma is not inf # FP16 smallest positive subnormal works well here self.alphas_cumprod[-1] = 2**-24 # Currently we only support VP-type noise schedule self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # settings for DPM-Solver if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: if algorithm_type == "deis": self.register_to_config(algorithm_type="dpmsolver++") else: raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") if solver_type not in ["midpoint", "heun"]: if solver_type in ["logrho", "bh1", "bh2"]: self.register_to_config(solver_type="midpoint") else: raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": raise ValueError( f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead." ) # setable values self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication @property def step_index(self): """ The index counter for current timestep. It will increase 1 after each scheduler step. """ return self._step_index @property def begin_index(self): """ The index for the first timestep. It should be set from pipeline with `set_begin_index` method. """ return self._begin_index def set_begin_index(self, begin_index: int = 0): """ Sets the begin index for the scheduler. This function should be run from pipeline before the inference. Args: begin_index (`int`): The begin index for the scheduler. """ self._begin_index = begin_index def set_timesteps( self, num_inference_steps: int = None, device: Union[str, torch.device] = None, timesteps: Optional[List[int]] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary timesteps schedule. If `None`, timesteps will be generated based on the `timestep_spacing` attribute. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`, and `timestep_spacing` attribute will be ignored. 
""" if num_inference_steps is None and timesteps is None: raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps`.") if num_inference_steps is not None and timesteps is not None: raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") if timesteps is not None and self.config.use_karras_sigmas: raise ValueError("Cannot use `timesteps` with `config.use_karras_sigmas = True`") if timesteps is not None and self.config.use_lu_lambdas: raise ValueError("Cannot use `timesteps` with `config.use_lu_lambdas = True`") if timesteps is not None: timesteps = np.array(timesteps).astype(np.int64) else: # Clipping the minimum of all lambda(t) for numerical stability. # This is critical for cosine (squaredcos_cap_v2) noise schedule. clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, last_timestep - 1, num_inference_steps + 1) .round()[::-1][:-1] .copy() .astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = last_timestep // (num_inference_steps + 1) # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = ( (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) if self.config.use_karras_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() elif self.config.use_lu_lambdas: lambdas = np.flip(log_sigmas.copy()) lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps) sigmas = np.exp(lambdas) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.final_sigmas_type == "sigma_min": sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 elif self.config.final_sigmas_type == "zero": sigma_last = 0 else: raise ValueError( f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" ) sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [ None, ] * self.config.solver_order self.lower_order_nums = 0 # add an index counter for schedulers that allow duplicated timesteps self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(np.maximum(sigma, 1e-10)) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: """Constructs the noise schedule of Karras et al. (2022).""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def _convert_to_lu(self, in_lambdas: torch.Tensor, num_inference_steps) -> torch.Tensor: """Constructs the noise schedule of Lu et al. (2022).""" lambda_min: float = in_lambdas[-1].item() lambda_max: float = in_lambdas[0].item() rho = 1.0 # 1.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = lambda_min ** (1 / rho) max_inv_rho = lambda_max ** (1 / rho) lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return lambdas def convert_model_output( self, model_output: torch.Tensor, *args, sample: torch.Tensor = None, **kwargs, ) -> torch.Tensor: """ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. 
DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. Returns: `torch.Tensor`: The converted model output. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyward argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) # DPM-Solver++ needs to solve an integral of the data prediction model. if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: if self.config.prediction_type == "epsilon": # DPM-Solver and DPM-Solver++ only need the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: model_output = model_output[:, :3] sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == "sample": x0_pred = model_output elif self.config.prediction_type == "v_prediction": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the DPMSolverMultistepScheduler." ) if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred # DPM-Solver needs to solve an integral of the noise prediction model. elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: if self.config.prediction_type == "epsilon": # DPM-Solver and DPM-Solver++ only need the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == "sample": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == "v_prediction": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the DPMSolverMultistepScheduler." ) if self.config.thresholding: sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def dpm_solver_first_order_update( self, model_output: torch.Tensor, *args, sample: torch.Tensor = None, noise: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: """ One step for the first-order DPMSolver (equivalent to DDIM). Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model. 
sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. Returns: `torch.Tensor`: The sample tensor at the previous timestep. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(" missing `sample` as a required keyward argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == "dpmsolver++": x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output elif self.config.algorithm_type == "dpmsolver": x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output elif self.config.algorithm_type == "sde-dpmsolver++": assert noise is not None x_t = ( (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.algorithm_type == "sde-dpmsolver": assert noise is not None x_t = ( (alpha_t / alpha_s) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) return x_t def multistep_dpm_solver_second_order_update( self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor = None, noise: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: """ One step for the second-order multistep DPMSolver. Args: model_output_list (`List[torch.Tensor]`): The direct outputs from learned diffusion model at current and latter timesteps. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. Returns: `torch.Tensor`: The sample tensor at the previous timestep. 
""" timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(" missing `sample` as a required keyward argument") if timestep_list is not None: deprecate( "timestep_list", "1.0.0", "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s0, sigma_s1 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) m0, m1 = model_output_list[-1], model_output_list[-2] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) if self.config.algorithm_type == "dpmsolver++": # See https://arxiv.org/abs/2211.01095 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 ) elif self.config.algorithm_type == "dpmsolver": # See https://arxiv.org/abs/2206.00927 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 ) elif self.config.algorithm_type == "sde-dpmsolver++": assert noise is not None if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.algorithm_type == "sde-dpmsolver": assert noise is not None if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * (torch.exp(h) - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) return x_t def multistep_dpm_solver_third_order_update( self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor = None, **kwargs, ) -> torch.Tensor: """ 
One step for the third-order multistep DPMSolver. Args: model_output_list (`List[torch.Tensor]`): The direct outputs from learned diffusion model at current and latter timesteps. sample (`torch.Tensor`): A current instance of a sample created by diffusion process. Returns: `torch.Tensor`: The sample tensor at the previous timestep. """ timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(" missing`sample` as a required keyward argument") if timestep_list is not None: deprecate( "timestep_list", "1.0.0", "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 r0, r1 = h_0 / h, h_1 / h D0 = m0 D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) if self.config.algorithm_type == "dpmsolver++": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 ) elif self.config.algorithm_type == "dpmsolver": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 ) return x_t def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): """ Initialize the step_index counter for the scheduler. 
""" if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step( self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, generator=None, variance_noise: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the multistep DPMSolver. Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. variance_noise (`torch.Tensor`): Alternative to generating noise with `generator` by directly providing the noise for the variance itself. Useful for methods such as [`LEdits++`]. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.step_index is None: self._init_step_index(timestep) # Improve numerical stability for small number of steps lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15) or self.config.final_sigmas_type == "zero" ) lower_order_second = ( (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 ) model_output = self.convert_model_output(model_output, sample=sample) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output # Upcast to avoid precision issues when computing prev_sample sample = sample.to(torch.float32) if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None: noise = randn_tensor( model_output.shape, generator=generator, device=model_output.device, dtype=torch.float32 ) elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: noise = variance_noise.to(device=model_output.device, dtype=torch.float32) else: noise = None if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) else: prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) if self.lower_order_nums < self.config.solver_order: self.lower_order_nums += 1 # Cast sample back to expected dtype prev_sample = prev_sample.to(model_output.dtype) # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: 
torch.Tensor, *args, **kwargs) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. Returns: `torch.Tensor`: A scaled input sample. """ return sample def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor, ) -> torch.Tensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): # mps does not support float64 schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: # add_noise is called after first denoising step (for inpainting) step_indices = [self.step_index] * timesteps.shape[0] else: # add noise is called before first denoising step to create initial latent(img2img) step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py", "repo_id": "diffusers", "token_count": 22854 }
151
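# Illustrative usage sketch (not part of the file above). `DPMSolverMultistepScheduler` is public
# diffusers API; the epsilon prediction below is a random stand-in for a UNet call so the loop is
# self-contained. In a real pipeline the scheduler is typically swapped in with
# `pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)`.
import torch

from diffusers import DPMSolverMultistepScheduler

scheduler = DPMSolverMultistepScheduler(algorithm_type="dpmsolver++", solver_order=2)
scheduler.set_timesteps(num_inference_steps=20)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    epsilon = torch.randn_like(model_input)  # stand-in for unet(model_input, t, encoder_hidden_states).sample
    sample = scheduler.step(epsilon, t, sample).prev_sample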
# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class KarrasVeSchedulerState: # setable values num_inference_steps: Optional[int] = None timesteps: Optional[jnp.ndarray] = None schedule: Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def create(cls): return cls() @dataclass class FlaxKarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. """ prev_sample: jnp.ndarray derivative: jnp.ndarray state: KarrasVeSchedulerState class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin): """ Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and the VE column of Table 1 from [1] for reference. [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations." https://arxiv.org/abs/2011.13456 [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper. Args: sigma_min (`float`): minimum noise magnitude sigma_max (`float`): maximum noise magnitude s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`): the parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`): the end value of the sigma range where we add noise. A reasonable range is [0.2, 80]. 
""" @property def has_state(self): return True @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): pass def create_state(self): return KarrasVeSchedulerState.create() def set_timesteps( self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> KarrasVeSchedulerState: """ Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ timesteps = jnp.arange(0, num_inference_steps)[::-1].copy() schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, ) def add_noise_to_input( self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: jax.Array, ) -> Tuple[jnp.ndarray, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a higher noise level sigma_hat = sigma_i + gamma_i*sigma_i. TODO Args: """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) key = random.split(key, num=1) eps = self.config.s_noise * random.normal(key=key, shape=sample.shape) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.Tensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.Tensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def step_correct( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Correct the predicted sample based on the output model_output of the network. TODO complete description Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.Tensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.Tensor` or `np.ndarray`): TODO sample_prev (`torch.Tensor` or `np.ndarray`): TODO derivative (`torch.Tensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py", "repo_id": "diffusers", "token_count": 3948 }
152
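# Illustrative usage sketch (not part of the file above): the deterministic part of Karras et al.
# Algorithm 2 with `FlaxKarrasVeScheduler`. The stochastic churn step (`add_noise_to_input`) and
# the second-order correction (`step_correct`) are omitted for brevity, and the model output is a
# zeros stand-in so the loop runs without weights. Requires a JAX/Flax install; the import path
# mirrors the file location shown above and may differ in other diffusers versions.
import jax
import jax.numpy as jnp

from diffusers.schedulers.scheduling_karras_ve_flax import FlaxKarrasVeScheduler

scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 64, 64)) * scheduler.config.sigma_max

for t in state.timesteps:
    sigma = state.schedule[t]
    sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
    model_output = jnp.zeros_like(sample)  # stand-in for a denoiser evaluated at (sample, sigma)
    output = scheduler.step(state, model_output, sigma_hat=sigma, sigma_prev=sigma_prev, sample_hat=sample)
    sample = output.prev_sample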
import contextlib import copy import math import random from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np import torch from .models import UNet2DConditionModel from .schedulers import SchedulerMixin from .utils import ( convert_state_dict_to_diffusers, convert_state_dict_to_peft, deprecate, is_peft_available, is_torch_npu_available, is_torchvision_available, is_transformers_available, ) if is_transformers_available(): import transformers if is_peft_available(): from peft import set_peft_model_state_dict if is_torchvision_available(): from torchvision import transforms if is_torch_npu_available(): import torch_npu # noqa: F401 def set_seed(seed: int): """ Args: Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_torch_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available def compute_snr(noise_scheduler, timesteps): """ Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 """ alphas_cumprod = noise_scheduler.alphas_cumprod sqrt_alphas_cumprod = alphas_cumprod**0.5 sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 # Expand the tensors. # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026 sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape): sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None] alpha = sqrt_alphas_cumprod.expand(timesteps.shape) sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float() while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape): sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None] sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape) # Compute SNR. snr = (alpha / sigma) ** 2 return snr def resolve_interpolation_mode(interpolation_type: str): """ Maps a string describing an interpolation function to the corresponding torchvision `InterpolationMode` enum. The full list of supported enums is documented at https://pytorch.org/vision/0.9/transforms.html#torchvision.transforms.functional.InterpolationMode. Args: interpolation_type (`str`): A string describing an interpolation method. Currently, `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos` are supported, corresponding to the supported interpolation modes in torchvision. Returns: `torchvision.transforms.InterpolationMode`: an `InterpolationMode` enum used by torchvision's `resize` transform. """ if not is_torchvision_available(): raise ImportError( "Please make sure to install `torchvision` to be able to use the `resolve_interpolation_mode()` function." 
) if interpolation_type == "bilinear": interpolation_mode = transforms.InterpolationMode.BILINEAR elif interpolation_type == "bicubic": interpolation_mode = transforms.InterpolationMode.BICUBIC elif interpolation_type == "box": interpolation_mode = transforms.InterpolationMode.BOX elif interpolation_type == "nearest": interpolation_mode = transforms.InterpolationMode.NEAREST elif interpolation_type == "nearest_exact": interpolation_mode = transforms.InterpolationMode.NEAREST_EXACT elif interpolation_type == "hamming": interpolation_mode = transforms.InterpolationMode.HAMMING elif interpolation_type == "lanczos": interpolation_mode = transforms.InterpolationMode.LANCZOS else: raise ValueError( f"The given interpolation mode {interpolation_type} is not supported. Currently supported interpolation" f" modes are `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." ) return interpolation_mode def compute_dream_and_update_latents( unet: UNet2DConditionModel, noise_scheduler: SchedulerMixin, timesteps: torch.Tensor, noise: torch.Tensor, noisy_latents: torch.Tensor, target: torch.Tensor, encoder_hidden_states: torch.Tensor, dream_detail_preservation: float = 1.0, ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: """ Implements "DREAM (Diffusion Rectification and Estimation-Adaptive Models)" from http://arxiv.org/abs/2312.00210. DREAM helps align training with sampling to help training be more efficient and accurate at the cost of an extra forward step without gradients. Args: `unet`: The state unet to use to make a prediction. `noise_scheduler`: The noise scheduler used to add noise for the given timestep. `timesteps`: The timesteps for the noise_scheduler to user. `noise`: A tensor of noise in the shape of noisy_latents. `noisy_latents`: Previously noise latents from the training loop. `target`: The ground-truth tensor to predict after eps is removed. `encoder_hidden_states`: Text embeddings from the text model. `dream_detail_preservation`: A float value that indicates detail preservation level. See reference. Returns: `tuple[torch.Tensor, torch.Tensor]`: Adjusted noisy_latents and target. """ alphas_cumprod = noise_scheduler.alphas_cumprod.to(timesteps.device)[timesteps, None, None, None] sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 # The paper uses lambda = sqrt(1 - alpha) ** p, with p = 1 in their experiments. dream_lambda = sqrt_one_minus_alphas_cumprod**dream_detail_preservation pred = None with torch.no_grad(): pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample _noisy_latents, _target = (None, None) if noise_scheduler.config.prediction_type == "epsilon": predicted_noise = pred delta_noise = (noise - predicted_noise).detach() delta_noise.mul_(dream_lambda) _noisy_latents = noisy_latents.add(sqrt_one_minus_alphas_cumprod * delta_noise) _target = target.add(delta_noise) elif noise_scheduler.config.prediction_type == "v_prediction": raise NotImplementedError("DREAM has not been implemented for v-prediction") else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") return _noisy_latents, _target def unet_lora_state_dict(unet: UNet2DConditionModel) -> Dict[str, torch.Tensor]: r""" Returns: A state dict containing just the LoRA parameters. 
""" lora_state_dict = {} for name, module in unet.named_modules(): if hasattr(module, "set_lora_layer"): lora_layer = getattr(module, "lora_layer") if lora_layer is not None: current_lora_layer_sd = lora_layer.state_dict() for lora_layer_matrix_name, lora_param in current_lora_layer_sd.items(): # The matrix name can either be "down" or "up". lora_state_dict[f"{name}.lora.{lora_layer_matrix_name}"] = lora_param return lora_state_dict def cast_training_params(model: Union[torch.nn.Module, List[torch.nn.Module]], dtype=torch.float32): if not isinstance(model, list): model = [model] for m in model: for param in m.parameters(): # only upcast trainable parameters into fp32 if param.requires_grad: param.data = param.to(dtype) def _set_state_dict_into_text_encoder( lora_state_dict: Dict[str, torch.Tensor], prefix: str, text_encoder: torch.nn.Module ): """ Sets the `lora_state_dict` into `text_encoder` coming from `transformers`. Args: lora_state_dict: The state dictionary to be set. prefix: String identifier to retrieve the portion of the state dict that belongs to `text_encoder`. text_encoder: Where the `lora_state_dict` is to be set. """ text_encoder_state_dict = { f'{k.replace(prefix, "")}': v for k, v in lora_state_dict.items() if k.startswith(prefix) } text_encoder_state_dict = convert_state_dict_to_peft(convert_state_dict_to_diffusers(text_encoder_state_dict)) set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name="default") def compute_density_for_timestep_sampling( weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None ): """Compute the density for sampling the timesteps when doing SD3 training. Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. SD3 paper reference: https://arxiv.org/abs/2403.03206v1. """ if weighting_scheme == "logit_normal": # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$). u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device="cpu") u = torch.nn.functional.sigmoid(u) elif weighting_scheme == "mode": u = torch.rand(size=(batch_size,), device="cpu") u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u) else: u = torch.rand(size=(batch_size,), device="cpu") return u def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None): """Computes loss weighting scheme for SD3 training. Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. SD3 paper reference: https://arxiv.org/abs/2403.03206v1. """ if weighting_scheme == "sigma_sqrt": weighting = (sigmas**-2.0).float() elif weighting_scheme == "cosmap": bot = 1 - 2 * sigmas + 2 * sigmas**2 weighting = 2 / (math.pi * bot) else: weighting = torch.ones_like(sigmas) return weighting # Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 class EMAModel: """ Exponential Moving Average of models weights """ def __init__( self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0, power: Union[float, int] = 2 / 3, foreach: bool = False, model_cls: Optional[Any] = None, model_config: Dict[str, Any] = None, **kwargs, ): """ Args: parameters (Iterable[torch.nn.Parameter]): The parameters to track. decay (float): The decay factor for the exponential moving average. min_decay (float): The minimum decay factor for the exponential moving average. 
update_after_step (int): The number of steps to wait before starting to update the EMA weights. use_ema_warmup (bool): Whether to use EMA warmup. inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1. Only used if `use_ema_warmup` is True. power (float): Exponential factor of EMA warmup. Default: 2/3. Only used if `use_ema_warmup` is True. foreach (bool): Use torch._foreach functions for updating shadow parameters. Should be faster. device (Optional[Union[str, torch.device]]): The device to store the EMA weights on. If None, the EMA weights will be stored on CPU. @crowsonkb's notes on EMA Warmup: If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are good values for models you plan to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at 215.4k steps). """ if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility use_ema_warmup = True if kwargs.get("max_value", None) is not None: deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead." deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False) decay = kwargs["max_value"] if kwargs.get("min_value", None) is not None: deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead." deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False) min_decay = kwargs["min_value"] parameters = list(parameters) self.shadow_params = [p.clone().detach() for p in parameters] if kwargs.get("device", None) is not None: deprecation_message = "The `device` argument is deprecated. Please use `to` instead." 
deprecate("device", "1.0.0", deprecation_message, standard_warn=False) self.to(device=kwargs["device"]) self.temp_stored_params = None self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_ema_warmup = use_ema_warmup self.inv_gamma = inv_gamma self.power = power self.optimization_step = 0 self.cur_decay_value = None # set in `step()` self.foreach = foreach self.model_cls = model_cls self.model_config = model_config @classmethod def from_pretrained(cls, path, model_cls, foreach=False) -> "EMAModel": _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True) model = model_cls.from_pretrained(path) ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config, foreach=foreach) ema_model.load_state_dict(ema_kwargs) return ema_model def save_pretrained(self, path): if self.model_cls is None: raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.") if self.model_config is None: raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.") model = self.model_cls.from_config(self.model_config) state_dict = self.state_dict() state_dict.pop("shadow_params", None) model.register_to_config(**state_dict) self.copy_to(model.parameters()) model.save_pretrained(path) def get_decay(self, optimization_step: int) -> float: """ Compute the decay factor for the exponential moving average. """ step = max(0, optimization_step - self.update_after_step - 1) if step <= 0: return 0.0 if self.use_ema_warmup: cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power else: cur_decay_value = (1 + step) / (10 + step) cur_decay_value = min(cur_decay_value, self.decay) # make sure decay is not smaller than min_decay cur_decay_value = max(cur_decay_value, self.min_decay) return cur_decay_value @torch.no_grad() def step(self, parameters: Iterable[torch.nn.Parameter]): if isinstance(parameters, torch.nn.Module): deprecation_message = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False, ) parameters = parameters.parameters() parameters = list(parameters) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
decay = self.get_decay(self.optimization_step) self.cur_decay_value = decay one_minus_decay = 1 - decay context_manager = contextlib.nullcontext if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): import deepspeed if self.foreach: if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(parameters, modifier_rank=None) with context_manager(): params_grad = [param for param in parameters if param.requires_grad] s_params_grad = [ s_param for s_param, param in zip(self.shadow_params, parameters) if param.requires_grad ] if len(params_grad) < len(parameters): torch._foreach_copy_( [s_param for s_param, param in zip(self.shadow_params, parameters) if not param.requires_grad], [param for param in parameters if not param.requires_grad], non_blocking=True, ) torch._foreach_sub_( s_params_grad, torch._foreach_sub(s_params_grad, params_grad), alpha=one_minus_decay ) else: for s_param, param in zip(self.shadow_params, parameters): if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param)) else: s_param.copy_(param) def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: """ Copy current averaged parameters into given collection of parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored moving averages. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ parameters = list(parameters) if self.foreach: torch._foreach_copy_( [param.data for param in parameters], [s_param.to(param.device).data for s_param, param in zip(self.shadow_params, parameters)], ) else: for s_param, param in zip(self.shadow_params, parameters): param.data.copy_(s_param.to(param.device).data) def pin_memory(self) -> None: r""" Move internal buffers of the ExponentialMovingAverage to pinned memory. Useful for non-blocking transfers for offloading EMA params to the host. """ self.shadow_params = [p.pin_memory() for p in self.shadow_params] def to(self, device=None, dtype=None, non_blocking=False) -> None: r"""Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to` """ # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype, non_blocking=non_blocking) if p.is_floating_point() else p.to(device=device, non_blocking=non_blocking) for p in self.shadow_params ] def state_dict(self) -> dict: r""" Returns the state of the ExponentialMovingAverage as a dict. This method is used by accelerate during checkpointing to save the ema state dict. """ # Following PyTorch conventions, references to tensors are returned: # "returns a reference to the state and not its copy!" - # https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def store(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Args: Save the current parameters for restoring later. 
parameters: Iterable of `torch.nn.Parameter`; the parameters to be temporarily stored. """ self.temp_stored_params = [param.detach().cpu().clone() for param in parameters] def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None: r""" Args: Restore the parameters stored with the `store` method. Useful to validate the model with EMA parameters without: affecting the original optimization process. Store the parameters before the `copy_to()` method. After validation (or model saving), use this to restore the former parameters. parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored parameters. If `None`, the parameters with which this `ExponentialMovingAverage` was initialized will be used. """ if self.temp_stored_params is None: raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`") if self.foreach: torch._foreach_copy_( [param.data for param in parameters], [c_param.data for c_param in self.temp_stored_params] ) else: for c_param, param in zip(self.temp_stored_params, parameters): param.data.copy_(c_param.data) # Better memory-wise. self.temp_stored_params = None def load_state_dict(self, state_dict: dict) -> None: r""" Args: Loads the ExponentialMovingAverage state. This method is used by accelerate during checkpointing to save the ema state dict. state_dict (dict): EMA state. Should be an object returned from a call to :meth:`state_dict`. """ # deepcopy, to be consistent with module API state_dict = copy.deepcopy(state_dict) self.decay = state_dict.get("decay", self.decay) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("Decay must be between 0 and 1") self.min_decay = state_dict.get("min_decay", self.min_decay) if not isinstance(self.min_decay, float): raise ValueError("Invalid min_decay") self.optimization_step = state_dict.get("optimization_step", self.optimization_step) if not isinstance(self.optimization_step, int): raise ValueError("Invalid optimization_step") self.update_after_step = state_dict.get("update_after_step", self.update_after_step) if not isinstance(self.update_after_step, int): raise ValueError("Invalid update_after_step") self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup) if not isinstance(self.use_ema_warmup, bool): raise ValueError("Invalid use_ema_warmup") self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma) if not isinstance(self.inv_gamma, (float, int)): raise ValueError("Invalid inv_gamma") self.power = state_dict.get("power", self.power) if not isinstance(self.power, (float, int)): raise ValueError("Invalid power") shadow_params = state_dict.get("shadow_params", None) if shadow_params is not None: self.shadow_params = shadow_params if not isinstance(self.shadow_params, list): raise ValueError("shadow_params must be a list") if not all(isinstance(p, torch.Tensor) for p in self.shadow_params): raise ValueError("shadow_params must all be Tensors")
diffusers/src/diffusers/training_utils.py/0
{ "file_path": "diffusers/src/diffusers/training_utils.py", "repo_id": "diffusers", "token_count": 10653 }
153
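A minimal sketch of how the `EMAModel` defined in `training_utils.py` above is typically wired into a training script. Everything other than the `EMAModel` / `UNet2DConditionModel` API is a placeholder (the checkpoint name, `train_dataloader`, `compute_loss` and `run_validation` are assumed to exist in the surrounding script), so treat it as an illustration of the intended call order rather than a complete program.

import torch
from diffusers import UNet2DConditionModel
from diffusers.training_utils import EMAModel

unet = UNet2DConditionModel.from_pretrained("some/checkpoint", subfolder="unet")  # placeholder checkpoint
ema_unet = EMAModel(
    unet.parameters(),
    decay=0.9999,
    use_ema_warmup=True,
    model_cls=UNet2DConditionModel,
    model_config=unet.config,
)
optimizer = torch.optim.AdamW(unet.parameters(), lr=1e-4)

for step, batch in enumerate(train_dataloader):   # assumed dataloader
    loss = compute_loss(unet, batch)               # assumed loss function
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ema_unet.step(unet.parameters())               # update the shadow weights after every optimizer step

    if step % 1000 == 0:
        ema_unet.store(unet.parameters())          # stash the live weights
        ema_unet.copy_to(unet.parameters())        # evaluate / checkpoint with the EMA weights
        run_validation(unet)                       # assumed validation routine
        ema_unet.restore(unet.parameters())        # put the live weights back before training resumes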
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class KolorsImg2ImgPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "sentencepiece"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "sentencepiece"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "sentencepiece"]) class KolorsPAGPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "sentencepiece"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "sentencepiece"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "sentencepiece"]) class KolorsPipeline(metaclass=DummyObject): _backends = ["torch", "transformers", "sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "transformers", "sentencepiece"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "sentencepiece"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "transformers", "sentencepiece"])
diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py", "repo_id": "diffusers", "token_count": 636 }
154
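The dummy classes above are what `import diffusers` falls back to when the optional torch/transformers/sentencepiece backends are missing; instantiating one immediately reports the missing dependencies instead of failing with an opaque AttributeError. A rough, hypothetical illustration of that behaviour (the printed message is paraphrased, not the exact wording produced by `requires_backends`):

from diffusers.utils import is_torch_available, is_transformers_available

if not (is_torch_available() and is_transformers_available()):
    # With the backends missing, this import resolves to the dummy class defined above.
    from diffusers import KolorsPipeline

    try:
        KolorsPipeline()  # __init__ calls requires_backends(self, ["torch", "transformers", "sentencepiece"])
    except ImportError as err:
        print(err)  # tells the user which backends need to be installed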
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for working with package versions """ import importlib.metadata import operator import re import sys from typing import Optional from packaging import version ops = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): if got_ver is None or want_ver is None: raise ValueError( f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" f" reinstalling {pkg}." ) if not ops[op](version.parse(got_ver), version.parse(want_ver)): raise ImportError( f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def require_version(requirement: str, hint: Optional[str] = None) -> None: """ Perform a runtime check of the dependency versions, using the exact same syntax used by pip. The installed module version comes from the *site-packages* dir via *importlib.metadata*. Args: requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" hint (`str`, *optional*): what suggestion to print in case of requirements not being met Example: ```python require_version("pandas>1.1.2") require_version("numpy>1.18.5", "this is important to have for whatever reason") ```""" hint = f"\n{hint}" if hint is not None else "" # non-versioned check if re.match(r"^[\w_\-\d]+$", requirement): pkg, op, want_ver = requirement, None, None else: match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" f" got {requirement}" ) pkg, want_full = match[0] want_range = want_full.split(",") # there could be multiple requirements wanted = {} for w in want_range: match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," f" but got {requirement}" ) op, want_ver = match[0] wanted[op] = want_ver if op not in ops: raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") # special case if pkg == "python": got_ver = ".".join([str(x) for x in sys.version_info[:3]]) for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) return # check if any version is installed try: got_ver = importlib.metadata.version(pkg) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"The '{requirement}' distribution was not found and is required by this application. 
{hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) def require_version_core(requirement): """require_version wrapper which emits a core-specific hint on failure""" hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(requirement, hint)
diffusers/src/diffusers/utils/versions.py/0
{ "file_path": "diffusers/src/diffusers/utils/versions.py", "repo_id": "diffusers", "token_count": 1699 }
155
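A few illustrative calls against the version-checking helpers above; the package names and version numbers are arbitrary examples, and each call raises if the requirement is not satisfied in the current environment.

from diffusers.utils.versions import require_version, require_version_core

require_version("numpy")              # bare package name: only checks that it is installed
require_version("torch>=1.4,<3.0")    # multiple comma-separated constraints are supported
require_version("python>=3.8")        # "python" is special-cased against sys.version_info
require_version(
    "tokenizers==0.9.4",
    "This pinned version is needed for tokenization to behave consistently.",  # custom hint on failure
)

# Same check, but with the library's standard reinstall hint appended to the error message.
require_version_core("packaging>=20.0")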
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import VQModel from diffusers.utils.testing_utils import ( backend_manual_seed, enable_full_determinism, floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = VQModel main_input_name = "sample" @property def dummy_input(self, sizes=(32, 32)): batch_size = 4 num_channels = 3 image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [8, 16], "norm_num_groups": 8, "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_training(self): pass def test_from_pretrained_hub(self): model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(torch_device).eval() torch.manual_seed(0) backend_manual_seed(torch_device, 0) image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) image = image.to(torch_device) with torch.no_grad(): output = model(image).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) def test_loss_pretrained(self): model = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(torch_device).eval() torch.manual_seed(0) backend_manual_seed(torch_device, 0) image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) image = image.to(torch_device) with torch.no_grad(): output = model(image).commit_loss.cpu() # fmt: off expected_output = torch.tensor([0.1936]) # fmt: on self.assertTrue(torch.allclose(output, expected_output, atol=1e-3))
diffusers/tests/models/autoencoders/test_models_vq.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_vq.py", "repo_id": "diffusers", "token_count": 1581 }
156
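For readers not familiar with `ModelTesterMixin`, the two dicts returned by `prepare_init_args_and_inputs_for_common` above are consumed roughly as in the sketch below: the mixin builds the model from `init_dict` and runs the dummy input through it. This is an illustrative reconstruction of that flow, not the mixin's actual code; the same pattern applies to the other model tests in this collection (for example the SD3 transformer test further below).

import torch
from diffusers import VQModel

init_dict = {
    "block_out_channels": [8, 16],
    "norm_num_groups": 8,
    "in_channels": 3,
    "out_channels": 3,
    "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
    "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
    "latent_channels": 3,
}

model = VQModel(**init_dict)
model.eval()

sample = torch.randn(4, 3, 32, 32)         # same shape as the test's dummy input
with torch.no_grad():
    reconstruction = model(sample).sample  # the output object exposes the decoded image as `.sample`

print(reconstruction.shape)                # torch.Size([4, 3, 32, 32]), matching output_shape above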
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import SD3Transformer2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class SD3TransformerTests(ModelTesterMixin, unittest.TestCase): model_class = SD3Transformer2DModel main_input_name = "hidden_states" @property def dummy_input(self): batch_size = 2 num_channels = 4 height = width = embedding_dim = 32 pooled_embedding_dim = embedding_dim * 2 sequence_length = 154 hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "pooled_projections": pooled_prompt_embeds, "timestep": timestep, } @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 32, "patch_size": 1, "in_channels": 4, "num_layers": 1, "attention_head_dim": 8, "num_attention_heads": 4, "caption_projection_dim": 32, "joint_attention_dim": 32, "pooled_projection_dim": 64, "out_channels": 4, } inputs_dict = self.dummy_input return init_dict, inputs_dict @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. This test doesn't apply") def test_set_attn_processor_for_determinism(self): pass
diffusers/tests/models/transformers/test_models_transformer_sd3.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_sd3.py", "repo_id": "diffusers", "token_count": 1072 }
157
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from importlib import import_module class DependencyTester(unittest.TestCase): def test_diffusers_import(self): try: import diffusers # noqa: F401 except ImportError: assert False def test_backend_registration(self): import diffusers from diffusers.dependency_versions_table import deps all_classes = inspect.getmembers(diffusers, inspect.isclass) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": backend = "k-diffusion" elif backend == "invisible_watermark": backend = "invisible-watermark" assert backend in deps, f"{backend} is not in the deps table!" def test_pipeline_imports(self): import diffusers import diffusers.pipelines all_classes = inspect.getmembers(diffusers, inspect.isclass) for cls_name, cls_module in all_classes: if hasattr(diffusers.pipelines, cls_name): pipeline_folder_module = ".".join(str(cls_module.__module__).split(".")[:3]) _ = import_module(pipeline_folder_module, str(cls_name))
diffusers/tests/others/test_dependencies.py/0
{ "file_path": "diffusers/tests/others/test_dependencies.py", "repo_id": "diffusers", "token_count": 775 }
158
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffSDXLPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter, UNet2DConditionModel, UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineTesterMixin, SDFunctionTesterMixin, SDXLOptionalComponentsTesterMixin, ) def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffPipelineSDXLFastTests( IPAdapterTesterMixin, SDFunctionTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = AnimateDiffSDXLPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64, 128), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4, 8), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2, 4), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, norm_num_groups=1, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") motion_adapter = MotionAdapter( block_out_channels=(32, 64, 128), motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, use_motion_mid_block=False, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = 
torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffSDXLPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) prompt = inputs.pop("prompt") ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = pipe.encode_prompt(prompt) pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) def test_save_load_optional_components(self): self._test_save_load_optional_components() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
diffusers/tests/pipelines/animatediff/test_animatediff_sdxl.py/0
{ "file_path": "diffusers/tests/pipelines/animatediff/test_animatediff_sdxl.py", "repo_id": "diffusers", "token_count": 5298 }
159
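Outside the test harness, the pipeline exercised above is used roughly as follows. The checkpoint identifiers are placeholders rather than verified model repos, and the sampling parameters are only examples; the sketch assumes a CUDA device with enough memory for an SDXL-sized model.

import torch
from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler, MotionAdapter

adapter = MotionAdapter.from_pretrained("some-org/animatediff-sdxl-motion-adapter", torch_dtype=torch.float16)
pipe = AnimateDiffSDXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",    # any SDXL base checkpoint
    motion_adapter=adapter,
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

frames = pipe(
    prompt="a panda surfing a wave, highly detailed",
    num_inference_steps=25,
    guidance_scale=7.5,
    num_frames=16,
    generator=torch.Generator("cpu").manual_seed(0),
).frames[0]                                        # the first generated clip, accessed the same way as in the tests above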
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, EulerDiscreteScheduler, LCMScheduler, StableDiffusionControlNetPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, get_python_version, is_torch_compile, load_image, load_numpy, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.to("cuda") pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet.to(memory_format=torch.channels_last) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe(prompt, image, num_inference_steps=10, generator=generator, output_type="np") image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy" ) expected_image = np.resize(expected_image, (512, 512, 3)) assert np.abs(expected_image - image).max() < 1.0 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ControlNetPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, 
time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5234, 0.3333, 0.1745, 0.7605, 0.6224, 0.4637, 0.6989, 0.7526, 0.4665]) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_controlnet_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 
3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_controlnet_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionMultiControlNetPipelineFastTests( IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal_(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } 
return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.2422, 0.3425, 0.4048, 0.5351, 0.3503, 0.2419, 0.4645, 0.4570, 0.3804]) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass def test_inference_multiple_prompt_input(self): device = "cpu" components = self.get_dummy_components() sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"], inputs["prompt"]] inputs["image"] = [inputs["image"], inputs["image"]] output = sd_pipe(**inputs) image = output.images 
assert image.shape == (2, 64, 64, 3) image_1, image_2 = image # make sure that the outputs are different assert np.sum(np.abs(image_1 - image_2)) > 1e-3 # multiple prompts, single image conditioning inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"], inputs["prompt"]] output_1 = sd_pipe(**inputs) assert np.abs(image - output_1.images).max() < 1e-3 # multiple prompts, multiple image conditioning inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"], inputs["prompt"], inputs["prompt"], inputs["prompt"]] inputs["image"] = [inputs["image"], inputs["image"], inputs["image"], inputs["image"]] output_2 = sd_pipe(**inputs) image = output_2.images assert image.shape == (4, 64, 64, 3) class StableDiffusionMultiControlNetOneModelPipelineFastTests( IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal_(m.weight) m.bias.data.fill_(1.0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def 
test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe( **inputs, control_guidance_start=[0.1], control_guidance_end=[0.2], )[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5264, 0.3203, 0.1602, 0.8235, 0.6332, 0.4593, 0.7226, 0.7777, 0.4780]) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_depth(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth") pipe = 
StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth_out.npy" ) assert np.abs(expected_image - image).max() < 8e-1 def test_hed(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "oil painting of handsome old man, masterpiece" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_mlsd(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "room" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_normal(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "cute toy" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_openpose(self): controlnet = 
ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Chef in the kitchen" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/chef_pose_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_scribble(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "bag" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (640, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_seg(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "house" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() prompt = "house" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) _ = pipe( prompt, image, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 4 * 10**9 def test_canny_guess_mode(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = 
StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.2724, 0.2846, 0.2724, 0.3843, 0.3682, 0.2736, 0.4675, 0.3862, 0.2887]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_canny_guess_mode_euler(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @is_torch_compile @require_torch_2 @unittest.skipIf( get_python_version == (3, 12), reason="Torch Dynamo isn't yet supported for Python 3.12.", ) def test_stable_diffusion_compile(self): run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None) def test_v11_shuffle_global_pool_conditions(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "New York" image = load_image( "https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=7.0, ) image = output.images[0] assert image.shape == (512, 640, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1338, 0.1597, 0.1202, 0.1687, 0.1377, 0.1017, 0.2070, 0.1574, 0.1348]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_pose_and_canny(self): controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", 
safety_checker=None, controlnet=[controlnet_pose, controlnet_canny] ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird and Chef" image_canny = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) image_pose = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, [image_pose, image_canny], generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose_canny_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2
diffusers/tests/pipelines/controlnet/test_controlnet.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_controlnet.py", "repo_id": "diffusers", "token_count": 18979 }
160
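The test_control_guidance_switch case above drives control_guidance_start / control_guidance_end both as scalars and as one-element lists and checks that each variant changes the output. A rough sketch of the user-facing call being exercised; the checkpoint names and the 0.1 to 0.2 window below are illustrative choices echoing the test inputs, not a canonical recipe:

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# Illustrative sketch only: apply the ControlNet during 10%-20% of the denoising schedule.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
)
pipe.enable_model_cpu_offload()

canny_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
# Scalars and per-controlnet lists (e.g. control_guidance_start=[0.1]) are both accepted.
image = pipe(
    "bird",
    image=canny_image,
    num_inference_steps=30,
    control_guidance_start=0.1,
    control_guidance_end=0.2,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]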
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny, ConsistencyDecoderVAE, ControlNetXSAdapter, DDIMScheduler, LCMScheduler, StableDiffusionControlNetXSPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, is_torch_compile, load_image, load_numpy, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ...models.autoencoders.test_models_vae import ( get_asym_autoencoder_kl_config, get_autoencoder_kl_config, get_autoencoder_tiny_config, get_consistency_vae_config, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDFunctionTesterMixin, ) enable_full_determinism() def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) controlnet = ControlNetXSAdapter.from_pretrained( "UmerHA/Testing-ConrolNetXS-SD2.1-canny", torch_dtype=torch.float16 ) pipe = StableDiffusionControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16, ) pipe.to("cuda") pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe(prompt, image, num_inference_steps=10, generator=generator, output_type="np") image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy" ) expected_image = np.resize(expected_image, (512, 512, 3)) assert np.abs(expected_image - image).max() < 1.0 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ControlNetXSPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDFunctionTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionControlNetXSPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = 
TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS test_attention_slicing = False def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=8, norm_num_groups=4, time_cond_proj_dim=time_cond_proj_dim, use_linear_projection=True, ) torch.manual_seed(0) controlnet = ControlNetXSAdapter.from_unet( unet=unet, size_ratio=1, learn_time_embedding=True, conditioning_embedding_out_channels=(2, 2), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=8, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 8 * controlnet_embedder_scale_factor, 8 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_controlnet_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=8) sd_pipe = StableDiffusionControlNetXSPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.745, 0.753, 0.767, 0.543, 0.523, 0.502, 0.314, 0.521, 0.478]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) 
pipe.set_progress_bar_config(disable=None) # pipeline creates a new UNetControlNetXSModel under the hood. So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_multi_vae(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) block_out_channels = pipe.vae.config.block_out_channels norm_num_groups = pipe.vae.config.norm_num_groups vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny] configs = [ get_autoencoder_kl_config(block_out_channels, norm_num_groups), get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups), get_consistency_vae_config(block_out_channels, norm_num_groups), get_autoencoder_tiny_config(block_out_channels), ] out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] for vae_cls, config in zip(vae_classes, configs): vae = vae_cls(**config) vae = vae.to(torch_device) components["vae"] = vae vae_pipe = self.pipeline_class(**components) # pipeline creates a new UNetControlNetXSModel under the hood, which aren't on device. # So we need to move the new pipe to device. vae_pipe.to(torch_device) vae_pipe.set_progress_bar_config(disable=None) out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] assert out_vae_np.shape == out_np.shape @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new UNetControlNetXSModel under the hood. 
So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) @slow @require_torch_gpu class ControlNetXSPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetXSAdapter.from_pretrained( "UmerHA/Testing-ConrolNetXS-SD2.1-canny", torch_dtype=torch.float16 ) pipe = StableDiffusionControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) original_image = image[-3:, -3:, -1].flatten() expected_image = np.array([0.1963, 0.229, 0.2659, 0.2109, 0.2332, 0.2827, 0.2534, 0.2422, 0.2808]) assert np.allclose(original_image, expected_image, atol=1e-04) def test_depth(self): controlnet = ControlNetXSAdapter.from_pretrained( "UmerHA/Testing-ConrolNetXS-SD2.1-depth", torch_dtype=torch.float16 ) pipe = StableDiffusionControlNetXSPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) original_image = image[-3:, -3:, -1].flatten() expected_image = np.array([0.4844, 0.4937, 0.4956, 0.4663, 0.5039, 0.5044, 0.4565, 0.4883, 0.4941]) assert np.allclose(original_image, expected_image, atol=1e-04) @is_torch_compile @require_torch_2 def test_stable_diffusion_compile(self): run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None)
diffusers/tests/pipelines/controlnet_xs/test_controlnetxs.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet_xs/test_controlnetxs.py", "repo_id": "diffusers", "token_count": 6576 }
161
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DiTTransformer2DModel, DPMSolverMultistepScheduler from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DiTPipeline params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) transformer = DiTTransformer2DModel( sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, ) vae = AutoencoderKL() scheduler = DDIMScheduler() components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "np", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 16, 16, 3)) expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @nightly @require_torch_gpu class DiTPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_dit_256(self): generator = torch.manual_seed(0) pipe = 
DiTPipeline.from_pretrained("facebook/DiT-XL-2-256") pipe.to("cuda") words = ["vase", "umbrella", "white shark", "white wolf"] ids = pipe.get_label_ids(words) images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images for word, image in zip(words, images): expected_image = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max()) < 1e-2 def test_dit_512(self): pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") words = ["vase", "umbrella"] ids = pipe.get_label_ids(words) generator = torch.manual_seed(0) images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images for word, image in zip(words, images): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max()) < 1e-1
diffusers/tests/pipelines/dit/test_dit.py/0
{ "file_path": "diffusers/tests/pipelines/dit/test_dit.py", "repo_id": "diffusers", "token_count": 2394 }
162
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import ( KandinskyV22CombinedPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22InpaintCombinedPipeline, ) from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies from .test_kandinsky_img2img import Dummies as Img2ImgDummies from .test_kandinsky_inpaint import Dummies as InpaintDummies from .test_kandinsky_prior import Dummies as PriorDummies enable_full_determinism() class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22CombinedPipeline params = [ "prompt", ] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = True callback_cfg_params = ["image_embds"] def get_dummy_components(self): dummy = Dummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update( { "height": 64, "width": 64, } ) return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3076, 0.2729, 0.5668, 0.0522, 0.3384, 0.7028, 0.4908, 0.3659, 0.6243]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images 
image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-3) def test_callback_inputs(self): pass def test_callback_cfg(self): pass class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Img2ImgCombinedPipeline params = ["prompt", "image"] batch_params = ["prompt", "negative_prompt", "image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embds"] def get_dummy_components(self): dummy = Img2ImgDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = Img2ImgDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4445, 0.4287, 0.4596, 0.3919, 0.3730, 0.5039, 0.4834, 0.4269, 0.5521]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert 
np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_callback_inputs(self): pass def test_callback_cfg(self): pass class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintCombinedPipeline params = ["prompt", "image", "mask_image"] batch_params = ["prompt", "negative_prompt", "image", "mask_image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = InpaintDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = InpaintDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): 
super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_callback_inputs(self): pass def test_callback_cfg(self): pass
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py", "repo_id": "diffusers", "token_count": 6108 }
163
import inspect import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AnimateDiffPAGPipeline, AnimateDiffPipeline, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LCMScheduler, MotionAdapter, StableDiffusionPipeline, UNet2DConditionModel, UNetMotionModel, ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin, SDFunctionTesterMixin, ) def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffPAGPipelineFastTests( IPAdapterTesterMixin, SDFunctionTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase ): pipeline_class = AnimateDiffPAGPipeline params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) def get_dummy_components(self): cross_attention_dim = 8 block_out_channels = (8, 8) torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=block_out_channels, layers_per_block=2, sample_size=8, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=cross_attention_dim, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=block_out_channels, in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=cross_attention_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") motion_adapter = MotionAdapter( block_out_channels=block_out_channels, motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "pag_scale": 3.0, "output_type": "pt", } return inputs def test_from_pipe_consistent_config(self): assert self.original_pipeline_class == StableDiffusionPipeline original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" original_kwargs = {"requires_safety_checker": False} # create original_pipeline_class(sd) pipe_original = 
self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) # original_pipeline_class(sd) -> pipeline_class pipe_components = self.get_dummy_components() pipe_additional_components = {} for name, component in pipe_components.items(): if name not in pipe_original.components: pipe_additional_components[name] = component pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) # pipeline_class -> original_pipeline_class(sd) original_pipe_additional_components = {} for name, component in pipe_original.components.items(): if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): original_pipe_additional_components[name] = component pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) # compare the config original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} assert original_config_2 == original_config def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array( [ 0.5068, 0.5294, 0.4926, 0.4810, 0.4188, 0.5935, 0.5295, 0.3947, 0.5300, 0.4706, 0.3950, 0.4737, 0.4072, 0.3227, 0.5481, 0.4864, 0.4518, 0.5315, 0.5979, 0.5374, 0.3503, 0.5275, 0.6067, 0.4914, 0.5440, 0.4775, 0.5538, ] ) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) def test_dict_tuple_outputs_equivalent(self): expected_slice = None if torch_device == "cpu": expected_slice = np.array([0.5295, 0.3947, 0.5300, 0.4864, 0.4518, 0.5315, 0.5440, 0.4775, 0.5538]) return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) pipe(**inputs) def test_free_init(self): components = self.get_dummy_components() pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] pipe.enable_free_init( num_iters=2, use_fast_sampling=True, method="butterworth", order=4, spatial_stop_frequency=0.25, temporal_stop_frequency=0.25, ) inputs_enable_free_init = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] pipe.disable_free_init() inputs_disable_free_init = self.get_dummy_inputs(torch_device) frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" ) self.assertLess( max_diff_disabled, 1e-3, "Disabling of FreeInit should lead to results similar to the default pipeline results", ) def test_free_init_with_schedulers(self): components = self.get_dummy_components() pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] schedulers_to_test = [ DPMSolverMultistepScheduler.from_config( components["scheduler"].config, timestep_spacing="linspace", beta_schedule="linear", algorithm_type="dpmsolver++", steps_offset=1, clip_sample=False, ), LCMScheduler.from_config( components["scheduler"].config, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1, clip_sample=False, ), ] components.pop("scheduler") for scheduler in schedulers_to_test: components["scheduler"] = scheduler pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_init(num_iters=2, use_fast_sampling=False) inputs = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results", ) def test_free_noise_blocks(self): components = self.get_dummy_components() pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_noise() for block in pipe.unet.down_blocks: for motion_module in 
block.motion_modules: for transformer_block in motion_module.transformer_blocks: self.assertTrue( isinstance(transformer_block, FreeNoiseTransformerBlock), "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", ) pipe.disable_free_noise() for block in pipe.unet.down_blocks: for motion_module in block.motion_modules: for transformer_block in motion_module.transformer_blocks: self.assertFalse( isinstance(transformer_block, FreeNoiseTransformerBlock), "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", ) def test_free_noise(self): components = self.get_dummy_components() pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] for context_length in [8, 9]: for context_stride in [4, 6]: pipe.enable_free_noise(context_length, context_stride) inputs_enable_free_noise = self.get_dummy_inputs(torch_device) frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] pipe.disable_free_noise() inputs_disable_free_noise = self.get_dummy_inputs(torch_device) frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeNoise should lead to results different from the default pipeline results", ) self.assertLess( max_diff_disabled, 1e-4, "Disabling of FreeNoise should lead to results similar to the default pipeline results", ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") def test_vae_slicing(self): return super().test_vae_slicing(image_count=2) def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # base pipeline (expect same output when pag is disabled) components.pop("pag_applied_layers", None) pipe_sd = AnimateDiffPipeline(**components) pipe_sd = pipe_sd.to(device) pipe_sd.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] assert ( "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters ), f"`pag_scale` should not be 
a call parameter of the base pipeline {pipe_sd.__class__.__name__}." out = pipe_sd(**inputs).frames[0, -3:, -3:, -1] components = self.get_dummy_components() # pag disabled with pag_scale=0.0 pipe_pag = self.pipeline_class(**components) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["pag_scale"] = 0.0 out_pag_disabled = pipe_pag(**inputs).frames[0, -3:, -3:, -1] # pag enabled pipe_pag = self.pipeline_class(**components) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) out_pag_enabled = pipe_pag(**inputs).frames[0, -3:, -3:, -1] assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 def test_pag_applied_layers(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # base pipeline components.pop("pag_applied_layers", None) pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) # pag_applied_layers = ["mid","up","down"] should apply to all self-attention layers # Note that for motion modules in AnimateDiff, both attn1 and attn2 are self-attention all_self_attn_layers = [ k for k in pipe.unet.attn_processors.keys() if "attn1" in k or ("motion_modules" in k and "attn2" in k) ] original_attn_procs = pipe.unet.attn_processors pag_layers = [ "down", "mid", "up", ] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) # pag_applied_layers = ["mid"], or ["mid_block.0"] should apply to all self-attention layers in mid_block, i.e. 
# mid_block.motion_modules.0.transformer_blocks.0.attn1.processor # mid_block.attentions.0.transformer_blocks.0.attn1.processor all_self_attn_mid_layers = [ "mid_block.attentions.0.transformer_blocks.0.attn1.processor", "mid_block.motion_modules.0.transformer_blocks.0.attn1.processor", "mid_block.motion_modules.0.transformer_blocks.0.attn2.processor", ] pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid_block"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid_block.(attentions|motion_modules)"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid_block.attentions.1"] with self.assertRaises(ValueError): pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) # pag_applied_layers = "down" should apply to all self-attention layers in down_blocks # down_blocks.1.(attentions|motion_modules).0.transformer_blocks.0.attn1.processor # down_blocks.1.(attentions|motion_modules).0.transformer_blocks.1.attn1.processor # down_blocks.1.(attentions|motion_modules).0.transformer_blocks.0.attn1.processor pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["down"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert len(pipe.pag_attn_processors) == 10 pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["down_blocks.0"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert (len(pipe.pag_attn_processors)) == 6 pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["blocks.1"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert len(pipe.pag_attn_processors) == 10 pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["motion_modules.42"] with self.assertRaises(ValueError): pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
diffusers/tests/pipelines/pag/test_pag_animatediff.py/0
{ "file_path": "diffusers/tests/pipelines/pag/test_pag_animatediff.py", "repo_id": "diffusers", "token_count": 10886 }
164
# These are canonical sets of parameters for different types of pipelines. # They are set on subclasses of `PipelineTesterMixin` as `params` and # `batch_params`. # # If a pipeline's set of arguments has minor changes from one of the common sets # of arguments, do not make modifications to the existing common sets of arguments. # I.e. a text to image pipeline with non-configurable height and width arguments # should set its attribute as `params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. TEXT_TO_IMAGE_PARAMS = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"]) TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([]) IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"]) IMAGE_VARIATION_PARAMS = frozenset( [ "image", "height", "width", "guidance_scale", ] ) IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"]) TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset( [ "prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"]) TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset( [ # Text guided image variation with an image mask "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) IMAGE_INPAINTING_PARAMS = frozenset( [ # image variation with an image mask "image", "mask_image", "height", "width", "guidance_scale", ] ) IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"]) IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset( [ "example_image", "image", "mask_image", "height", "width", "guidance_scale", ] ) IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"]) CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"]) CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"]) UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"]) UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([]) UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"]) UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([]) TEXT_TO_AUDIO_PARAMS = frozenset( [ "prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"]) TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"]) TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"]) TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS = frozenset(["prompt_embeds"]) VIDEO_TO_VIDEO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt", "video"])
diffusers/tests/pipelines/pipeline_params.py/0
{ "file_path": "diffusers/tests/pipelines/pipeline_params.py", "repo_id": "diffusers", "token_count": 1584 }
165
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, StableCascadeDecoderPipeline from diffusers.models import StableCascadeUNet from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, load_pt, numpy_cosine_similarity_distance, require_torch_gpu, skip_mps, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class StableCascadeDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableCascadeDecoderPipeline params = ["prompt"] batch_params = ["image_embeddings", "prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, "out_channels": 4, "conditioning_dim": 128, "block_out_channels": [16, 32, 64, 128], "num_attention_heads": [-1, -1, 1, 2], "down_num_layers_per_block": [1, 1, 1, 1], "up_num_layers_per_block": [1, 1, 1, 1], "down_blocks_repeat_mappers": [1, 1, 1, 1], "up_blocks_repeat_mappers": [3, 3, 2, 2], "block_types_per_layer": [ ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ], "switch_level": None, "clip_text_pooled_in_channels": 32, "dropout": [0.1, 0.1, 0.1, 0.1], } model = StableCascadeUNet(**model_kwargs) return model.eval() def get_dummy_components(self): decoder = self.dummy_decoder text_encoder = self.dummy_text_encoder 
tokenizer = self.dummy_tokenizer vqgan = self.dummy_vqgan scheduler = DDPMWuerstchenScheduler() components = { "decoder": decoder, "vqgan": vqgan, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "latent_dim_scale": 4.0, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeddings": torch.ones((1, 4, 4, 4), device=device), "prompt": "horse", "generator": generator, "guidance_scale": 2.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_decoder(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False) image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="fp16 not supported") def test_float16_inference(self): super().test_float16_inference() def test_stable_cascade_decoder_prompt_embeds(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image_embeddings = inputs["image_embeddings"] prompt = "A photograph of a shiba inu, wearing a hat" ( prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled, ) = pipe.encode_prompt(device, 1, 1, False, prompt=prompt) generator = torch.Generator(device=device) decoder_output_prompt = pipe( image_embeddings=image_embeddings, prompt=prompt, num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) decoder_output_prompt_embeds = pipe( image_embeddings=image_embeddings, prompt=None, prompt_embeds=prompt_embeds, prompt_embeds_pooled=prompt_embeds_pooled, negative_prompt_embeds=negative_prompt_embeds, negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, num_inference_steps=1, output_type="np", generator=generator.manual_seed(0), ) assert np.abs(decoder_output_prompt.images - decoder_output_prompt_embeds.images).max() < 1e-5 def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) prior_num_images_per_prompt = 2 decoder_num_images_per_prompt = 2 prompt = ["a cat"] batch_size = len(prompt) generator = torch.Generator(device) image_embeddings = randn_tensor( (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) ) decoder_output = pipe( image_embeddings=image_embeddings, 
prompt=prompt, num_inference_steps=1, output_type="np", guidance_scale=0.0, generator=generator.manual_seed(0), num_images_per_prompt=decoder_num_images_per_prompt, ) assert decoder_output.images.shape[0] == ( batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt ) def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings_with_guidance(self): device = "cpu" components = self.get_dummy_components() pipe = StableCascadeDecoderPipeline(**components) pipe.set_progress_bar_config(disable=None) prior_num_images_per_prompt = 2 decoder_num_images_per_prompt = 2 prompt = ["a cat"] batch_size = len(prompt) generator = torch.Generator(device) image_embeddings = randn_tensor( (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) ) decoder_output = pipe( image_embeddings=image_embeddings, prompt=prompt, num_inference_steps=1, output_type="np", guidance_scale=2.0, generator=generator.manual_seed(0), num_images_per_prompt=decoder_num_images_per_prompt, ) assert decoder_output.images.shape[0] == ( batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt ) @slow @require_torch_gpu class StableCascadeDecoderPipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_cascade_decoder(self): pipe = StableCascadeDecoderPipeline.from_pretrained( "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." generator = torch.Generator(device="cpu").manual_seed(0) image_embedding = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/image_embedding.pt" ) image = pipe( prompt=prompt, image_embeddings=image_embedding, output_type="np", num_inference_steps=2, generator=generator, ).images[0] assert image.shape == (1024, 1024, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/stable_cascade_decoder_image.npy" ) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py/0
{ "file_path": "diffusers/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py", "repo_id": "diffusers", "token_count": 5590 }
166
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import nightly, require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@nightly
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            variant="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])

        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2


@nightly
@require_flax
class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            variant="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py", "repo_id": "diffusers", "token_count": 1712 }
167
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLInpaintPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionXLInpaintPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( { "add_text_embeds", "add_time_ids", "mask", "masked_image_latents", } ) def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": image_encoder, "feature_extractor": feature_extractor, "requires_aesthetics_score": True, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # create mask image[8:, 8:, :] = 255 mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "strength": 1.0, "output_type": "np", } return inputs def get_dummy_inputs_2images(self, device, seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_ip_adapter_single(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.8274, 0.5538, 0.6141, 0.5843, 0.6865, 0.7082, 0.5861, 0.6123, 0.5344]) return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice) def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def 
test_stable_diffusion_xl_inpaint_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.8279, 0.5673, 0.6088, 0.6156, 0.6923, 0.7347, 0.6547, 0.6108, 0.5198]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_inpaint_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_stable_diffusion_xl_inpaint_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) 
image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_refiner(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(skip_first_text_encoder=True) sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.7540, 0.5231, 0.5833, 0.6217, 0.6339, 0.7067, 0.6507, 0.5672, 0.5030]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 
= {**inputs, **{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [7, 20]: assert_run_mixture(steps, 0.33, EulerDiscreteScheduler) assert_run_mixture(steps, 0.33, HeunDiscreteScheduler) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" for steps in [5, 8, 20]: for split in [0.33, 0.49, 0.71]: for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split, scheduler_cls) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass 
pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert ( expected_steps_1 == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert ( expected_steps == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - 
image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_stable_diffusion_xl_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = sd_pipe._encode_vae_image(image, generator=generator) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = sd_pipe._encode_vae_image(masked_image, generator=generator) inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] = mask inputs["strength"] = 0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 def test_stable_diffusion_xl_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = 
torch.Generator(device=device).manual_seed(0) gen2 = torch.Generator(device=device).manual_seed(0) for name in ["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 5 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4)
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py", "repo_id": "diffusers", "token_count": 15816 }
168
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoSDPipeline, UNet3DConditionModel from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, SDFunctionTesterMixin enable_full_determinism() @skip_mps class TextToVideoSDPipelineFastTests(PipelineTesterMixin, SDFunctionTesterMixin, unittest.TestCase): pipeline_class = TextToVideoSDPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet3DConditionModel( block_out_channels=(8, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=4, attention_head_dim=4, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=(8,), in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D"], latent_channels=4, sample_size=32, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=4, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() def test_text_to_video_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = TextToVideoSDPipeline(**components) sd_pipe = sd_pipe.to(device) 
sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["output_type"] = "np" frames = sd_pipe(**inputs).frames image_slice = frames[0][0][-3:, -3:, -1] assert frames[0][0].shape == (32, 32, 3) expected_slice = np.array([0.8093, 0.2751, 0.6976, 0.5927, 0.4616, 0.4336, 0.5094, 0.5683, 0.4796]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", reason="Feature isn't heavily used. Test in CUDA environment only.") def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2) # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_single_identical(self): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_num_images_per_prompt(self): pass def test_progress_bar(self): return super().test_progress_bar() @slow @skip_mps @require_torch_gpu class TextToVideoSDPipelineSlowTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_two_step_model(self): expected_video = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text-to-video/video_2step.npy" ) pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b") pipe = pipe.to(torch_device) prompt = "Spiderman is surfing" generator = torch.Generator(device="cpu").manual_seed(0) video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").frames assert numpy_cosine_similarity_distance(expected_video.flatten(), video_frames.flatten()) < 1e-4 def test_two_step_model_with_freeu(self): expected_video = [] pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b") pipe = pipe.to(torch_device) prompt = "Spiderman is surfing" generator = torch.Generator(device="cpu").manual_seed(0) pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").frames video = video_frames[0, 0, -3:, -3:, -1].flatten() expected_video = [0.3643, 0.3455, 0.3831, 0.3923, 0.2978, 0.3247, 0.3278, 0.3201, 0.3475] assert np.abs(expected_video - video).mean() < 5e-2
diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py/0
{ "file_path": "diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py", "repo_id": "diffusers", "token_count": 3591 }
169
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from diffusers import StableCascadeUNet from diffusers.utils import logging from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_gpu, slow, ) logger = logging.get_logger(__name__) enable_full_determinism() @slow @require_torch_gpu class StableCascadeUNetSingleFileTest(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_single_file_components_stage_b(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade", variant="bf16", subfolder="decoder", use_safetensors=True ) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between single file loading and pretrained loading" def test_single_file_components_stage_b_lite(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade", variant="bf16", subfolder="decoder_lite" ) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between single file loading and pretrained loading" def test_single_file_components_stage_c(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior" ) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between single file loading and pretrained loading" def test_single_file_components_stage_c_lite(self): model_single_file = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_lite_bf16.safetensors", torch_dtype=torch.bfloat16, ) model = StableCascadeUNet.from_pretrained( "stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior_lite" ) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", 
"_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( model.config[param_name] == param_value ), f"{param_name} differs between single file loading and pretrained loading"
diffusers/tests/single_file/test_model_sd_cascade_unet_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_model_sd_cascade_unet_single_file.py", "repo_id": "diffusers", "token_count": 1929 }
170
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
diffusers/utils/check_doc_toc.py/0
{ "file_path": "diffusers/utils/check_doc_toc.py", "repo_id": "diffusers", "token_count": 2177 }
171
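A worked example of what `clean_doc_toc` from the script above does to a toy table-of-contents fragment: duplicates are collapsed, entries are sorted by title, and the overview entry stays first. The toy entries are made up for illustration.

toy_toc = [
    {"local": "api/schedulers/ddim", "title": "DDIM"},
    {"local": "api/schedulers/overview", "title": "Overview"},
    {"local": "api/schedulers/lms_discrete", "title": "LMSDiscrete"},
    {"local": "api/schedulers/ddim", "title": "DDIM"},  # duplicate entry with an identical title
]

print(clean_doc_toc(toy_toc))
# [{'local': 'api/schedulers/overview', 'title': 'Overview'},
#  {'local': 'api/schedulers/ddim', 'title': 'DDIM'},
#  {'local': 'api/schedulers/lms_discrete', 'title': 'LMSDiscrete'}]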
# Copyright 2024 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ import os from datetime import datetime as dt from datetime import timezone from github import Github LABELS_TO_EXEMPT = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/diffusers") open_issues = repo.get_issues(state="open") for issue in open_issues: labels = [label.name.lower() for label in issue.get_labels()] if "stale" in labels: comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if last_comment is not None and last_comment.user.login != "github-actions[bot]": # Opens the issue if someone other than Stalebot commented. issue.edit(state="open") issue.remove_from_labels("stale") elif ( (dt.now(timezone.utc) - issue.updated_at).days > 23 and (dt.now(timezone.utc) - issue.created_at).days >= 30 and not any(label in LABELS_TO_EXEMPT for label in labels) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) issue.add_to_labels("stale") if __name__ == "__main__": main()
diffusers/utils/stale.py/0
{ "file_path": "diffusers/utils/stale.py", "repo_id": "diffusers", "token_count": 996 }
172
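The staleness condition in the script above combines two date thresholds (more than 23 days since the last update, at least 30 days since creation) with the exempt-label check. A minimal sketch of that predicate in isolation, assuming timezone-aware datetimes like the ones PyGithub returns and the `LABELS_TO_EXEMPT` list defined above; `is_stale_candidate` is a hypothetical helper, not part of the script.

from datetime import datetime, timezone


def is_stale_candidate(updated_at: datetime, created_at: datetime, labels: list[str]) -> bool:
    # Mirrors the condition used by the stale bot: inactive for more than 23 days,
    # at least 30 days old, and not carrying any exempt label.
    now = datetime.now(timezone.utc)
    return (
        (now - updated_at).days > 23
        and (now - created_at).days >= 30
        and not any(label in LABELS_TO_EXEMPT for label in labels)
    )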
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Assess the performance of video decoding in various configurations. This script will benchmark different video encoding and decoding parameters. See the provided README.md or run `python benchmark/video/run_video_benchmark.py --help` for usage info. """ import argparse import datetime as dt import random import shutil from collections import OrderedDict from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path import einops import numpy as np import pandas as pd import PIL import torch from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity from tqdm import tqdm from lerobot.common.datasets.lerobot_dataset import LeRobotDataset from lerobot.common.datasets.video_utils import ( decode_video_frames_torchvision, encode_video_frames, ) from lerobot.common.utils.benchmark import TimeBenchmark BASE_ENCODING = OrderedDict( [ ("vcodec", "libx264"), ("pix_fmt", "yuv444p"), ("g", 2), ("crf", None), # TODO(aliberts): Add fastdecode # ("fastdecode", 0), ] ) # TODO(rcadene, aliberts): move to `utils.py` folder when we want to refactor def parse_int_or_none(value) -> int | None: if value.lower() == "none": return None try: return int(value) except ValueError as e: raise argparse.ArgumentTypeError(f"Invalid int or None: {value}") from e def check_datasets_formats(repo_ids: list) -> None: for repo_id in repo_ids: dataset = LeRobotDataset(repo_id) if dataset.video: raise ValueError( f"Use only image dataset for running this benchmark. 
Video dataset provided: {repo_id}" ) def get_directory_size(directory: Path) -> int: total_size = 0 for item in directory.rglob("*"): if item.is_file(): total_size += item.stat().st_size return total_size def load_original_frames(imgs_dir: Path, timestamps: list[float], fps: int) -> torch.Tensor: frames = [] for ts in timestamps: idx = int(ts * fps) frame = PIL.Image.open(imgs_dir / f"frame_{idx:06d}.png") frame = torch.from_numpy(np.array(frame)) frame = frame.type(torch.float32) / 255 frame = einops.rearrange(frame, "h w c -> c h w") frames.append(frame) return torch.stack(frames) def save_decoded_frames( imgs_dir: Path, save_dir: Path, frames: torch.Tensor, timestamps: list[float], fps: int ) -> None: if save_dir.exists() and len(list(save_dir.glob("frame_*.png"))) == len(timestamps): return save_dir.mkdir(parents=True, exist_ok=True) for i, ts in enumerate(timestamps): idx = int(ts * fps) frame_hwc = (frames[i].permute((1, 2, 0)) * 255).type(torch.uint8).cpu().numpy() PIL.Image.fromarray(frame_hwc).save(save_dir / f"frame_{idx:06d}_decoded.png") shutil.copyfile(imgs_dir / f"frame_{idx:06d}.png", save_dir / f"frame_{idx:06d}_original.png") def save_first_episode(imgs_dir: Path, dataset: LeRobotDataset) -> None: ep_num_images = dataset.episode_data_index["to"][0].item() if imgs_dir.exists() and len(list(imgs_dir.glob("frame_*.png"))) == ep_num_images: return imgs_dir.mkdir(parents=True, exist_ok=True) hf_dataset = dataset.hf_dataset.with_format(None) # We only save images from the first camera img_keys = [key for key in hf_dataset.features if key.startswith("observation.image")] imgs_dataset = hf_dataset.select_columns(img_keys[0]) for i, item in enumerate( tqdm(imgs_dataset, desc=f"saving {dataset.repo_id} first episode images", leave=False) ): img = item[img_keys[0]] img.save(str(imgs_dir / f"frame_{i:06d}.png"), quality=100) if i >= ep_num_images - 1: break def sample_timestamps(timestamps_mode: str, ep_num_images: int, fps: int) -> list[float]: # Start at 5 to allow for 2_frames_4_space and 6_frames idx = random.randint(5, ep_num_images - 1) match timestamps_mode: case "1_frame": frame_indexes = [idx] case "2_frames": frame_indexes = [idx - 1, idx] case "2_frames_4_space": frame_indexes = [idx - 5, idx] case "6_frames": frame_indexes = [idx - i for i in range(6)][::-1] case _: raise ValueError(timestamps_mode) return [idx / fps for idx in frame_indexes] def decode_video_frames( video_path: str, timestamps: list[float], tolerance_s: float, backend: str, ) -> torch.Tensor: if backend in ["pyav", "video_reader"]: return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) else: raise NotImplementedError(backend) def benchmark_decoding( imgs_dir: Path, video_path: Path, timestamps_mode: str, backend: str, ep_num_images: int, fps: int, num_samples: int = 50, num_workers: int = 4, save_frames: bool = False, ) -> dict: def process_sample(sample: int): time_benchmark = TimeBenchmark() timestamps = sample_timestamps(timestamps_mode, ep_num_images, fps) num_frames = len(timestamps) result = { "psnr_values": [], "ssim_values": [], "mse_values": [], } with time_benchmark: frames = decode_video_frames(video_path, timestamps=timestamps, tolerance_s=5e-1, backend=backend) result["load_time_video_ms"] = time_benchmark.result_ms / num_frames with time_benchmark: original_frames = load_original_frames(imgs_dir, timestamps, fps) result["load_time_images_ms"] = time_benchmark.result_ms / num_frames frames_np, original_frames_np = frames.numpy(), original_frames.numpy() for i in 
range(num_frames): result["mse_values"].append(mean_squared_error(original_frames_np[i], frames_np[i])) result["psnr_values"].append( peak_signal_noise_ratio(original_frames_np[i], frames_np[i], data_range=1.0) ) result["ssim_values"].append( structural_similarity(original_frames_np[i], frames_np[i], data_range=1.0, channel_axis=0) ) if save_frames and sample == 0: save_dir = video_path.with_suffix("") / f"{timestamps_mode}_{backend}" save_decoded_frames(imgs_dir, save_dir, frames, timestamps, fps) return result load_times_video_ms = [] load_times_images_ms = [] mse_values = [] psnr_values = [] ssim_values = [] # A sample is a single set of decoded frames specified by timestamps_mode (e.g. a single frame, 2 frames, etc.). # For each sample, we record metrics (loading time and quality metrics) which are then averaged over all samples. # As these samples are independent, we run them in parallel threads to speed up the benchmark. with ThreadPoolExecutor(max_workers=num_workers) as executor: futures = [executor.submit(process_sample, i) for i in range(num_samples)] for future in tqdm(as_completed(futures), total=num_samples, desc="samples", leave=False): result = future.result() load_times_video_ms.append(result["load_time_video_ms"]) load_times_images_ms.append(result["load_time_images_ms"]) psnr_values.extend(result["psnr_values"]) ssim_values.extend(result["ssim_values"]) mse_values.extend(result["mse_values"]) avg_load_time_video_ms = float(np.array(load_times_video_ms).mean()) avg_load_time_images_ms = float(np.array(load_times_images_ms).mean()) video_images_load_time_ratio = avg_load_time_video_ms / avg_load_time_images_ms return { "avg_load_time_video_ms": avg_load_time_video_ms, "avg_load_time_images_ms": avg_load_time_images_ms, "video_images_load_time_ratio": video_images_load_time_ratio, "avg_mse": float(np.mean(mse_values)), "avg_psnr": float(np.mean(psnr_values)), "avg_ssim": float(np.mean(ssim_values)), } def benchmark_encoding_decoding( dataset: LeRobotDataset, video_path: Path, imgs_dir: Path, encoding_cfg: dict, decoding_cfg: dict, num_samples: int, num_workers: int, save_frames: bool, overwrite: bool = False, seed: int = 1337, ) -> list[dict]: fps = dataset.fps if overwrite or not video_path.is_file(): tqdm.write(f"encoding {video_path}") encode_video_frames( imgs_dir=imgs_dir, video_path=video_path, fps=fps, vcodec=encoding_cfg["vcodec"], pix_fmt=encoding_cfg["pix_fmt"], g=encoding_cfg.get("g"), crf=encoding_cfg.get("crf"), # fast_decode=encoding_cfg.get("fastdecode"), overwrite=True, ) ep_num_images = dataset.episode_data_index["to"][0].item() width, height = tuple(dataset[0][dataset.camera_keys[0]].shape[-2:]) num_pixels = width * height video_size_bytes = video_path.stat().st_size images_size_bytes = get_directory_size(imgs_dir) video_images_size_ratio = video_size_bytes / images_size_bytes random.seed(seed) benchmark_table = [] for timestamps_mode in tqdm( decoding_cfg["timestamps_modes"], desc="decodings (timestamps_modes)", leave=False ): for backend in tqdm(decoding_cfg["backends"], desc="decodings (backends)", leave=False): benchmark_row = benchmark_decoding( imgs_dir, video_path, timestamps_mode, backend, ep_num_images, fps, num_samples, num_workers, save_frames, ) benchmark_row.update( **{ "repo_id": dataset.repo_id, "resolution": f"{width} x {height}", "num_pixels": num_pixels, "video_size_bytes": video_size_bytes, "images_size_bytes": images_size_bytes, "video_images_size_ratio": video_images_size_ratio, "timestamps_mode": timestamps_mode, "backend": backend, }, 
**encoding_cfg, ) benchmark_table.append(benchmark_row) return benchmark_table def main( output_dir: Path, repo_ids: list[str], vcodec: list[str], pix_fmt: list[str], g: list[int], crf: list[int], # fastdecode: list[int], timestamps_modes: list[str], backends: list[str], num_samples: int, num_workers: int, save_frames: bool, ): check_datasets_formats(repo_ids) encoding_benchmarks = { "g": g, "crf": crf, # "fastdecode": fastdecode, } decoding_benchmarks = { "timestamps_modes": timestamps_modes, "backends": backends, } headers = ["repo_id", "resolution", "num_pixels"] headers += list(BASE_ENCODING.keys()) headers += [ "timestamps_mode", "backend", "video_size_bytes", "images_size_bytes", "video_images_size_ratio", "avg_load_time_video_ms", "avg_load_time_images_ms", "video_images_load_time_ratio", "avg_mse", "avg_psnr", "avg_ssim", ] file_paths = [] for video_codec in tqdm(vcodec, desc="encodings (vcodec)"): for pixel_format in tqdm(pix_fmt, desc="encodings (pix_fmt)", leave=False): benchmark_table = [] for repo_id in tqdm(repo_ids, desc="encodings (datasets)", leave=False): dataset = LeRobotDataset(repo_id) imgs_dir = output_dir / "images" / dataset.repo_id.replace("/", "_") # We only use the first episode save_first_episode(imgs_dir, dataset) for key, values in tqdm(encoding_benchmarks.items(), desc="encodings (g, crf)", leave=False): for value in tqdm(values, desc=f"encodings ({key})", leave=False): encoding_cfg = BASE_ENCODING.copy() encoding_cfg["vcodec"] = video_codec encoding_cfg["pix_fmt"] = pixel_format encoding_cfg[key] = value args_path = Path("_".join(str(value) for value in encoding_cfg.values())) video_path = output_dir / "videos" / args_path / f"{repo_id.replace('/', '_')}.mp4" benchmark_table += benchmark_encoding_decoding( dataset, video_path, imgs_dir, encoding_cfg, decoding_benchmarks, num_samples, num_workers, save_frames, ) # Save intermediate results benchmark_df = pd.DataFrame(benchmark_table, columns=headers) now = dt.datetime.now() csv_path = ( output_dir / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_{video_codec}_{pixel_format}_{num_samples}-samples.csv" ) benchmark_df.to_csv(csv_path, header=True, index=False) file_paths.append(csv_path) del benchmark_df # Concatenate all results df_list = [pd.read_csv(csv_path) for csv_path in file_paths] concatenated_df = pd.concat(df_list, ignore_index=True) concatenated_path = output_dir / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_all_{num_samples}-samples.csv" concatenated_df.to_csv(concatenated_path, header=True, index=False) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--output-dir", type=Path, default=Path("outputs/video_benchmark"), help="Directory where the video benchmark outputs are written.", ) parser.add_argument( "--repo-ids", type=str, nargs="*", default=[ "lerobot/pusht_image", "aliberts/aloha_mobile_shrimp_image", "aliberts/paris_street", "aliberts/kitchen", ], help="Datasets repo-ids to test against. First episodes only are used. 
Must be images.", ) parser.add_argument( "--vcodec", type=str, nargs="*", default=["libx264", "libx265", "libsvtav1"], help="Video codecs to be tested", ) parser.add_argument( "--pix-fmt", type=str, nargs="*", default=["yuv444p", "yuv420p"], help="Pixel formats (chroma subsampling) to be tested", ) parser.add_argument( "--g", type=parse_int_or_none, nargs="*", default=[1, 2, 3, 4, 5, 6, 10, 15, 20, 40, 100, None], help="Group of pictures sizes to be tested.", ) parser.add_argument( "--crf", type=parse_int_or_none, nargs="*", default=[0, 5, 10, 15, 20, 25, 30, 40, 50, None], help="Constant rate factors to be tested.", ) # parser.add_argument( # "--fastdecode", # type=int, # nargs="*", # default=[0, 1], # help="Use the fastdecode tuning option. 0 disables it. " # "For libx264 and libx265, only 1 is possible. " # "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization", # ) parser.add_argument( "--timestamps-modes", type=str, nargs="*", default=[ "1_frame", "2_frames", "2_frames_4_space", "6_frames", ], help="Timestamps scenarios to be tested.", ) parser.add_argument( "--backends", type=str, nargs="*", default=["pyav", "video_reader"], help="Torchvision decoding backend to be tested.", ) parser.add_argument( "--num-samples", type=int, default=50, help="Number of samples for each encoding x decoding config.", ) parser.add_argument( "--num-workers", type=int, default=10, help="Number of processes for parallelized sample processing.", ) parser.add_argument( "--save-frames", type=int, default=0, help="Whether to save decoded frames or not. Enter a non-zero number for true.", ) args = parser.parse_args() main(**vars(args))
lerobot/benchmarks/video/run_video_benchmark.py/0
{ "file_path": "lerobot/benchmarks/video/run_video_benchmark.py", "repo_id": "lerobot", "token_count": 8152 }
173
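Because the benchmark is argparse-driven, `main` from the script above can also be called directly with a reduced grid for a quick smoke test. This is a sketch only: it assumes the listed repo-id is available as an image dataset, and every value below is illustrative rather than a recommended setting.

from pathlib import Path

main(
    output_dir=Path("outputs/video_benchmark_smoke"),
    repo_ids=["lerobot/pusht_image"],
    vcodec=["libx264"],
    pix_fmt=["yuv420p"],
    g=[2],
    crf=[30],
    timestamps_modes=["1_frame", "2_frames"],
    backends=["pyav"],
    num_samples=5,
    num_workers=2,
    save_frames=0,
)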
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy from math import ceil import einops import torch import tqdm from datasets import Image from lerobot.common.datasets.video_utils import VideoFrame def get_stats_einops_patterns(dataset, num_workers=0): """These einops patterns will be used to aggregate batches and compute statistics. Note: We assume the images are in channel first format """ dataloader = torch.utils.data.DataLoader( dataset, num_workers=num_workers, batch_size=2, shuffle=False, ) batch = next(iter(dataloader)) stats_patterns = {} for key, feats_type in dataset.features.items(): # NOTE: skip language_instruction embedding in stats computation if key == "language_instruction": continue # sanity check that tensors are not float64 assert batch[key].dtype != torch.float64 if isinstance(feats_type, (VideoFrame, Image)): # sanity check that images are channel first _, c, h, w = batch[key].shape assert c < h and c < w, f"expect channel first images, but instead {batch[key].shape}" # sanity check that images are float32 in range [0,1] assert batch[key].dtype == torch.float32, f"expect torch.float32, but instead {batch[key].dtype=}" assert batch[key].max() <= 1, f"expect pixels lower than 1, but instead {batch[key].max()=}" assert batch[key].min() >= 0, f"expect pixels greater than 1, but instead {batch[key].min()=}" stats_patterns[key] = "b c h w -> c 1 1" elif batch[key].ndim == 2: stats_patterns[key] = "b c -> c " elif batch[key].ndim == 1: stats_patterns[key] = "b -> 1" else: raise ValueError(f"{key}, {feats_type}, {batch[key].shape}") return stats_patterns def compute_stats(dataset, batch_size=32, num_workers=16, max_num_samples=None): """Compute mean/std and min/max statistics of all data keys in a LeRobotDataset.""" if max_num_samples is None: max_num_samples = len(dataset) # for more info on why we need to set the same number of workers, see `load_from_videos` stats_patterns = get_stats_einops_patterns(dataset, num_workers) # mean and std will be computed incrementally while max and min will track the running value. mean, std, max, min = {}, {}, {}, {} for key in stats_patterns: mean[key] = torch.tensor(0.0).float() std[key] = torch.tensor(0.0).float() max[key] = torch.tensor(-float("inf")).float() min[key] = torch.tensor(float("inf")).float() def create_seeded_dataloader(dataset, batch_size, seed): generator = torch.Generator() generator.manual_seed(seed) dataloader = torch.utils.data.DataLoader( dataset, num_workers=num_workers, batch_size=batch_size, shuffle=True, drop_last=False, generator=generator, ) return dataloader # Note: Due to be refactored soon. The point of storing `first_batch` is to make sure we don't get # surprises when rerunning the sampler. 
first_batch = None running_item_count = 0 # for online mean computation dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337) for i, batch in enumerate( tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute mean, min, max") ): this_batch_size = len(batch["index"]) running_item_count += this_batch_size if first_batch is None: first_batch = deepcopy(batch) for key, pattern in stats_patterns.items(): batch[key] = batch[key].float() # Numerically stable update step for mean computation. batch_mean = einops.reduce(batch[key], pattern, "mean") # Hint: to update the mean we need x̄ₙ = (Nₙ₋₁x̄ₙ₋₁ + Bₙxₙ) / Nₙ, where the subscript represents # the update step, N is the running item count, B is this batch size, x̄ is the running mean, # and x is the current batch mean. Some rearrangement is then required to avoid risking # numerical overflow. Another hint: Nₙ₋₁ = Nₙ - Bₙ. Rearrangement yields # x̄ₙ = x̄ₙ₋₁ + Bₙ * (xₙ - x̄ₙ₋₁) / Nₙ mean[key] = mean[key] + this_batch_size * (batch_mean - mean[key]) / running_item_count max[key] = torch.maximum(max[key], einops.reduce(batch[key], pattern, "max")) min[key] = torch.minimum(min[key], einops.reduce(batch[key], pattern, "min")) if i == ceil(max_num_samples / batch_size) - 1: break first_batch_ = None running_item_count = 0 # for online std computation dataloader = create_seeded_dataloader(dataset, batch_size, seed=1337) for i, batch in enumerate( tqdm.tqdm(dataloader, total=ceil(max_num_samples / batch_size), desc="Compute std") ): this_batch_size = len(batch["index"]) running_item_count += this_batch_size # Sanity check to make sure the batches are still in the same order as before. if first_batch_ is None: first_batch_ = deepcopy(batch) for key in stats_patterns: assert torch.equal(first_batch_[key], first_batch[key]) for key, pattern in stats_patterns.items(): batch[key] = batch[key].float() # Numerically stable update step for mean computation (where the mean is over squared # residuals).See notes in the mean computation loop above. batch_std = einops.reduce((batch[key] - mean[key]) ** 2, pattern, "mean") std[key] = std[key] + this_batch_size * (batch_std - std[key]) / running_item_count if i == ceil(max_num_samples / batch_size) - 1: break for key in stats_patterns: std[key] = torch.sqrt(std[key]) stats = {} for key in stats_patterns: stats[key] = { "mean": mean[key], "std": std[key], "max": max[key], "min": min[key], } return stats def aggregate_stats(ls_datasets) -> dict[str, torch.Tensor]: """Aggregate stats of multiple LeRobot datasets into one set of stats without recomputing from scratch. The final stats will have the union of all data keys from each of the datasets. The final stats will have the union of all data keys from each of the datasets. For instance: - new_max = max(max_dataset_0, max_dataset_1, ...) - new_min = min(min_dataset_0, min_dataset_1, ...) - new_mean = (mean of all data) - new_std = (std of all data) """ data_keys = set() for dataset in ls_datasets: data_keys.update(dataset.stats.keys()) stats = {k: {} for k in data_keys} for data_key in data_keys: for stat_key in ["min", "max"]: # compute `max(dataset_0["max"], dataset_1["max"], ...)` stats[data_key][stat_key] = einops.reduce( torch.stack([d.stats[data_key][stat_key] for d in ls_datasets if data_key in d.stats], dim=0), "n ... 
-> ...", stat_key, ) total_samples = sum(d.num_samples for d in ls_datasets if data_key in d.stats) # Compute the "sum" statistic by multiplying each mean by the number of samples in the respective # dataset, then divide by total_samples to get the overall "mean". # NOTE: the brackets around (d.num_samples / total_samples) are needed tor minimize the risk of # numerical overflow! stats[data_key]["mean"] = sum( d.stats[data_key]["mean"] * (d.num_samples / total_samples) for d in ls_datasets if data_key in d.stats ) # The derivation for standard deviation is a little more involved but is much in the same spirit as # the computation of the mean. # Given two sets of data where the statistics are known: # σ_combined = sqrt[ (n1 * (σ1^2 + d1^2) + n2 * (σ2^2 + d2^2)) / (n1 + n2) ] # where d1 = μ1 - μ_combined, d2 = μ2 - μ_combined # NOTE: the brackets around (d.num_samples / total_samples) are needed tor minimize the risk of # numerical overflow! stats[data_key]["std"] = torch.sqrt( sum( (d.stats[data_key]["std"] ** 2 + (d.stats[data_key]["mean"] - stats[data_key]["mean"]) ** 2) * (d.num_samples / total_samples) for d in ls_datasets if data_key in d.stats ) ) return stats
lerobot/lerobot/common/datasets/compute_stats.py/0
{ "file_path": "lerobot/lerobot/common/datasets/compute_stats.py", "repo_id": "lerobot", "token_count": 3939 }
174
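The incremental mean update used in `compute_stats` above, x̄ₙ = x̄ₙ₋₁ + Bₙ·(xₙ − x̄ₙ₋₁)/Nₙ, can be sanity-checked on plain numbers. A small self-contained sketch with made-up batches:

import torch

batches = [torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0])]

running_mean = torch.tensor(0.0)
running_count = 0
for batch in batches:
    batch_size = batch.numel()
    running_count += batch_size
    # x̄ₙ = x̄ₙ₋₁ + Bₙ * (xₙ - x̄ₙ₋₁) / Nₙ, with xₙ the mean of the current batch.
    running_mean = running_mean + batch_size * (batch.mean() - running_mean) / running_count

# The running estimate matches the mean computed over all samples at once (both equal 3.0).
assert torch.isclose(running_mean, torch.cat(batches).mean())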
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Use this script to batch encode lerobot dataset from their raw format to LeRobotDataset and push their updated version to the hub. Under the hood, this script reuses 'push_dataset_to_hub.py'. It assumes that you already downloaded raw datasets, which you can do with the related '_download_raw.py' script. For instance, for codebase_version = 'v1.6', the following command was run, assuming raw datasets from lerobot-raw were downloaded in 'raw/datasets/directory': ```bash python lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py \ --raw-dir raw/datasets/directory \ --raw-repo-ids lerobot-raw \ --local-dir push/datasets/directory \ --tests-data-dir tests/data \ --push-repo lerobot \ --vcodec libsvtav1 \ --pix-fmt yuv420p \ --g 2 \ --crf 30 ``` """ import argparse from pathlib import Path from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub._download_raw import AVAILABLE_RAW_REPO_IDS from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id from lerobot.scripts.push_dataset_to_hub import push_dataset_to_hub def get_push_repo_id_from_raw(raw_repo_id: str, push_repo: str) -> str: dataset_id_raw = raw_repo_id.split("/")[1] dataset_id = dataset_id_raw.removesuffix("_raw") return f"{push_repo}/{dataset_id}" def encode_datasets( raw_dir: Path, raw_repo_ids: list[str], push_repo: str, vcodec: str, pix_fmt: str, g: int, crf: int, local_dir: Path | None = None, tests_data_dir: Path | None = None, raw_format: str | None = None, dry_run: bool = False, ) -> None: if len(raw_repo_ids) == 1 and raw_repo_ids[0].lower() == "lerobot-raw": raw_repo_ids_format = AVAILABLE_RAW_REPO_IDS else: if raw_format is None: raise ValueError(raw_format) raw_repo_ids_format = {id_: raw_format for id_ in raw_repo_ids} for raw_repo_id, repo_raw_format in raw_repo_ids_format.items(): check_repo_id(raw_repo_id) dataset_repo_id_push = get_push_repo_id_from_raw(raw_repo_id, push_repo) dataset_raw_dir = raw_dir / raw_repo_id dataset_dir = local_dir / dataset_repo_id_push if local_dir is not None else None encoding = { "vcodec": vcodec, "pix_fmt": pix_fmt, "g": g, "crf": crf, } if not (dataset_raw_dir).is_dir(): raise NotADirectoryError(dataset_raw_dir) if not dry_run: push_dataset_to_hub( dataset_raw_dir, raw_format=repo_raw_format, repo_id=dataset_repo_id_push, local_dir=dataset_dir, resume=True, encoding=encoding, tests_data_dir=tests_data_dir, ) else: print( f"DRY RUN: {dataset_raw_dir} --> {dataset_dir} --> {dataset_repo_id_push}@{CODEBASE_VERSION}" ) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--raw-dir", type=Path, default=Path("data"), help="Directory where raw datasets are located.", ) parser.add_argument( "--raw-repo-ids", type=str, nargs="*", default=["lerobot-raw"], help="""Raw dataset repo ids. 
if 'lerobot-raw', the keys from `AVAILABLE_RAW_REPO_IDS` will be used and raw datasets will be fetched from the 'lerobot-raw/' repo and pushed with their associated format. It is assumed that each dataset is located at `raw_dir / raw_repo_id` """, ) parser.add_argument( "--raw-format", type=str, default=None, help="""Raw format to use for the raw repo-ids. Must be specified if --raw-repo-ids is not 'lerobot-raw'""", ) parser.add_argument( "--local-dir", type=Path, default=None, help="""When provided, writes the dataset converted to LeRobotDataset format in this directory (e.g. `data/lerobot/aloha_mobile_chair`).""", ) parser.add_argument( "--push-repo", type=str, default="lerobot", help="Repo to upload datasets to", ) parser.add_argument( "--vcodec", type=str, default="libsvtav1", help="Codec to use for encoding videos", ) parser.add_argument( "--pix-fmt", type=str, default="yuv420p", help="Pixel formats (chroma subsampling) to be used for encoding", ) parser.add_argument( "--g", type=int, default=2, help="Group of pictures sizes to be used for encoding.", ) parser.add_argument( "--crf", type=int, default=30, help="Constant rate factors to be used for encoding.", ) parser.add_argument( "--tests-data-dir", type=Path, default=None, help=( "When provided, save tests artifacts into the given directory " "(e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id})." ), ) parser.add_argument( "--dry-run", type=int, default=0, help="If not set to 0, this script won't download or upload anything.", ) args = parser.parse_args() encode_datasets(**vars(args)) if __name__ == "__main__": main()
lerobot/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py/0
{ "file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py", "repo_id": "lerobot", "token_count": 2733 }
175
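The repo-id mapping performed by `get_push_repo_id_from_raw` in the script above is small enough to show directly; the example values are illustrative only.

print(get_push_repo_id_from_raw("lerobot-raw/pusht_raw", push_repo="lerobot"))
# lerobot/pusht

print(get_push_repo_id_from_raw("lerobot-raw/aloha_sim_insertion_human_raw", push_repo="my-org"))
# my-org/aloha_sim_insertion_human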
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import re import warnings from functools import cache from pathlib import Path from typing import Dict import datasets import torch from datasets import load_dataset, load_from_disk from huggingface_hub import DatasetCard, HfApi, hf_hub_download, snapshot_download from PIL import Image as PILImage from safetensors.torch import load_file from torchvision import transforms DATASET_CARD_TEMPLATE = """ --- # Metadata will go there --- This dataset was created using [🤗 LeRobot](https://github.com/huggingface/lerobot). """ def flatten_dict(d, parent_key="", sep="/"): """Flatten a nested dictionary structure by collapsing nested keys into one key with a separator. For example: ``` >>> dct = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}` >>> print(flatten_dict(dct)) {"a/b": 1, "a/c/d": 2, "e": 3} """ items = [] for k, v in d.items(): new_key = f"{parent_key}{sep}{k}" if parent_key else k if isinstance(v, dict): items.extend(flatten_dict(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) def unflatten_dict(d, sep="/"): outdict = {} for key, value in d.items(): parts = key.split(sep) d = outdict for part in parts[:-1]: if part not in d: d[part] = {} d = d[part] d[parts[-1]] = value return outdict def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]): """Get a transform function that convert items from Hugging Face dataset (pyarrow) to torch tensors. Importantly, images are converted from PIL, which corresponds to a channel last representation (h w c) of uint8 type, to a torch image representation with channel first (c h w) of float32 type in range [0,1]. """ for key in items_dict: first_item = items_dict[key][0] if isinstance(first_item, PILImage.Image): to_tensor = transforms.ToTensor() items_dict[key] = [to_tensor(img) for img in items_dict[key]] elif isinstance(first_item, str): # TODO (michel-aractingi): add str2embedding via language tokenizer # For now we leave this part up to the user to choose how to address # language conditioned tasks pass elif isinstance(first_item, dict) and "path" in first_item and "timestamp" in first_item: # video frame will be processed downstream pass elif first_item is None: pass else: items_dict[key] = [torch.tensor(x) for x in items_dict[key]] return items_dict @cache def get_hf_dataset_safe_version(repo_id: str, version: str) -> str: api = HfApi() dataset_info = api.list_repo_refs(repo_id, repo_type="dataset") branches = [b.name for b in dataset_info.branches] if version not in branches: warnings.warn( f"""You are trying to load a dataset from {repo_id} created with a previous version of the codebase. The following versions are available: {branches}. The requested version ('{version}') is not found. You should be fine since backward compatibility is maintained. 
If you encounter a problem, contact LeRobot maintainers on Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""", stacklevel=1, ) if "main" not in branches: raise ValueError(f"Version 'main' not found on {repo_id}") return "main" else: return version def load_hf_dataset(repo_id: str, version: str, root: Path, split: str) -> datasets.Dataset: """hf_dataset contains all the observations, states, actions, rewards, etc.""" if root is not None: hf_dataset = load_from_disk(str(Path(root) / repo_id / "train")) # TODO(rcadene): clean this which enables getting a subset of dataset if split != "train": if "%" in split: raise NotImplementedError(f"We dont support splitting based on percentage for now ({split}).") match_from = re.search(r"train\[(\d+):\]", split) match_to = re.search(r"train\[:(\d+)\]", split) if match_from: from_frame_index = int(match_from.group(1)) hf_dataset = hf_dataset.select(range(from_frame_index, len(hf_dataset))) elif match_to: to_frame_index = int(match_to.group(1)) hf_dataset = hf_dataset.select(range(to_frame_index)) else: raise ValueError( f'`split` ({split}) should either be "train", "train[INT:]", or "train[:INT]"' ) else: safe_version = get_hf_dataset_safe_version(repo_id, version) hf_dataset = load_dataset(repo_id, revision=safe_version, split=split) hf_dataset.set_transform(hf_transform_to_torch) return hf_dataset def load_episode_data_index(repo_id, version, root) -> dict[str, torch.Tensor]: """episode_data_index contains the range of indices for each episode Example: ```python from_id = episode_data_index["from"][episode_id].item() to_id = episode_data_index["to"][episode_id].item() episode_frames = [dataset[i] for i in range(from_id, to_id)] ``` """ if root is not None: path = Path(root) / repo_id / "meta_data" / "episode_data_index.safetensors" else: safe_version = get_hf_dataset_safe_version(repo_id, version) path = hf_hub_download( repo_id, "meta_data/episode_data_index.safetensors", repo_type="dataset", revision=safe_version ) return load_file(path) def load_stats(repo_id, version, root) -> dict[str, dict[str, torch.Tensor]]: """stats contains the statistics per modality computed over the full dataset, such as max, min, mean, std Example: ```python normalized_action = (action - stats["action"]["mean"]) / stats["action"]["std"] ``` """ if root is not None: path = Path(root) / repo_id / "meta_data" / "stats.safetensors" else: safe_version = get_hf_dataset_safe_version(repo_id, version) path = hf_hub_download( repo_id, "meta_data/stats.safetensors", repo_type="dataset", revision=safe_version ) stats = load_file(path) return unflatten_dict(stats) def load_info(repo_id, version, root) -> dict: """info contains useful information regarding the dataset that are not stored elsewhere Example: ```python print("frame per second used to collect the video", info["fps"]) ``` """ if root is not None: path = Path(root) / repo_id / "meta_data" / "info.json" else: safe_version = get_hf_dataset_safe_version(repo_id, version) path = hf_hub_download(repo_id, "meta_data/info.json", repo_type="dataset", revision=safe_version) with open(path) as f: info = json.load(f) return info def load_videos(repo_id, version, root) -> Path: if root is not None: path = Path(root) / repo_id / "videos" else: # TODO(rcadene): we download the whole repo here. 
see if we can avoid this safe_version = get_hf_dataset_safe_version(repo_id, version) repo_dir = snapshot_download(repo_id, repo_type="dataset", revision=safe_version) path = Path(repo_dir) / "videos" return path def load_previous_and_future_frames( item: dict[str, torch.Tensor], hf_dataset: datasets.Dataset, episode_data_index: dict[str, torch.Tensor], delta_timestamps: dict[str, list[float]], tolerance_s: float, ) -> dict[torch.Tensor]: """ Given a current item in the dataset containing a timestamp (e.g. 0.6 seconds), and a list of time differences of some modalities (e.g. delta_timestamps={"observation.image": [-0.8, -0.2, 0, 0.2]}), this function computes for each given modality (e.g. "observation.image") a list of query timestamps (e.g. [-0.2, 0.4, 0.6, 0.8]) and loads the closest frames in the dataset. Importantly, when no frame can be found around a query timestamp within a specified tolerance window, this function raises an AssertionError. When a timestamp is queried before the first available timestamp of the episode or after the last available timestamp, the violation of the tolerance doesnt raise an AssertionError, and the function populates a boolean array indicating which frames are outside of the episode range. For instance, this boolean array is useful during batched training to not supervise actions associated to timestamps coming after the end of the episode, or to pad the observations in a specific way. Note that by default the observation frames before the start of the episode are the same as the first frame of the episode. Parameters: - item (dict): A dictionary containing all the data related to a frame. It is the result of `dataset[idx]`. Each key corresponds to a different modality (e.g., "timestamp", "observation.image", "action"). - hf_dataset (datasets.Dataset): A dictionary containing the full dataset. Each key corresponds to a different modality (e.g., "timestamp", "observation.image", "action"). - episode_data_index (dict): A dictionary containing two keys ("from" and "to") associated to dataset indices. They indicate the start index and end index of each episode in the dataset. - delta_timestamps (dict): A dictionary containing lists of delta timestamps for each possible modality to be retrieved. These deltas are added to the item timestamp to form the query timestamps. - tolerance_s (float, optional): The tolerance level (in seconds) used to determine if a data point is close enough to the query timestamp by asserting `tol > difference`. It is suggested to set `tol` to a smaller value than the smallest expected inter-frame period, but large enough to account for jitter. Returns: - The same item with the queried frames for each modality specified in delta_timestamps, with an additional key for each modality (e.g. "observation.image_is_pad"). Raises: - AssertionError: If any of the frames unexpectedly violate the tolerance level. This could indicate synchronization issues with timestamps during data collection. 
""" # get indices of the frames associated to the episode, and their timestamps ep_id = item["episode_index"].item() ep_data_id_from = episode_data_index["from"][ep_id].item() ep_data_id_to = episode_data_index["to"][ep_id].item() ep_data_ids = torch.arange(ep_data_id_from, ep_data_id_to, 1) # load timestamps ep_timestamps = hf_dataset.select_columns("timestamp")[ep_data_id_from:ep_data_id_to]["timestamp"] ep_timestamps = torch.stack(ep_timestamps) # we make the assumption that the timestamps are sorted ep_first_ts = ep_timestamps[0] ep_last_ts = ep_timestamps[-1] current_ts = item["timestamp"].item() for key in delta_timestamps: # get timestamps used as query to retrieve data of previous/future frames delta_ts = delta_timestamps[key] query_ts = current_ts + torch.tensor(delta_ts) # compute distances between each query timestamp and all timestamps of all the frames belonging to the episode dist = torch.cdist(query_ts[:, None], ep_timestamps[:, None], p=1) min_, argmin_ = dist.min(1) # TODO(rcadene): synchronize timestamps + interpolation if needed is_pad = min_ > tolerance_s # check violated query timestamps are all outside the episode range assert ((query_ts[is_pad] < ep_first_ts) | (ep_last_ts < query_ts[is_pad])).all(), ( f"One or several timestamps unexpectedly violate the tolerance ({min_} > {tolerance_s=}) inside episode range." "This might be due to synchronization issues with timestamps during data collection." ) # get dataset indices corresponding to frames to be loaded data_ids = ep_data_ids[argmin_] # load frames modality item[key] = hf_dataset.select_columns(key)[data_ids][key] if isinstance(item[key][0], dict) and "path" in item[key][0]: # video mode where frame are expressed as dict of path and timestamp item[key] = item[key] else: item[key] = torch.stack(item[key]) item[f"{key}_is_pad"] = is_pad return item def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]: """ Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset. Parameters: - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index. Returns: - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys: - "from": A tensor containing the starting index of each episode. - "to": A tensor containing the ending index of each episode. """ episode_data_index = {"from": [], "to": []} current_episode = None """ The episode_index is a list of integers, each representing the episode index of the corresponding example. For instance, the following is a valid episode_index: [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2] Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and ending index of each episode. 
For the episode_index above, the episode_data_index dictionary will look like this: { "from": [0, 3, 7], "to": [3, 7, 12] } """ if len(hf_dataset) == 0: episode_data_index = { "from": torch.tensor([]), "to": torch.tensor([]), } return episode_data_index for idx, episode_idx in enumerate(hf_dataset["episode_index"]): if episode_idx != current_episode: # We encountered a new episode, so we append its starting location to the "from" list episode_data_index["from"].append(idx) # If this is not the first episode, we append the ending location of the previous episode to the "to" list if current_episode is not None: episode_data_index["to"].append(idx) # Let's keep track of the current episode index current_episode = episode_idx else: # We are still in the same episode, so there is nothing for us to do here pass # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list episode_data_index["to"].append(idx + 1) for k in ["from", "to"]: episode_data_index[k] = torch.tensor(episode_data_index[k]) return episode_data_index def reset_episode_index(hf_dataset: datasets.Dataset) -> datasets.Dataset: """Reset the `episode_index` of the provided HuggingFace Dataset. `episode_data_index` (and related functionality such as `load_previous_and_future_frames`) requires the `episode_index` to be sorted, continuous (1,1,1 and not 1,2,1) and start at 0. This brings the `episode_index` to the required format. """ if len(hf_dataset) == 0: return hf_dataset unique_episode_idxs = torch.stack(hf_dataset["episode_index"]).unique().tolist() episode_idx_to_reset_idx_mapping = { ep_id: reset_ep_id for reset_ep_id, ep_id in enumerate(unique_episode_idxs) } def modify_ep_idx_func(example): example["episode_index"] = episode_idx_to_reset_idx_mapping[example["episode_index"].item()] return example hf_dataset = hf_dataset.map(modify_ep_idx_func) return hf_dataset def cycle(iterable): """The equivalent of itertools.cycle, but safe for Pytorch dataloaders. See https://github.com/pytorch/pytorch/issues/23900 for information on why itertools.cycle is not safe. """ iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) def create_branch(repo_id, *, branch: str, repo_type: str | None = None): """Create a branch on a existing Hugging Face repo. Delete the branch if it already exists before creating it. """ api = HfApi() branches = api.list_repo_refs(repo_id, repo_type=repo_type).branches refs = [branch.ref for branch in branches] ref = f"refs/heads/{branch}" if ref in refs: api.delete_branch(repo_id, repo_type=repo_type, branch=branch) api.create_branch(repo_id, repo_type=repo_type, branch=branch) def create_lerobot_dataset_card(tags: list | None = None, text: str | None = None) -> DatasetCard: card = DatasetCard(DATASET_CARD_TEMPLATE) card.data.task_categories = ["robotics"] card.data.tags = ["LeRobot"] if tags is not None: card.data.tags += tags if text is not None: card.text += text return card
lerobot/lerobot/common/datasets/utils.py/0
{ "file_path": "lerobot/lerobot/common/datasets/utils.py", "repo_id": "lerobot", "token_count": 6829 }
176
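`flatten_dict` and `unflatten_dict` from the utilities above are exact inverses for "/"-separated keys, which is what lets `load_stats` round-trip nested statistics through the flat key space of a safetensors file. A short sketch with toy tensors:

import torch

stats = {
    "observation.state": {"mean": torch.zeros(2), "std": torch.ones(2)},
    "action": {"min": torch.tensor([-1.0]), "max": torch.tensor([1.0])},
}

flat = flatten_dict(stats)
print(sorted(flat))
# ['action/max', 'action/min', 'observation.state/mean', 'observation.state/std']

restored = unflatten_dict(flat)
assert restored.keys() == stats.keys()
assert torch.equal(restored["action"]["max"], stats["action"]["max"])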
#!/usr/bin/env python # Copyright 2024 Seungjae Lee and Yibin Wang and Haritheja Etukuru # and H. Jin Kim and Nur Muhammad Mahi Shafiullah and Lerrel Pinto # and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import warnings from collections import deque from typing import Callable, List import einops import numpy as np import torch import torch.nn.functional as F # noqa: N812 import torchvision from huggingface_hub import PyTorchModelHubMixin from torch import Tensor, nn from torch.optim.lr_scheduler import LambdaLR from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.utils import get_device_from_parameters, populate_queues from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.common.policies.vqbet.vqbet_utils import GPT, ResidualVQ # ruff: noqa: N806 class VQBeTPolicy( nn.Module, PyTorchModelHubMixin, library_name="lerobot", repo_url="https://github.com/huggingface/lerobot", tags=["robotics", "vqbet"], ): """ VQ-BeT Policy as per "Behavior Generation with Latent Actions" """ name = "vqbet" def __init__( self, config: VQBeTConfig | None = None, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): """ Args: config: Policy configuration class instance or None, in which case the default instantiation of the configuration class is used. dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. """ super().__init__() if config is None: config = VQBeTConfig() self.config = config self.normalize_inputs = Normalize( config.input_shapes, config.input_normalization_modes, dataset_stats ) self.normalize_targets = Normalize( config.output_shapes, config.output_normalization_modes, dataset_stats ) self.unnormalize_outputs = Unnormalize( config.output_shapes, config.output_normalization_modes, dataset_stats ) self.vqbet = VQBeTModel(config) self.expected_image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] self.reset() def reset(self): """ Clear observation and action queues. Should be called on `env.reset()` queues are populated during rollout of the policy, they contain the n latest observations and actions """ self._queues = { "observation.images": deque(maxlen=self.config.n_obs_steps), "observation.state": deque(maxlen=self.config.n_obs_steps), "action": deque(maxlen=self.config.action_chunk_size), } @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations. This method wraps `select_actions` in order to return one action at a time for execution in the environment. It works by managing the actions in a queue and only calling `select_actions` when the queue is empty. 
""" batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) # Note: It's important that this happens after stacking the images into a single key. self._queues = populate_queues(self._queues, batch) if not self.vqbet.action_head.vqvae_model.discretized.item(): warnings.warn( "To evaluate in the environment, your VQ-BeT model should contain a pretrained Residual VQ.", stacklevel=1, ) if len(self._queues["action"]) == 0: batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} actions = self.vqbet(batch, rollout=True)[:, : self.config.action_chunk_size] # the dimension of returned action is (batch_size, action_chunk_size, action_dim) actions = self.unnormalize_outputs({"action": actions})["action"] # since the data in the action queue's dimension is (action_chunk_size, batch_size, action_dim), we transpose the action and fill the queue self._queues["action"].extend(actions.transpose(0, 1)) action = self._queues["action"].popleft() return action def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch["observation.images"] = torch.stack([batch[k] for k in self.expected_image_keys], dim=-4) batch = self.normalize_targets(batch) # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://arxiv.org/pdf/2403.03181) if not self.vqbet.action_head.vqvae_model.discretized.item(): # loss: total loss of training RVQ # n_different_codes: how many of the total possible VQ codes are being used in single batch (how many of them have at least one encoder embedding as a nearest neighbor). This can be at most `vqvae_n_embed * number of layers of RVQ (=2)`. # n_different_combinations: how many different code combinations are being used out of all possible combinations in single batch. This can be at most `vqvae_n_embed ^ number of layers of RVQ (=2)` (hint consider the RVQ as a decision tree). loss, n_different_codes, n_different_combinations, recon_l1_error = ( self.vqbet.action_head.discretize(self.config.n_vqvae_training_steps, batch["action"]) ) return { "loss": loss, "n_different_codes": n_different_codes, "n_different_combinations": n_different_combinations, "recon_l1_error": recon_l1_error, } # if Residual VQ is already trained, VQ-BeT trains its GPT and bin prediction head / offset prediction head parts. _, loss_dict = self.vqbet(batch, rollout=False) return loss_dict class SpatialSoftmax(nn.Module): """ Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al. (https://arxiv.org/pdf/1509.06113). A minimal port of the robomimic implementation. At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass" of activations of each channel, i.e., keypoints in the image space for the policy to focus on. Example: take feature maps of size (512x10x12). We generate a grid of normalized coordinates (10x12x2): ----------------------------------------------------- | (-1., -1.) | (-0.82, -1.) | ... | (1., -1.) | | (-1., -0.78) | (-0.82, -0.78) | ... | (1., -0.78) | | ... | ... | ... | ... | | (-1., 1.) | (-0.82, 1.) | ... | (1., 1.) 
| ----------------------------------------------------- This is achieved by applying channel-wise softmax over the activations (512x120) and computing the dot product with the coordinates (120x2) to get expected points of maximal activation (512x2). The example above results in 512 keypoints (corresponding to the 512 input channels). We can optionally provide num_kp != None to control the number of keypoints. This is achieved by a first applying a learnable linear mapping (in_channels, H, W) -> (num_kp, H, W). """ def __init__(self, input_shape, num_kp=None): """ Args: input_shape (list): (C, H, W) input feature map shape. num_kp (int): number of keypoints in output. If None, output will have the same number of channels as input. """ super().__init__() assert len(input_shape) == 3 self._in_c, self._in_h, self._in_w = input_shape if num_kp is not None: self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) self._out_c = num_kp else: self.nets = None self._out_c = self._in_c # we could use torch.linspace directly but that seems to behave slightly differently than numpy # and causes a small degradation in pc_success of pre-trained models. pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h)) pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float() pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float() # register as buffer so it's moved to the correct device. self.register_buffer("pos_grid", torch.cat([pos_x, pos_y], dim=1)) def forward(self, features: Tensor) -> Tensor: """ Args: features: (B, C, H, W) input feature maps. Returns: (B, K, 2) image-space coordinates of keypoints. """ if self.nets is not None: features = self.nets(features) # [B, K, H, W] -> [B * K, H * W] where K is number of keypoints features = features.reshape(-1, self._in_h * self._in_w) # 2d softmax normalization attention = F.softmax(features, dim=-1) # [B * K, H * W] x [H * W, 2] -> [B * K, 2] for spatial coordinate mean in x and y dimensions expected_xy = attention @ self.pos_grid # reshape to [B, K, 2] feature_keypoints = expected_xy.view(-1, self._out_c, 2) return feature_keypoints class VQBeTModel(nn.Module): """VQ-BeT: The underlying neural network for VQ-BeT Note: In this code we use the terms `rgb_encoder`, 'policy', `action_head`. The meanings are as follows. - The `rgb_encoder` process rgb-style image observations to one-dimensional embedding vectors - A `policy` is a minGPT architecture, that takes observation sequences and action query tokens to generate `features`. - These `features` pass through the action head, which passes through the code prediction, offset prediction head, and finally generates a prediction for the action chunks. -------------------------------** legend **------------------------------- │ n = n_obs_steps, p = n_action_pred_token, c = action_chunk_size) │ │ o_{t} : visual observation at timestep {t} │ │ s_{t} : state observation at timestep {t} │ │ a_{t} : action at timestep {t} │ │ A_Q : action_query_token │ -------------------------------------------------------------------------- Training Phase 1. Discretize action using Residual VQ (for config.n_vqvae_training_steps steps) ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │ RVQ encoder │ ─► │ Residual │ ─► │ RVQ Decoder │ │ (a_{t}~a_{t+p}) │ │ Code Quantizer │ │ │ │ │ │ │ │ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ Training Phase 2. 
timestep {t-n+1} timestep {t-n+2} timestep {t} ┌─────┴─────┐ ┌─────┴─────┐ ┌─────┴─────┐ o_{t-n+1} o_{t-n+2} ... o_{t} │ │ │ │ s_{t-n+1} │ s_{t-n+2} ... │ s_{t} p │ │ │ │ │ │ ┌───────┴───────┐ │ │ A_Q │ │ A_Q ... │ │ A_Q ... A_Q │ │ │ │ │ │ │ │ │ │ ┌───▼─────▼─────▼─────▼─────▼─────▼─────────────────▼─────▼─────▼───────────────▼───┐ │ │ │ GPT │ => policy │ │ └───────────────▼─────────────────▼─────────────────────────────▼───────────────▼───┘ │ │ │ │ ┌───┴───┐ ┌───┴───┐ ┌───┴───┐ ┌───┴───┐ code offset code offset code offset code offset ▼ │ ▼ │ ▼ │ ▼ │ => action_head RVQ Decoder │ RVQ Decoder │ RVQ Decoder │ RVQ Decoder │ └── + ──┘ └── + ──┘ └── + ──┘ └── + ──┘ ▼ ▼ ▼ ▼ action chunk action chunk action chunk action chunk a_{t-n+1} ~ a_{t-n+2} ~ a_{t} ~ ... a_{t+p-1} ~ a_{t-n+c} a_{t-n+c+1} a_{t+c-1} a_{t+p+c-1} ▼ ONLY this chunk is used in rollout! """ def __init__(self, config: VQBeTConfig): super().__init__() self.config = config self.rgb_encoder = VQBeTRgbEncoder(config) self.num_images = len([k for k in config.input_shapes if k.startswith("observation.image")]) # This action query token is used as a prompt for querying action chunks. Please refer to "A_Q" in the image above. # Note: During the forward pass, this token is repeated as many times as needed. The authors also experimented with initializing the necessary number of tokens independently and observed inferior results. self.action_token = nn.Parameter(torch.randn(1, 1, self.config.gpt_input_dim)) # To input state and observation features into GPT layers, we first project the features to fit the shape of input size of GPT. self.state_projector = MLP( config.input_shapes["observation.state"][0], hidden_channels=[self.config.gpt_input_dim] ) self.rgb_feature_projector = MLP( self.rgb_encoder.feature_dim, hidden_channels=[self.config.gpt_input_dim] ) # GPT part of VQ-BeT self.policy = GPT(config) # bin prediction head / offset prediction head part of VQ-BeT self.action_head = VQBeTHead(config) # Action tokens for: each observation step, the current action token, and all future action tokens. num_tokens = self.config.n_action_pred_token + self.config.n_obs_steps - 1 self.register_buffer( "select_target_actions_indices", torch.row_stack([torch.arange(i, i + self.config.action_chunk_size) for i in range(num_tokens)]), ) def forward(self, batch: dict[str, Tensor], rollout: bool) -> Tensor: # Input validation. assert set(batch).issuperset({"observation.state", "observation.images"}) batch_size, n_obs_steps = batch["observation.state"].shape[:2] assert n_obs_steps == self.config.n_obs_steps # Extract image feature (first combine batch and sequence dims). img_features = self.rgb_encoder( einops.rearrange(batch["observation.images"], "b s n ... -> (b s n) ...") ) # Separate batch and sequence dims. img_features = einops.rearrange( img_features, "(b s n) ... -> b s n ...", b=batch_size, s=n_obs_steps, n=self.num_images ) # Arrange prior and current observation step tokens as shown in the class docstring. # First project features to token dimension. rgb_tokens = self.rgb_feature_projector( img_features ) # (batch, obs_step, number of different cameras, projection dims) input_tokens = [rgb_tokens[:, :, i] for i in range(rgb_tokens.size(2))] input_tokens.append( self.state_projector(batch["observation.state"]) ) # (batch, obs_step, projection dims) input_tokens.append(einops.repeat(self.action_token, "1 1 d -> b n d", b=batch_size, n=n_obs_steps)) # Interleave tokens by stacking and rearranging. 
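        # After the stack + rearrange below, tokens within each observation step are ordered as
        # [camera_0, ..., camera_{k-1}, state, A_Q], so the flattened sequence contains
        # n_obs_steps * (num_images + 2) tokens before the extra action query tokens are appended.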
        input_tokens = torch.stack(input_tokens, dim=2)
        input_tokens = einops.rearrange(input_tokens, "b n t d -> b (n t) d")
        len_additional_action_token = self.config.n_action_pred_token - 1
        future_action_tokens = self.action_token.repeat(batch_size, len_additional_action_token, 1)

        # add additional action query tokens for predicting future action chunks
        input_tokens = torch.cat([input_tokens, future_action_tokens], dim=1)

        # get action features (pass through GPT)
        features = self.policy(input_tokens)
        # len(self.config.input_shapes) is the number of different observation modes.
        # This line gets the indices of the action query tokens.
        historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_shapes) + 1) + len(
            self.config.input_shapes
        )

        # only extract the output tokens at the position of action query:
        # Behavior Transformer (BeT) and VQ-BeT are both sequence-to-sequence prediction models, mapping sequential observations to sequential actions (please refer to section 2.2 in the BeT paper https://arxiv.org/pdf/2206.11251).
        # Thus, they predict the historical action sequence in addition to current and future actions (predicting future actions is optional).
        features = torch.cat(
            [features[:, historical_act_pred_index], features[:, -len_additional_action_token:]], dim=1
        )
        # pass through action head
        action_head_output = self.action_head(features)
        # during rollout, VQ-BeT doesn't calculate the loss
        if rollout:
            return action_head_output["predicted_action"][:, n_obs_steps - 1, :].reshape(
                batch_size, self.config.action_chunk_size, -1
            )
        # otherwise, it calculates the overall loss (bin prediction loss and offset loss)
        else:
            output = batch["action"][:, self.select_target_actions_indices]
            loss = self.action_head.loss_fn(action_head_output, output, reduction="mean")
            return action_head_output, loss


class VQBeTHead(nn.Module):
    def __init__(self, config: VQBeTConfig):
        """
        VQBeTHead takes the output of the GPT layers and passes the features through the bin prediction head (`self.map_to_cbet_preds_bin`) and the offset prediction head (`self.map_to_cbet_preds_offset`).

        self.map_to_cbet_preds_bin: outputs the probability of each code (for each layer).
            The input dimension of `self.map_to_cbet_preds_bin` is the same as the output of the GPT,
            and its output dimension is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed`.
            If the agent selects the codes sequentially, we use `self.map_to_cbet_preds_primary_bin` and `self.map_to_cbet_preds_secondary_bin` instead of `self.map_to_cbet_preds_bin`.

        self.map_to_cbet_preds_offset: outputs the predicted offsets for all the codes in all the layers.
            The input dimension of `self.map_to_cbet_preds_offset` is the same as the output of the GPT,
            and its output dimension is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed * config.action_chunk_size * config.output_shapes["action"][0]`.
""" super().__init__() self.config = config # init vqvae self.vqvae_model = VqVae(config) if config.sequentially_select: self.map_to_cbet_preds_primary_bin = MLP( in_channels=config.gpt_output_dim, hidden_channels=[self.config.vqvae_n_embed], ) self.map_to_cbet_preds_secondary_bin = MLP( in_channels=config.gpt_output_dim + self.config.vqvae_n_embed, hidden_channels=[self.config.vqvae_n_embed], ) else: self.map_to_cbet_preds_bin = MLP( in_channels=config.gpt_output_dim, hidden_channels=[self.vqvae_model.vqvae_num_layers * self.config.vqvae_n_embed], ) self.map_to_cbet_preds_offset = MLP( in_channels=config.gpt_output_dim, hidden_channels=[ self.vqvae_model.vqvae_num_layers * self.config.vqvae_n_embed * config.action_chunk_size * config.output_shapes["action"][0], ], ) # loss self._focal_loss_fn = FocalLoss(gamma=2.0) def discretize(self, n_vqvae_training_steps, actions): # Resize the action sequence data to fit the action chunk size using a sliding window approach. actions = torch.cat( [ actions[:, j : j + self.config.action_chunk_size, :] for j in range(actions.shape[1] + 1 - self.config.action_chunk_size) ], dim=0, ) # `actions` is a tensor of shape (new_batch, action_chunk_size, action_dim) where new_batch is the number of possible chunks created from the original sequences using the sliding window. loss, metric = self.vqvae_model.vqvae_forward(actions) n_different_codes = sum( [len(torch.unique(metric[2][:, i])) for i in range(self.vqvae_model.vqvae_num_layers)] ) n_different_combinations = len(torch.unique(metric[2], dim=0)) recon_l1_error = metric[0].detach().cpu().item() self.vqvae_model.optimized_steps += 1 # if we updated RVQ more than `n_vqvae_training_steps` steps, we freeze the RVQ part. if self.vqvae_model.optimized_steps >= n_vqvae_training_steps: self.vqvae_model.discretized = torch.tensor(True) self.vqvae_model.vq_layer.freeze_codebook = torch.tensor(True) print("Finished discretizing action data!") self.vqvae_model.eval() for param in self.vqvae_model.vq_layer.parameters(): param.requires_grad = False return loss, n_different_codes, n_different_combinations, recon_l1_error def forward(self, x, **kwargs): # N is the batch size, and T is number of action query tokens, which are process through same GPT N, T, _ = x.shape # we calculate N and T side parallely. 
Thus, the dimensions would be # (batch size * number of action query tokens, action chunk size, action dimension) x = einops.rearrange(x, "N T WA -> (N T) WA") # sample offsets cbet_offsets = self.map_to_cbet_preds_offset(x) cbet_offsets = einops.rearrange( cbet_offsets, "(NT) (G C WA) -> (NT) G C WA", G=self.vqvae_model.vqvae_num_layers, C=self.config.vqvae_n_embed, ) # if self.config.sequentially_select is True, bin prediction head first sample the primary code, and then sample secondary code if self.config.sequentially_select: cbet_primary_logits = self.map_to_cbet_preds_primary_bin(x) # select primary bin first cbet_primary_probs = torch.softmax( cbet_primary_logits / self.config.bet_softmax_temperature, dim=-1 ) NT, choices = cbet_primary_probs.shape sampled_primary_centers = einops.rearrange( torch.multinomial(cbet_primary_probs.view(-1, choices), num_samples=1), "(NT) 1 -> NT", NT=NT, ) cbet_secondary_logits = self.map_to_cbet_preds_secondary_bin( torch.cat( (x, F.one_hot(sampled_primary_centers, num_classes=self.config.vqvae_n_embed)), axis=1, ) ) cbet_secondary_probs = torch.softmax( cbet_secondary_logits / self.config.bet_softmax_temperature, dim=-1 ) sampled_secondary_centers = einops.rearrange( torch.multinomial(cbet_secondary_probs.view(-1, choices), num_samples=1), "(NT) 1 -> NT", NT=NT, ) sampled_centers = torch.stack((sampled_primary_centers, sampled_secondary_centers), axis=1) cbet_logits = torch.stack([cbet_primary_logits, cbet_secondary_logits], dim=1) # if self.config.sequentially_select is False, bin prediction head samples primary and secondary code at once. else: cbet_logits = self.map_to_cbet_preds_bin(x) cbet_logits = einops.rearrange( cbet_logits, "(NT) (G C) -> (NT) G C", G=self.vqvae_model.vqvae_num_layers ) cbet_probs = torch.softmax(cbet_logits / self.config.bet_softmax_temperature, dim=-1) NT, G, choices = cbet_probs.shape sampled_centers = einops.rearrange( torch.multinomial(cbet_probs.view(-1, choices), num_samples=1), "(NT G) 1 -> NT G", NT=NT, ) device = get_device_from_parameters(self) indices = ( torch.arange(NT, device=device).unsqueeze(1), torch.arange(self.vqvae_model.vqvae_num_layers, device=device).unsqueeze(0), sampled_centers, ) # Use advanced indexing to sample the values (Extract the only offsets corresponding to the sampled codes.) sampled_offsets = cbet_offsets[indices] # Then, sum the offsets over the RVQ layers to get a net offset for the bin prediction sampled_offsets = sampled_offsets.sum(dim=1) with torch.no_grad(): # Get the centroids (= vectors corresponding to the codes) of each layer to pass it through RVQ decoder return_decoder_input = self.vqvae_model.get_embeddings_from_code(sampled_centers).clone().detach() # pass the centroids through decoder to get actions. decoded_action = self.vqvae_model.get_action_from_latent(return_decoder_input).clone().detach() # reshaped extracted offset to match with decoded centroids sampled_offsets = einops.rearrange( sampled_offsets, "NT (W A) -> NT W A", W=self.config.action_chunk_size ) # add offset and decoded centroids predicted_action = decoded_action + sampled_offsets predicted_action = einops.rearrange( predicted_action, "(N T) W A -> N T (W A)", N=N, T=T, W=self.config.action_chunk_size, ) return { "cbet_logits": cbet_logits, "predicted_action": predicted_action, "sampled_centers": sampled_centers, "decoded_action": decoded_action, } def loss_fn(self, pred, target, **kwargs): """ for given ground truth action values (target), and prediction (pred) this function calculates the overall loss. 
predicted_action: predicted action chunk (offset + decoded centroids) sampled_centers: sampled centroids (code of RVQ) decoded_action: decoded action, which is produced by passing sampled_centers through RVQ decoder NT: batch size * T T: number of action query tokens, which are process through same GPT cbet_logits: probability of all codes in each layer """ action_seq = target predicted_action = pred["predicted_action"] sampled_centers = pred["sampled_centers"] decoded_action = pred["decoded_action"] NT = predicted_action.shape[0] * predicted_action.shape[1] cbet_logits = pred["cbet_logits"] predicted_action = einops.rearrange( predicted_action, "N T (W A) -> (N T) W A", W=self.config.action_chunk_size ) action_seq = einops.rearrange(action_seq, "N T W A -> (N T) W A") # Figure out the loss for the actions. # First, we need to find the closest cluster center for each ground truth action. with torch.no_grad(): state_vq, action_bins = self.vqvae_model.get_code(action_seq) # action_bins: NT, G # Now we can compute the loss. # offset loss is L1 distance between the predicted action and ground truth action offset_loss = F.l1_loss(action_seq, predicted_action) # calculate primary code prediction loss cbet_loss1 = self._focal_loss_fn( cbet_logits[:, 0, :], action_bins[:, 0], ) # calculate secondary code prediction loss cbet_loss2 = self._focal_loss_fn( cbet_logits[:, 1, :], action_bins[:, 1], ) # add all the prediction loss cbet_loss = ( cbet_loss1 * self.config.primary_code_loss_weight + cbet_loss2 * self.config.secondary_code_loss_weight ) equal_primary_code_rate = torch.sum((action_bins[:, 0] == sampled_centers[:, 0]).int()) / (NT) equal_secondary_code_rate = torch.sum((action_bins[:, 1] == sampled_centers[:, 1]).int()) / (NT) action_mse_error = torch.mean((action_seq - predicted_action) ** 2) vq_action_error = torch.mean(torch.abs(action_seq - decoded_action)) offset_action_error = torch.mean(torch.abs(action_seq - predicted_action)) action_error_max = torch.max(torch.abs(action_seq - predicted_action)) loss = cbet_loss + self.config.offset_loss_weight * offset_loss loss_dict = { "loss": loss, "classification_loss": cbet_loss.detach().cpu().item(), "offset_loss": offset_loss.detach().cpu().item(), "equal_primary_code_rate": equal_primary_code_rate.detach().cpu().item(), "equal_secondary_code_rate": equal_secondary_code_rate.detach().cpu().item(), "vq_action_error": vq_action_error.detach().cpu().item(), "offset_action_error": offset_action_error.detach().cpu().item(), "action_error_max": action_error_max.detach().cpu().item(), "action_mse_error": action_mse_error.detach().cpu().item(), } return loss_dict class VQBeTOptimizer(torch.optim.Adam): def __init__(self, policy, cfg): vqvae_params = ( list(policy.vqbet.action_head.vqvae_model.encoder.parameters()) + list(policy.vqbet.action_head.vqvae_model.decoder.parameters()) + list(policy.vqbet.action_head.vqvae_model.vq_layer.parameters()) ) decay_params, no_decay_params = policy.vqbet.policy.configure_parameters() decay_params = ( decay_params + list(policy.vqbet.rgb_encoder.parameters()) + list(policy.vqbet.state_projector.parameters()) + list(policy.vqbet.rgb_feature_projector.parameters()) + [policy.vqbet.action_token] + list(policy.vqbet.action_head.map_to_cbet_preds_offset.parameters()) ) if cfg.policy.sequentially_select: decay_params = ( decay_params + list(policy.vqbet.action_head.map_to_cbet_preds_primary_bin.parameters()) + list(policy.vqbet.action_head.map_to_cbet_preds_secondary_bin.parameters()) ) else: decay_params = decay_params + 
list(policy.vqbet.action_head.map_to_cbet_preds_bin.parameters()) optim_groups = [ { "params": decay_params, "weight_decay": cfg.training.adam_weight_decay, "lr": cfg.training.lr, }, { "params": vqvae_params, "weight_decay": 0.0001, "lr": cfg.training.vqvae_lr, }, { "params": no_decay_params, "weight_decay": 0.0, "lr": cfg.training.lr, }, ] super().__init__( optim_groups, cfg.training.lr, cfg.training.adam_betas, cfg.training.adam_eps, ) class VQBeTScheduler(nn.Module): def __init__(self, optimizer, cfg): super().__init__() n_vqvae_training_steps = cfg.training.n_vqvae_training_steps num_warmup_steps = cfg.training.lr_warmup_steps num_training_steps = cfg.training.offline_steps num_cycles = 0.5 def lr_lambda(current_step): if current_step < n_vqvae_training_steps: return float(1) else: current_step = current_step - n_vqvae_training_steps if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float( max(1, num_training_steps - num_warmup_steps) ) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) self.lr_scheduler = LambdaLR(optimizer, lr_lambda, -1) def step(self): self.lr_scheduler.step() class VQBeTRgbEncoder(nn.Module): """Encode an RGB image into a 1D feature vector. Includes the ability to normalize and crop the image first. Same with DiffusionRgbEncoder from modeling_diffusion.py """ def __init__(self, config: VQBeTConfig): super().__init__() # Set up optional preprocessing. if config.crop_shape is not None: self.do_crop = True # Always use center crop for eval self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape) if config.crop_is_random: self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape) else: self.maybe_random_crop = self.center_crop else: self.do_crop = False # Set up backbone. backbone_model = getattr(torchvision.models, config.vision_backbone)( weights=config.pretrained_backbone_weights ) # Note: This assumes that the layer4 feature map is children()[-3] # TODO(alexander-soare): Use a safer alternative. self.backbone = nn.Sequential(*(list(backbone_model.children())[:-2])) if config.use_group_norm: if config.pretrained_backbone_weights: raise ValueError( "You can't replace BatchNorm in a pretrained model without ruining the weights!" ) self.backbone = _replace_submodules( root_module=self.backbone, predicate=lambda x: isinstance(x, nn.BatchNorm2d), func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features), ) # Set up pooling and final layers. # Use a dry run to get the feature map shape. # The dummy input should take the number of image channels from `config.input_shapes` and it should # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the # height and width from `config.input_shapes`. 
image_keys = [k for k in config.input_shapes if k.startswith("observation.image")] assert len(image_keys) == 1 image_key = image_keys[0] dummy_input_h_w = ( config.crop_shape if config.crop_shape is not None else config.input_shapes[image_key][1:] ) dummy_input = torch.zeros(size=(1, config.input_shapes[image_key][0], *dummy_input_h_w)) with torch.inference_mode(): dummy_feature_map = self.backbone(dummy_input) feature_map_shape = tuple(dummy_feature_map.shape[1:]) self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) self.feature_dim = config.spatial_softmax_num_keypoints * 2 self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: """ Args: x: (B, C, H, W) image tensor with pixel values in [0, 1]. Returns: (B, D) image feature. """ # Preprocess: maybe crop (if it was set up in the __init__). if self.do_crop: if self.training: # noqa: SIM108 x = self.maybe_random_crop(x) else: # Always use center crop for eval. x = self.center_crop(x) # Extract backbone feature. x = torch.flatten(self.pool(self.backbone(x)), start_dim=1) # Final linear layer with non-linearity. x = self.relu(self.out(x)) return x def _replace_submodules( root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module] ) -> nn.Module: """ Args: root_module: The module for which the submodules need to be replaced predicate: Takes a module as an argument and must return True if the that module is to be replaced. func: Takes a module as an argument and returns a new module to replace it with. Returns: The root module with its submodules replaced. """ if predicate(root_module): return func(root_module) replace_list = [k.split(".") for k, m in root_module.named_modules(remove_duplicate=True) if predicate(m)] for *parents, k in replace_list: parent_module = root_module if len(parents) > 0: parent_module = root_module.get_submodule(".".join(parents)) if isinstance(parent_module, nn.Sequential): src_module = parent_module[int(k)] else: src_module = getattr(parent_module, k) tgt_module = func(src_module) if isinstance(parent_module, nn.Sequential): parent_module[int(k)] = tgt_module else: setattr(parent_module, k, tgt_module) # verify that all BN are replaced assert not any(predicate(m) for _, m in root_module.named_modules(remove_duplicate=True)) return root_module class VqVae(nn.Module): def __init__( self, config: VQBeTConfig, ): """ VQ-VAE is composed of three parts: encoder, vq_layer, and decoder. Encoder and decoder are MLPs consisting of an input, output layer, and hidden layer, respectively. The vq_layer uses residual VQs. This class contains functions for training the encoder and decoder along with the residual VQ layer (for trainign phase 1), as well as functions to help BeT training part in training phase 2. """ super().__init__() self.config = config # 'discretized' indicates whether the Residual VQ part is trained or not. (After finishing the training, we set discretized=True) self.register_buffer("discretized", torch.tensor(False)) self.optimized_steps = 0 # we use the fixed number of layers for Residual VQ across all environments. 
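        # With 2 quantizer layers and `vqvae_n_embed` codes per layer, each action chunk is
        # described by a pair of codes, i.e. up to vqvae_n_embed**2 distinct code combinations.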
self.vqvae_num_layers = 2 self.vq_layer = ResidualVQ( dim=config.vqvae_embedding_dim, num_quantizers=self.vqvae_num_layers, codebook_size=config.vqvae_n_embed, ) self.encoder = MLP( in_channels=self.config.output_shapes["action"][0] * self.config.action_chunk_size, hidden_channels=[ config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, config.vqvae_embedding_dim, ], ) self.decoder = MLP( in_channels=config.vqvae_embedding_dim, hidden_channels=[ config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, self.config.output_shapes["action"][0] * self.config.action_chunk_size, ], ) def get_embeddings_from_code(self, encoding_indices): # This function gets code indices as inputs, and outputs embedding vectors corresponding to the code indices. with torch.no_grad(): z_embed = self.vq_layer.get_codebook_vector_from_indices(encoding_indices) # since the RVQ has multiple layers, it adds the vectors in the axis of layers to provide a vector for that code combination. z_embed = z_embed.sum(dim=0) return z_embed def get_action_from_latent(self, latent): # given latent vector, this function outputs the decoded action. output = self.decoder(latent) if self.config.action_chunk_size == 1: return einops.rearrange(output, "N (T A) -> N T A", A=self.config.output_shapes["action"][0]) else: return einops.rearrange(output, "N (T A) -> N T A", A=self.config.output_shapes["action"][0]) def get_code(self, state): # in phase 2 of VQ-BeT training, we need a `ground truth labels of action data` to calculate the Focal loss for code prediction head. (please refer to section 3.3 in the paper https://arxiv.org/pdf/2403.03181) # this function outputs the `GT code` of given action using frozen encoder and quantization layers. (please refer to Figure 2. in the paper https://arxiv.org/pdf/2403.03181) state = einops.rearrange(state, "N T A -> N (T A)") with torch.no_grad(): state_rep = self.encoder(state) state_rep_shape = state_rep.shape[:-1] state_rep_flat = state_rep.view(state_rep.size(0), -1, state_rep.size(1)) state_rep_flat, vq_code, vq_loss_state = self.vq_layer(state_rep_flat) state_vq = state_rep_flat.view(*state_rep_shape, -1) vq_code = vq_code.view(*state_rep_shape, -1) vq_loss_state = torch.sum(vq_loss_state) return state_vq, vq_code def vqvae_forward(self, state): # This function passes the given data through Residual VQ with Encoder and Decoder. Please refer to section 3.2 in the paper https://arxiv.org/pdf/2403.03181). state = einops.rearrange(state, "N T A -> N (T A)") # We start with passing action (or action chunk) at:t+n through the encoder ϕ. state_rep = self.encoder(state) state_rep_shape = state_rep.shape[:-1] state_rep_flat = state_rep.view(state_rep.size(0), -1, state_rep.size(1)) # The resulting latent embedding vector x = ϕ(at:t+n) is then mapped to an embedding vector in the codebook of the RVQ layers by the nearest neighbor look-up. state_rep_flat, vq_code, vq_loss_state = self.vq_layer(state_rep_flat) state_vq = state_rep_flat.view(*state_rep_shape, -1) vq_code = vq_code.view(*state_rep_shape, -1) # since the RVQ has multiple layers, it adds the vectors in the axis of layers to provide a vector for that code combination. vq_loss_state = torch.sum(vq_loss_state) # Then, the discretized vector zq(x) is reconstructed as ψ(zq(x)) by passing through the decoder ψ. 
dec_out = self.decoder(state_vq) # Calculate L1 reconstruction loss encoder_loss = (state - dec_out).abs().mean() # add encoder reconstruction loss and commitment loss rep_loss = encoder_loss + vq_loss_state * 5 metric = ( encoder_loss.clone().detach(), vq_loss_state.clone().detach(), vq_code, rep_loss.item(), ) return rep_loss, metric class FocalLoss(nn.Module): """ From https://github.com/notmahi/miniBET/blob/main/behavior_transformer/bet.py """ def __init__(self, gamma: float = 0, size_average: bool = True): super().__init__() self.gamma = gamma self.size_average = size_average def forward(self, input, target): if len(input.shape) == 3: N, T, _ = input.shape logpt = F.log_softmax(input, dim=-1) logpt = logpt.gather(-1, target.view(N, T, 1)).view(N, T) elif len(input.shape) == 2: logpt = F.log_softmax(input, dim=-1) logpt = logpt.gather(-1, target.view(-1, 1)).view(-1) pt = logpt.exp() loss = -1 * (1 - pt) ** self.gamma * logpt if self.size_average: return loss.mean() else: return loss.sum() class MLP(torch.nn.Sequential): def __init__( self, in_channels: int, hidden_channels: List[int], ): layers = [] in_dim = in_channels for hidden_dim in hidden_channels[:-1]: layers.append(torch.nn.Linear(in_dim, hidden_dim)) layers.append(torch.nn.ReLU()) in_dim = hidden_dim layers.append(torch.nn.Linear(in_dim, hidden_channels[-1])) super().__init__(*layers)
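
# Example: MLP(in_channels=4, hidden_channels=[16, 16, 2]) builds
# Linear(4, 16) -> ReLU -> Linear(16, 16) -> ReLU -> Linear(16, 2),
# i.e. a ReLU follows every hidden layer but not the final output layer.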
lerobot/lerobot/common/policies/vqbet/modeling_vqbet.py/0
{ "file_path": "lerobot/lerobot/common/policies/vqbet/modeling_vqbet.py", "repo_id": "lerobot", "token_count": 21233 }
177
# @package _global_ fps: 30 env: name: dora task: DoraAloha-v0 state_dim: 14 action_dim: 14 fps: ${fps} episode_length: 400 gym: fps: ${fps}
lerobot/lerobot/configs/env/dora_aloha_real.yaml/0
{ "file_path": "lerobot/lerobot/configs/env/dora_aloha_real.yaml", "repo_id": "lerobot", "token_count": 74 }
178
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Use this script to get a quick summary of your system config. It should be able to run without any of LeRobot's dependencies or LeRobot itself installed. """ import platform HAS_HF_HUB = True HAS_HF_DATASETS = True HAS_NP = True HAS_TORCH = True HAS_LEROBOT = True try: import huggingface_hub except ImportError: HAS_HF_HUB = False try: import datasets except ImportError: HAS_HF_DATASETS = False try: import numpy as np except ImportError: HAS_NP = False try: import torch except ImportError: HAS_TORCH = False try: import lerobot except ImportError: HAS_LEROBOT = False lerobot_version = lerobot.__version__ if HAS_LEROBOT else "N/A" hf_hub_version = huggingface_hub.__version__ if HAS_HF_HUB else "N/A" hf_datasets_version = datasets.__version__ if HAS_HF_DATASETS else "N/A" np_version = np.__version__ if HAS_NP else "N/A" torch_version = torch.__version__ if HAS_TORCH else "N/A" torch_cuda_available = torch.cuda.is_available() if HAS_TORCH else "N/A" cuda_version = torch._C._cuda_getCompiledVersion() if HAS_TORCH and torch.version.cuda is not None else "N/A" # TODO(aliberts): refactor into an actual command `lerobot env` def display_sys_info() -> dict: """Run this to get basic system info to help for tracking issues & bugs.""" info = { "`lerobot` version": lerobot_version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": hf_hub_version, "Dataset version": hf_datasets_version, "Numpy version": np_version, "PyTorch version (GPU?)": f"{torch_version} ({torch_cuda_available})", "Cuda version": cuda_version, "Using GPU in script?": "<fill in>", # "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the last point.\n") print(format_dict(info)) return info def format_dict(d: dict) -> str: return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" if __name__ == "__main__": display_sys_info()
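
# Example output (values below are purely illustrative):
#
#   Copy-and-paste the text below in your GitHub issue and FILL OUT the last point.
#
#   - `lerobot` version: 0.1.0
#   - Platform: Linux-6.5.0-x86_64-with-glibc2.35
#   - Python version: 3.10.13
#   - Huggingface_hub version: 0.23.0
#   - Dataset version: 2.19.0
#   - Numpy version: 1.26.4
#   - PyTorch version (GPU?): 2.3.0 (True)
#   - Cuda version: 12010
#   - Using GPU in script?: <fill in>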
lerobot/lerobot/scripts/display_sys_info.py/0
{ "file_path": "lerobot/lerobot/scripts/display_sys_info.py", "repo_id": "lerobot", "token_count": 998 }
179
version https://git-lfs.github.com/spec/v1 oid sha256:4ee862b1a6dc1d11df77c36c47ea00db88ad35a48e4d71c2940ad26b55fe2167 size 136
lerobot/tests/data/lerobot/aloha_mobile_chair/meta_data/episode_data_index.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_mobile_chair/meta_data/episode_data_index.safetensors", "repo_id": "lerobot", "token_count": 66 }
180
version https://git-lfs.github.com/spec/v1 oid sha256:921505133c62906bd53034a613a827996994875d84c8b26d69d188df9a7ffeba size 247
lerobot/tests/data/lerobot/aloha_mobile_wash_pan/train/state.json/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_mobile_wash_pan/train/state.json", "repo_id": "lerobot", "token_count": 62 }
181
version https://git-lfs.github.com/spec/v1 oid sha256:09de36f2d6786e65e26d4602e00f9097f63a087a6a4f36e98c5367724acfc755 size 2904
lerobot/tests/data/lerobot/aloha_sim_transfer_cube_human_image/meta_data/stats.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_sim_transfer_cube_human_image/meta_data/stats.safetensors", "repo_id": "lerobot", "token_count": 67 }
182
version https://git-lfs.github.com/spec/v1 oid sha256:fda1fe75c9f987c065d4244594e4f6456b7ac6efd7fae2a7952fb48b044dbd30 size 247
lerobot/tests/data/lerobot/aloha_sim_transfer_cube_scripted_image/train/state.json/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_sim_transfer_cube_scripted_image/train/state.json", "repo_id": "lerobot", "token_count": 67 }
183
version https://git-lfs.github.com/spec/v1 oid sha256:08343c525b04a96a2c21b6929616dac092e2ece9789f9c15830241ae0e1ad020 size 247
lerobot/tests/data/lerobot/aloha_static_candy/train/state.json/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_candy/train/state.json", "repo_id": "lerobot", "token_count": 64 }
184
version https://git-lfs.github.com/spec/v1 oid sha256:0a6e2d2ebfca08420a2aafec744f99939f58ee0cc12a8f4a53bfe71381c9846a size 247
lerobot/tests/data/lerobot/aloha_static_tape/train/state.json/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_tape/train/state.json", "repo_id": "lerobot", "token_count": 68 }
185
version https://git-lfs.github.com/spec/v1 oid sha256:80da670129e42cb85e93a9f6779c27b081bbaa7e1ecd68577f78877ac3726e70 size 122088
lerobot/tests/data/lerobot/aloha_static_ziploc_slide/train/data-00000-of-00001.arrow/0
{ "file_path": "lerobot/tests/data/lerobot/aloha_static_ziploc_slide/train/data-00000-of-00001.arrow", "repo_id": "lerobot", "token_count": 67 }
186
version https://git-lfs.github.com/spec/v1 oid sha256:65ceff2650ebbba2ee024be9ec083b7f8f20a69cd2c5bd6624382fe5fb974697 size 3056
lerobot/tests/data/lerobot/pusht_image/meta_data/stats.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/pusht_image/meta_data/stats.safetensors", "repo_id": "lerobot", "token_count": 64 }
187
version https://git-lfs.github.com/spec/v1 oid sha256:b6cfcf6051a043dfb16d797fe382ef786b3f818f8ff35a53ecf139f22258d3c3 size 2832
lerobot/tests/data/lerobot/xarm_lift_medium_replay/meta_data/stats.safetensors/0
{ "file_path": "lerobot/tests/data/lerobot/xarm_lift_medium_replay/meta_data/stats.safetensors", "repo_id": "lerobot", "token_count": 65 }
188
version https://git-lfs.github.com/spec/v1 oid sha256:3ca324435b18abab8956aaac43bdac463e8881f654f7a851a39a7eb190e7e040 size 247
lerobot/tests/data/lerobot/xarm_push_medium/train/state.json/0
{ "file_path": "lerobot/tests/data/lerobot/xarm_push_medium/train/state.json", "repo_id": "lerobot", "token_count": 63 }
189
version https://git-lfs.github.com/spec/v1 oid sha256:24722873fa5260e960f2c64b1decb02e7bcf31ba44849347e34ea2a268c458e8 size 65
lerobot/tests/data/lerobot/xarm_push_medium_replay_image/meta_data/info.json/0
{ "file_path": "lerobot/tests/data/lerobot/xarm_push_medium_replay_image/meta_data/info.json", "repo_id": "lerobot", "token_count": 62 }
190
version https://git-lfs.github.com/spec/v1 oid sha256:96431ca3479eef2379406ef901cad7ba5eac4f7edcc48ecc9e8d1fa0e99d8017 size 111338
lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_81.safetensors/0
{ "file_path": "lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_81.safetensors", "repo_id": "lerobot", "token_count": 67 }
191
version https://git-lfs.github.com/spec/v1 oid sha256:683a2038185f3d070e7d7c0c31e4aa75067c11bf798daa41c9fab336f4183fda size 33400
lerobot/tests/data/save_policy_to_safetensors/aloha_act_1000_steps/param_stats.safetensors/0
{ "file_path": "lerobot/tests/data/save_policy_to_safetensors/aloha_act_1000_steps/param_stats.safetensors", "repo_id": "lerobot", "token_count": 65 }
192
version https://git-lfs.github.com/spec/v1 oid sha256:b7d08c9518f1f15226e4efc6f2a8542d0f3e620c91421c7cacea07d9bd9025d6 size 36312
lerobot/tests/data/save_policy_to_safetensors/xarm_tdmpcuse_mpc/param_stats.safetensors/0
{ "file_path": "lerobot/tests/data/save_policy_to_safetensors/xarm_tdmpcuse_mpc/param_stats.safetensors", "repo_id": "lerobot", "token_count": 71 }
193
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.d from copy import deepcopy from uuid import uuid4 import numpy as np import pytest import torch from datasets import Dataset from lerobot.common.datasets.lerobot_dataset import LeRobotDataset from lerobot.common.datasets.online_buffer import OnlineBuffer, compute_sampler_weights from lerobot.common.datasets.utils import hf_transform_to_torch # Some constants for OnlineBuffer tests. data_key = "data" data_shape = (2, 3) # just some arbitrary > 1D shape buffer_capacity = 100 fps = 10 def make_new_buffer( write_dir: str | None = None, delta_timestamps: dict[str, list[float]] | None = None ) -> tuple[OnlineBuffer, str]: if write_dir is None: write_dir = f"/tmp/online_buffer_{uuid4().hex}" buffer = OnlineBuffer( write_dir, data_spec={data_key: {"shape": data_shape, "dtype": np.dtype("float32")}}, buffer_capacity=buffer_capacity, fps=fps, delta_timestamps=delta_timestamps, ) return buffer, write_dir def make_spoof_data_frames(n_episodes: int, n_frames_per_episode: int) -> dict[str, np.ndarray]: new_data = { data_key: np.arange(n_frames_per_episode * n_episodes * np.prod(data_shape)).reshape(-1, *data_shape), OnlineBuffer.INDEX_KEY: np.arange(n_frames_per_episode * n_episodes), OnlineBuffer.EPISODE_INDEX_KEY: np.repeat(np.arange(n_episodes), n_frames_per_episode), OnlineBuffer.FRAME_INDEX_KEY: np.tile(np.arange(n_frames_per_episode), n_episodes), OnlineBuffer.TIMESTAMP_KEY: np.tile(np.arange(n_frames_per_episode) / fps, n_episodes), } return new_data def test_non_mutate(): """Checks that the data provided to the add_data method is copied rather than passed by reference. This means that mutating the data in the buffer does not mutate the original data. NOTE: If this test fails, it means some of the other tests may be compromised. For example, we can't trust a success case for `test_write_read`. """ buffer, _ = make_new_buffer() new_data = make_spoof_data_frames(2, buffer_capacity // 4) new_data_copy = deepcopy(new_data) buffer.add_data(new_data) buffer._data[data_key][:] += 1 assert all(np.array_equal(new_data[k], new_data_copy[k]) for k in new_data) def test_index_error_no_data(): buffer, _ = make_new_buffer() with pytest.raises(IndexError): buffer[0] def test_index_error_with_data(): buffer, _ = make_new_buffer() n_frames = buffer_capacity // 2 new_data = make_spoof_data_frames(1, n_frames) buffer.add_data(new_data) with pytest.raises(IndexError): buffer[n_frames] with pytest.raises(IndexError): buffer[-n_frames - 1] @pytest.mark.parametrize("do_reload", [False, True]) def test_write_read(do_reload: bool): """Checks that data can be added to the buffer and read back. If do_reload we delete the buffer object and load the buffer back from disk before reading. 
""" buffer, write_dir = make_new_buffer() n_episodes = 2 n_frames_per_episode = buffer_capacity // 4 new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode) buffer.add_data(new_data) if do_reload: del buffer buffer, _ = make_new_buffer(write_dir) assert len(buffer) == n_frames_per_episode * n_episodes for i, item in enumerate(buffer): assert all(isinstance(item[k], torch.Tensor) for k in item) assert np.array_equal(item[data_key].numpy(), new_data[data_key][i]) def test_read_data_key(): """Tests that data can be added to a buffer and all data for a. specific key can be read back.""" buffer, _ = make_new_buffer() n_episodes = 2 n_frames_per_episode = buffer_capacity // 4 new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode) buffer.add_data(new_data) data_from_buffer = buffer.get_data_by_key(data_key) assert isinstance(data_from_buffer, torch.Tensor) assert np.array_equal(data_from_buffer.numpy(), new_data[data_key]) def test_fifo(): """Checks that if data is added beyond the buffer capacity, we discard the oldest data first.""" buffer, _ = make_new_buffer() n_frames_per_episode = buffer_capacity // 4 n_episodes = 3 new_data = make_spoof_data_frames(n_episodes, n_frames_per_episode) buffer.add_data(new_data) n_more_episodes = 2 # Developer sanity check (in case someone changes the global `buffer_capacity`). assert ( n_episodes + n_more_episodes ) * n_frames_per_episode > buffer_capacity, "Something went wrong with the test code." more_new_data = make_spoof_data_frames(n_more_episodes, n_frames_per_episode) buffer.add_data(more_new_data) assert len(buffer) == buffer_capacity, "The buffer should be full." expected_data = {} for k in new_data: # Concatenate, left-truncate, then roll, to imitate the cyclical FIFO pattern in OnlineBuffer. expected_data[k] = np.roll( np.concatenate([new_data[k], more_new_data[k]])[-buffer_capacity:], shift=len(new_data[k]) + len(more_new_data[k]) - buffer_capacity, axis=0, ) for i, item in enumerate(buffer): assert all(isinstance(item[k], torch.Tensor) for k in item) assert np.array_equal(item[data_key].numpy(), expected_data[data_key][i]) def test_delta_timestamps_within_tolerance(): """Check that getting an item with delta_timestamps within tolerance succeeds. Note: Copied from `test_datasets.py::test_load_previous_and_future_frames_within_tolerance`. """ # Sanity check on global fps as we are assuming it is 10 here. assert fps == 10, "This test assumes fps==10" buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.2, 0, 0.139]}) new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5) buffer.add_data(new_data) buffer.tolerance_s = 0.04 item = buffer[2] data, is_pad = item["index"], item[f"index{OnlineBuffer.IS_PAD_POSTFIX}"] assert torch.allclose(data, torch.tensor([0, 2, 3])), "Data does not match expected values" assert not is_pad.any(), "Unexpected padding detected" def test_delta_timestamps_outside_tolerance_inside_episode_range(): """Check that getting an item with delta_timestamps outside of tolerance fails. We expect it to fail if and only if the requested timestamps are within the episode range. Note: Copied from `test_datasets.py::test_load_previous_and_future_frames_outside_tolerance_inside_episode_range` """ # Sanity check on global fps as we are assuming it is 10 here. 
assert fps == 10, "This test assumes fps==10" buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.2, 0, 0.141]}) new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5) buffer.add_data(new_data) buffer.tolerance_s = 0.04 with pytest.raises(AssertionError): buffer[2] def test_delta_timestamps_outside_tolerance_outside_episode_range(): """Check that copy-padding of timestamps outside of the episode range works. Note: Copied from `test_datasets.py::test_load_previous_and_future_frames_outside_tolerance_outside_episode_range` """ # Sanity check on global fps as we are assuming it is 10 here. assert fps == 10, "This test assumes fps==10" buffer, _ = make_new_buffer(delta_timestamps={"index": [-0.3, -0.24, 0, 0.26, 0.3]}) new_data = make_spoof_data_frames(n_episodes=1, n_frames_per_episode=5) buffer.add_data(new_data) buffer.tolerance_s = 0.04 item = buffer[2] data, is_pad = item["index"], item["index_is_pad"] assert torch.equal(data, torch.tensor([0, 0, 2, 4, 4])), "Data does not match expected values" assert torch.equal( is_pad, torch.tensor([True, False, False, True, True]) ), "Padding does not match expected values" # Arbitrarily set small dataset sizes, making sure to have uneven sizes. @pytest.mark.parametrize("offline_dataset_size", [0, 6]) @pytest.mark.parametrize("online_dataset_size", [0, 4]) @pytest.mark.parametrize("online_sampling_ratio", [0.0, 1.0]) def test_compute_sampler_weights_trivial( offline_dataset_size: int, online_dataset_size: int, online_sampling_ratio: float ): # Pass/skip the test if both datasets sizes are zero. if offline_dataset_size + online_dataset_size == 0: return # Create spoof offline dataset. offline_dataset = LeRobotDataset.from_preloaded( hf_dataset=Dataset.from_dict({"data": list(range(offline_dataset_size))}) ) offline_dataset.hf_dataset.set_transform(hf_transform_to_torch) if offline_dataset_size == 0: offline_dataset.episode_data_index = {} else: # Set up an episode_data_index with at least two episodes. offline_dataset.episode_data_index = { "from": torch.tensor([0, offline_dataset_size // 2]), "to": torch.tensor([offline_dataset_size // 2, offline_dataset_size]), } # Create spoof online datset. online_dataset, _ = make_new_buffer() if online_dataset_size > 0: online_dataset.add_data( make_spoof_data_frames(n_episodes=2, n_frames_per_episode=online_dataset_size // 2) ) weights = compute_sampler_weights( offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio ) if offline_dataset_size == 0 or online_dataset_size == 0: expected_weights = torch.ones(offline_dataset_size + online_dataset_size) elif online_sampling_ratio == 0: expected_weights = torch.cat([torch.ones(offline_dataset_size), torch.zeros(online_dataset_size)]) elif online_sampling_ratio == 1: expected_weights = torch.cat([torch.zeros(offline_dataset_size), torch.ones(online_dataset_size)]) expected_weights /= expected_weights.sum() assert torch.allclose(weights, expected_weights) def test_compute_sampler_weights_nontrivial_ratio(): # Arbitrarily set small dataset sizes, making sure to have uneven sizes. # Create spoof offline dataset. offline_dataset = LeRobotDataset.from_preloaded(hf_dataset=Dataset.from_dict({"data": list(range(4))})) offline_dataset.hf_dataset.set_transform(hf_transform_to_torch) offline_dataset.episode_data_index = { "from": torch.tensor([0, 2]), "to": torch.tensor([2, 4]), } # Create spoof online datset. 
online_dataset, _ = make_new_buffer() online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)) online_sampling_ratio = 0.8 weights = compute_sampler_weights( offline_dataset, online_dataset=online_dataset, online_sampling_ratio=online_sampling_ratio ) assert torch.allclose( weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) ) def test_compute_sampler_weights_nontrivial_ratio_and_drop_last_n(): # Arbitrarily set small dataset sizes, making sure to have uneven sizes. # Create spoof offline dataset. offline_dataset = LeRobotDataset.from_preloaded(hf_dataset=Dataset.from_dict({"data": list(range(4))})) offline_dataset.hf_dataset.set_transform(hf_transform_to_torch) offline_dataset.episode_data_index = { "from": torch.tensor([0]), "to": torch.tensor([4]), } # Create spoof online datset. online_dataset, _ = make_new_buffer() online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)) weights = compute_sampler_weights( offline_dataset, online_dataset=online_dataset, online_sampling_ratio=0.8, online_drop_n_last_frames=1 ) assert torch.allclose( weights, torch.tensor([0.05, 0.05, 0.05, 0.05, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0, 0.2, 0.0]) ) def test_compute_sampler_weights_drop_n_last_frames(): """Note: test copied from test_sampler.""" data_dict = { "timestamp": [0, 0.1], "index": [0, 1], "episode_index": [0, 0], "frame_index": [0, 1], } offline_dataset = LeRobotDataset.from_preloaded(hf_dataset=Dataset.from_dict(data_dict)) offline_dataset.hf_dataset.set_transform(hf_transform_to_torch) offline_dataset.episode_data_index = {"from": torch.tensor([0]), "to": torch.tensor([2])} online_dataset, _ = make_new_buffer() online_dataset.add_data(make_spoof_data_frames(n_episodes=4, n_frames_per_episode=2)) weights = compute_sampler_weights( offline_dataset, offline_drop_n_last_frames=1, online_dataset=online_dataset, online_sampling_ratio=0.5, online_drop_n_last_frames=1, ) assert torch.allclose(weights, torch.tensor([0.5, 0, 0.125, 0, 0.125, 0, 0.125, 0, 0.125, 0]))
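
# How the expected weights in the last test are derived:
# - `online_sampling_ratio=0.5` splits the probability mass evenly: 0.5 for the offline
#   dataset and 0.5 for the online buffer.
# - The offline dataset has 2 frames and `offline_drop_n_last_frames=1` zeroes out the last
#   frame of its single episode, so the remaining frame gets the full 0.5.
# - The online buffer has 4 episodes of 2 frames; dropping the last frame of each episode
#   leaves 4 frames that share 0.5 equally, i.e. 0.125 each.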
lerobot/tests/test_online_buffer.py/0
{ "file_path": "lerobot/tests/test_online_buffer.py", "repo_id": "lerobot", "token_count": 5333 }
194
import torch from torchaudio.pipelines import SQUIM_OBJECTIVE import torchaudio import evaluate from transformers import ( AutoModel, AutoProcessor, pipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperTokenizerFast, ) from accelerate.utils.memory import release_memory import numpy as np def clap_similarity(clap_model_name_or_path, texts, audios, device, input_sampling_rate=44100): clap = AutoModel.from_pretrained(clap_model_name_or_path) clap_processor = AutoProcessor.from_pretrained(clap_model_name_or_path) output_sampling_rate = clap_processor.feature_extractor.sampling_rate if input_sampling_rate != output_sampling_rate: audios = [ torchaudio.functional.resample(torch.from_numpy(audio), input_sampling_rate, output_sampling_rate).numpy() for audio in audios ] clap_inputs = clap_processor( text=texts, audios=audios, padding=True, return_tensors="pt", sampling_rate=output_sampling_rate ).to(device) clap.to(device) with torch.no_grad(): text_features = clap.get_text_features( clap_inputs["input_ids"], attention_mask=clap_inputs.get("attention_mask", None) ) audio_features = clap.get_audio_features(clap_inputs["input_features"]) cosine_sim = torch.nn.functional.cosine_similarity(audio_features, text_features, dim=1, eps=1e-8).mean() cosine_sim = cosine_sim.to("cpu") clap.to("cpu") clap, clap_inputs, audio_features, text_features = release_memory(clap, clap_inputs, audio_features, text_features) return cosine_sim def si_sdr(audios, device, input_sampling_rate=44100): max_audio_length = 15 * SQUIM_OBJECTIVE.sample_rate model = SQUIM_OBJECTIVE.get_model().to((device)) output_sampling_rate = SQUIM_OBJECTIVE.sample_rate if input_sampling_rate != output_sampling_rate: audios = [ torchaudio.functional.resample( torch.tensor(audio)[None, :].to(device).float(), input_sampling_rate, output_sampling_rate ) for audio in audios ] def apply_squim(waveform): with torch.no_grad(): waveform = waveform[:, : min(max_audio_length, waveform.shape[1])] _, _, sdr_sample = model(waveform) sdr_sample = sdr_sample.cpu()[0] return sdr_sample si_sdrs = [apply_squim(audio) for audio in audios] audios, model = release_memory(audios, model) return si_sdrs def wer( asr_model_name_or_path, prompts, audios, device, per_device_eval_batch_size, sampling_rate, noise_level_to_compute_clean_wer, si_sdr_measures, ): metric = evaluate.load("wer") asr_pipeline = pipeline(model=asr_model_name_or_path, device=device, chunk_length_s=25.0) return_language = None if isinstance(asr_pipeline.model, WhisperForConditionalGeneration): return_language = True transcriptions = asr_pipeline( [{"raw": audio, "sampling_rate": sampling_rate} for audio in audios], batch_size=int(per_device_eval_batch_size), return_language=return_language, ) if isinstance(asr_pipeline.tokenizer, (WhisperTokenizer, WhisperTokenizerFast)): tokenizer = asr_pipeline.tokenizer else: tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-large-v3") english_normalizer = tokenizer.normalize basic_normalizer = tokenizer.basic_normalize normalized_predictions = [] normalized_references = [] for pred, ref in zip(transcriptions, prompts): normalizer = ( english_normalizer if isinstance(pred.get("chunks", None), list) and pred["chunks"][0].get("language", None) == "english" else basic_normalizer ) norm_ref = normalizer(ref) if len(norm_ref) > 0: norm_pred = normalizer(pred["text"]) normalized_predictions.append(norm_pred) normalized_references.append(norm_ref) word_error = 100 clean_word_error = None noisy_word_error = None percent_clean_samples = 0 if 
len(normalized_references) > 0: word_error = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references) if noise_level_to_compute_clean_wer and si_sdr_measures: si_sdr_measures = np.array(si_sdr_measures) mask = si_sdr_measures >= noise_level_to_compute_clean_wer if mask.any(): clean_word_error = 100 * metric.compute( predictions=np.array(normalized_predictions)[mask], references=np.array(normalized_references)[mask] ) if not mask.all(): noisy_word_error = 100 * metric.compute( predictions=np.array(normalized_predictions)[~mask], references=np.array(normalized_references)[~mask] ) else: noisy_word_error = 0 percent_clean_samples = mask.sum() / len(mask) asr_pipeline.model.to("cpu") asr_pipeline = release_memory(asr_pipeline) return word_error, [t["text"] for t in transcriptions], clean_word_error, noisy_word_error, percent_clean_samples
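
# Rough usage sketch (all names and values below are illustrative, not part of this module):
#
#   device = "cuda:0"
#   sdr_scores = si_sdr(generated_audios, device, input_sampling_rate=44100)
#   clap_score = clap_similarity("laion/larger_clap_music_and_speech", prompts, generated_audios, device)
#   word_error, transcriptions, clean_wer, noisy_wer, pct_clean = wer(
#       "distil-whisper/distil-large-v2",
#       prompts,
#       generated_audios,
#       device,
#       per_device_eval_batch_size=16,
#       sampling_rate=44100,
#       noise_level_to_compute_clean_wer=25,
#       si_sdr_measures=sdr_scores,
#   )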
parler-tts/training/eval.py/0
{ "file_path": "parler-tts/training/eval.py", "repo_id": "parler-tts", "token_count": 2276 }
195
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <h1 align="center"> <p>🤗 PEFT</p></h1> <h3 align="center"> <p>State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods</p> </h3> Fine-tuning large pretrained models is often prohibitively costly due to their scale. Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of large pretrained models to various downstream applications by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. This significantly decreases the computational and storage costs. Recent state-of-the-art PEFT techniques achieve performance comparable to fully fine-tuned models. PEFT is integrated with Transformers for easy model training and inference, Diffusers for conveniently managing different adapters, and Accelerate for distributed training and inference for really big models. > [!TIP] > Visit the [PEFT](https://huggingface.co/PEFT) organization to read about the PEFT methods implemented in the library and to see notebooks demonstrating how to apply these methods to a variety of downstream tasks. Click the "Watch repos" button on the organization page to be notified of newly implemented methods and notebooks! Check the PEFT Adapters API Reference section for a list of supported PEFT methods, and read the [Adapters](https://huggingface.co/docs/peft/en/conceptual_guides/adapter), [Soft prompts](https://huggingface.co/docs/peft/en/conceptual_guides/prompting), and [IA3](https://huggingface.co/docs/peft/en/conceptual_guides/ia3) conceptual guides to learn more about how these methods work. ## Quickstart Install PEFT from pip: ```bash pip install peft ``` Prepare a model for training with a PEFT method such as LoRA by wrapping the base model and PEFT configuration with `get_peft_model`. For the bigscience/mt0-large model, you're only training 0.19% of the parameters! 
```python from transformers import AutoModelForSeq2SeqLM from peft import get_peft_config, get_peft_model, LoraConfig, TaskType model_name_or_path = "bigscience/mt0-large" tokenizer_name_or_path = "bigscience/mt0-large" peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282" ``` To load a PEFT model for inference: ```py from peft import AutoPeftModelForCausalLM from transformers import AutoTokenizer import torch model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora").to("cuda") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") model.eval() inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt") outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) "Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla." ``` ## Why you should use PEFT There are many benefits of using PEFT but the main one is the huge savings in compute and storage, making PEFT applicable to many different use cases. ### High performance on consumer hardware Consider the memory requirements for training the following models on the [ought/raft/twitter_complaints](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) dataset with an A100 80GB GPU with more than 64GB of CPU RAM. | Model | Full Finetuning | PEFT-LoRA PyTorch | PEFT-LoRA DeepSpeed with CPU Offloading | | --------- | ---- | ---- | ---- | | bigscience/T0_3B (3B params) | 47.14GB GPU / 2.96GB CPU | 14.4GB GPU / 2.96GB CPU | 9.8GB GPU / 17.8GB CPU | | bigscience/mt0-xxl (12B params) | OOM GPU | 56GB GPU / 3GB CPU | 22GB GPU / 52GB CPU | | bigscience/bloomz-7b1 (7B params) | OOM GPU | 32GB GPU / 3.8GB CPU | 18.1GB GPU / 35GB CPU | With LoRA you can fully finetune a 12B parameter model that would've otherwise run out of memory on the 80GB GPU, and comfortably fit and train a 3B parameter model. When you look at the 3B parameter model's performance, it is comparable to a fully finetuned model at a fraction of the GPU memory. | Submission Name | Accuracy | | --------- | ---- | | Human baseline (crowdsourced) | 0.897 | | Flan-T5 | 0.892 | | lora-t0-3b | 0.863 | > [!TIP] > The bigscience/T0_3B model performance isn't optimized in the table above. You can squeeze even more performance out of it by playing around with the input instruction templates, LoRA hyperparameters, and other training related hyperparameters. The final checkpoint size of this model is just 19MB compared to 11GB of the full bigscience/T0_3B model. Learn more about the advantages of finetuning with PEFT in this [blog post](https://www.philschmid.de/fine-tune-flan-t5-peft). ### Quantization Quantization is another method for reducing the memory requirements of a model by representing the data in a lower precision. It can be combined with PEFT methods to make it even easier to train and load LLMs for inference. 
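For example, a minimal sketch of combining 4-bit quantization with LoRA might look like the following (an illustrative snippet assuming `bitsandbytes` is installed; the base model and hyperparameters are placeholders):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training

# Load the base model with 4-bit weights.
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)

# Prepare the quantized model for training and attach LoRA adapters.
model = prepare_model_for_kbit_training(model)
lora_config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=32, lora_dropout=0.1)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```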
* Learn how to finetune [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) with QLoRA and the [TRL](https://huggingface.co/docs/trl/index) library on a 16GB GPU in the [Finetune LLMs on your own consumer hardware using tools from PyTorch and Hugging Face ecosystem](https://pytorch.org/blog/finetune-llms/) blog post.
* Learn how to finetune an [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition with LoRA and 8-bit quantization in this [notebook](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) (see this [notebook](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) instead for an example of streaming a dataset).

### Save compute and storage

PEFT can help you save storage by avoiding full finetuning of models on each downstream task or dataset. In many cases, you're only finetuning a very small fraction of a model's parameters and each checkpoint is only a few MBs in size (instead of GBs). These smaller PEFT adapters demonstrate performance comparable to a fully finetuned model. If you have many datasets, you can save a lot of storage with a PEFT model and not have to worry about catastrophic forgetting or overfitting the backbone or base model.

## PEFT integrations

PEFT is widely supported across the Hugging Face ecosystem because of the massive efficiency it brings to training and inference.

### Diffusers

The iterative diffusion process consumes a lot of memory which can make it difficult to train. PEFT can help reduce the memory requirements and reduce the storage size of the final model checkpoint. For example, consider the memory required for training a Stable Diffusion model with LoRA on an A100 80GB GPU with more than 64GB of CPU RAM. The final model checkpoint size is only 8.8MB!

| Model | Full Finetuning | PEFT-LoRA | PEFT-LoRA with Gradient Checkpointing |
| --------- | ---- | ---- | ---- |
| CompVis/stable-diffusion-v1-4 | 27.5GB GPU / 3.97GB CPU | 15.5GB GPU / 3.84GB CPU | 8.12GB GPU / 3.77GB CPU |

> [!TIP]
> Take a look at the [examples/lora_dreambooth/train_dreambooth.py](examples/lora_dreambooth/train_dreambooth.py) training script to try training your own Stable Diffusion model with LoRA, and play around with the [smangrul/peft-lora-sd-dreambooth](https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth) Space which is running on a T4 instance. Learn more about the PEFT integration in Diffusers in this [tutorial](https://huggingface.co/docs/peft/main/en/tutorial/peft_integrations#diffusers).

### Accelerate

[Accelerate](https://huggingface.co/docs/accelerate/index) is a library for distributed training and inference on various training setups and hardware (GPUs, TPUs, Apple Silicon, etc.). PEFT models work with Accelerate out of the box, making it really convenient to train really large models or use them for inference on consumer hardware with limited resources.

### TRL

PEFT can also be applied to training LLMs with RLHF components such as the ranker and policy. Get started by reading:

* [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library to learn more about the Direct Preference Optimization (DPO) method and how to apply it to an LLM.
* [Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU](https://huggingface.co/blog/trl-peft) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library, and then try out the [gpt2-sentiment_peft.ipynb](https://github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook to optimize GPT2 to generate positive movie reviews.
* [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama) with PEFT, and then try out the [stack_llama/scripts](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts) for supervised finetuning, reward modeling, and RL finetuning.

## Model support

Use this [Space](https://stevhliu-peft-methods.hf.space) or check out the [docs](https://huggingface.co/docs/peft/main/en/index) to find which models officially support a PEFT method out of the box. Even if you don't see a model listed below, you can manually configure the model config to enable PEFT for a model. Read the [New transformers architecture](https://huggingface.co/docs/peft/main/en/developer_guides/custom_models#new-transformers-architectures) guide to learn how.

## Contribute

If you would like to contribute to PEFT, please check out our [contribution guide](https://huggingface.co/docs/peft/developer_guides/contributing).

## Citing 🤗 PEFT

To use 🤗 PEFT in your publication, please cite it by using the following BibTeX entry.

```bibtex
@Misc{peft,
  title =        {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods},
  author =       {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan},
  howpublished = {\url{https://github.com/huggingface/peft}},
  year =         {2022}
}
```
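The model support section above notes that models without out-of-the-box support can still be adapted by naming target modules explicitly. A minimal, hypothetical sketch of that workflow follows; the module class, layer names, and hyperparameters are all made up for illustration.

```python
# Hedged sketch: applying LoRA to a custom (non-transformers) PyTorch module by
# listing its target submodules explicitly.
import torch.nn as nn
from peft import LoraConfig, get_peft_model

class TinyClassifier(nn.Module):  # hypothetical custom model, not from the PEFT repo
    def __init__(self):
        super().__init__()
        self.backbone = nn.Sequential(nn.Linear(64, 128), nn.ReLU(), nn.Linear(128, 128))
        self.head = nn.Linear(128, 3)

    def forward(self, x):
        return self.head(self.backbone(x))

config = LoraConfig(
    target_modules=["backbone.0", "backbone.2"],  # attach LoRA to the two Linear layers
    modules_to_save=["head"],                     # train the classification head fully
    r=8,
    lora_alpha=16,
)
peft_model = get_peft_model(TinyClassifier(), config)
peft_model.print_trainable_parameters()  # only the LoRA matrices and the head are trainable
```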
peft/README.md/0
{ "file_path": "peft/README.md", "repo_id": "peft", "token_count": 3408 }
196
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer. -->

# Soft prompts

Training large pretrained language models is very time-consuming and compute-intensive. As they continue to grow in size, there is increasing interest in more efficient training methods such as *prompting*. Prompting primes a frozen pretrained model for a specific downstream task by including a text prompt that describes the task or even demonstrates an example of the task. With prompting, you can avoid fully training a separate model for each downstream task, and use the same frozen pretrained model instead. This is a lot easier because you can use the same model for several different tasks, and it is significantly more efficient to train and store a smaller set of prompt parameters than to train all the model's parameters.

There are two categories of prompting methods:

- hard prompts are manually handcrafted text prompts with discrete input tokens; the downside is that it requires a lot of effort to create a good prompt
- soft prompts are learnable tensors concatenated with the input embeddings that can be optimized to a dataset; the downside is that they aren't human readable because you aren't matching these "virtual tokens" to the embeddings of a real word

This conceptual guide provides a brief overview of the soft prompt methods included in 🤗 PEFT: prompt tuning, prefix tuning, P-tuning, and multitask prompt tuning.

## Prompt tuning

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/prompt-tuning.png"/>
</div>
<small>Only train and store a significantly smaller set of task-specific prompt parameters <a href="https://hf.co/papers/2104.08691">(image source)</a>.</small>

[Prompt tuning](https://hf.co/papers/2104.08691) was developed for text classification tasks on T5 models, and all downstream tasks are cast as a text generation task. For example, sequence classification usually assigns a single class label to a sequence of text. By casting it as a text generation task, the tokens that make up the class label are *generated*. Prompts are added to the input as a series of tokens. Typically, the model parameters are fixed which means the prompt tokens are also fixed by the model parameters.

The key idea behind prompt tuning is that prompt tokens have their own parameters that are updated independently. This means you can keep the pretrained model's parameters frozen, and only update the gradients of the prompt token embeddings. The results are comparable to the traditional method of training the entire model, and prompt tuning performance scales as model size increases.

Take a look at [Prompt tuning for causal language modeling](../task_guides/clm-prompt-tuning) for a step-by-step guide on how to train a model with prompt tuning.

## Prefix tuning

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/prefix-tuning.png"/>
</div>
<small>Optimize the prefix parameters for each task <a href="https://hf.co/papers/2101.00190">(image source)</a>.</small>

[Prefix tuning](https://hf.co/papers/2101.00190) was designed for natural language generation (NLG) tasks on GPT models.
It is very similar to prompt tuning; prefix tuning also prepends a sequence of task-specific vectors to the input that can be trained and updated while keeping the rest of the pretrained model's parameters frozen.

The main difference is that the prefix parameters are inserted in **all** of the model layers, whereas prompt tuning only adds the prompt parameters to the model input embeddings. The prefix parameters are also optimized by a separate feed-forward network (FFN) instead of training directly on the soft prompts because it causes instability and hurts performance. The FFN is discarded after updating the soft prompts.

As a result, the authors found that prefix tuning demonstrates comparable performance to fully finetuning a model, despite having 1000x fewer parameters, and it performs even better in low-data settings.

Take a look at [Prefix tuning for conditional generation](../task_guides/seq2seq-prefix-tuning) for a step-by-step guide on how to train a model with prefix tuning.

## P-tuning

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/p-tuning.png"/>
</div>
<small>Prompt tokens can be inserted anywhere in the input sequence, and they are optimized by a prompt encoder <a href="https://hf.co/papers/2103.10385">(image source)</a>.</small>

[P-tuning](https://hf.co/papers/2103.10385) is designed for natural language understanding (NLU) tasks and all language models. It is another variation of a soft prompt method; P-tuning also adds a trainable embedding tensor that can be optimized to find better prompts, and it uses a prompt encoder (a bidirectional long-short term memory network or LSTM) to optimize the prompt parameters. Unlike prefix tuning though:

- the prompt tokens can be inserted anywhere in the input sequence, and it isn't restricted to only the beginning
- the prompt tokens are only added to the input instead of adding them to every layer of the model
- introducing *anchor* tokens can improve performance because they indicate characteristics of a component in the input sequence

The results suggest that P-tuning is more efficient than manually crafting prompts, and it enables GPT-like models to compete with BERT-like models on NLU tasks.

Take a look at [P-tuning for sequence classification](../task_guides/ptuning-seq-classification) for a step-by-step guide on how to train a model with P-tuning.

## Multitask prompt tuning

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/mpt.png"/>
</div>
<small><a href="https://hf.co/papers/2303.02861">Multitask prompt tuning enables parameter-efficient transfer learning</a>.</small>

[Multitask prompt tuning (MPT)](https://hf.co/papers/2303.02861) learns a single prompt from data for multiple task types that can be shared for different target tasks. Other existing approaches learn a separate soft prompt for each task that need to be retrieved or aggregated for adaptation to target tasks. MPT consists of two stages:

1. source training - for each task, its soft prompt is decomposed into task-specific vectors. The task-specific vectors are multiplied together to form another matrix W, and the Hadamard product is used between W and a shared prompt matrix P to generate a task-specific prompt matrix. The task-specific prompts are distilled into a single prompt matrix that is shared across all tasks. This prompt is trained with multitask training.
2. target adaptation - to adapt the single prompt for a target task, a target prompt is initialized and expressed as the Hadamard product of the shared prompt matrix and the task-specific low-rank prompt matrix.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/mpt-decomposition.png"/>
</div>
<small><a href="https://hf.co/papers/2103.10385">Prompt decomposition</a>.</small>
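As a rough, hedged illustration of how the methods described above map onto PEFT configuration classes (this sketch is not part of the guide; the base model and every hyperparameter are placeholders):

```python
# Each soft prompt method has a dedicated config class; wrapping a frozen base model
# with get_peft_model leaves only the prompt parameters trainable.
from transformers import AutoModelForCausalLM
from peft import (
    get_peft_model,
    PromptTuningConfig,   # prompt tuning
    PrefixTuningConfig,   # prefix tuning
    PromptEncoderConfig,  # P-tuning (LSTM/MLP prompt encoder)
    TaskType,
)

base = "gpt2"  # assumption: any causal LM works for this illustration

configs = [
    PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=20),
    PrefixTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=20),
    PromptEncoderConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=20, encoder_hidden_size=128),
]

for cfg in configs:
    model = AutoModelForCausalLM.from_pretrained(base)
    model = get_peft_model(model, cfg)
    model.print_trainable_parameters()  # only the soft prompt (and encoder) parameters are trainable
```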
peft/docs/source/conceptual_guides/prompting.md/0
{ "file_path": "peft/docs/source/conceptual_guides/prompting.md", "repo_id": "peft", "token_count": 1830 }
197
<jupyter_start><jupyter_code>from datasets import load_dataset from transformers import set_seed, AutoModelForSeq2SeqLM, AutoTokenizer from peft import get_peft_model, MultitaskPromptTuningConfig, TaskType, MultitaskPromptTuningInit set_seed(42) model_name = "google/flan-t5-base" peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=2, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.TEXT, num_virtual_tokens=50, num_transformer_submodules=1, prompt_tuning_init_text="classify the following into either positive or negative, or entailment, neutral or contradiction:", ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.cuda() def send_to_device(batch): for i in batch: batch[i] = batch[i].cuda() return batch def get_sst2(split: str): examples = load_dataset("sst2")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["sentence"].strip() + "</s>" result_examples[-1]["output"] = ( f"positive{tokenizer.eos_token}" if example["label"] == 1 else f"negative{tokenizer.eos_token}" ) result_examples[-1]["task_id"] = 0 return result_examples def get_mnli(split: str): examples = load_dataset("multi_nli")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["premise"].strip() + " " + example["hypothesis"].strip() + "</s>" if example["label"] == 0: result_examples[-1]["output"] = f"entailment{tokenizer.eos_token}" elif example["label"] == 1: result_examples[-1]["output"] = f"neutral{tokenizer.eos_token}" else: result_examples[-1]["output"] = f"contradiction{tokenizer.eos_token}" result_examples[-1]["task_id"] = 1 return result_examples from typing import Tuple from torch.utils.data import Dataset, DataLoader import torch class MyDataset(Dataset): def __init__(self, split: str, mode: str = "source") -> None: super().__init__() if split == "train": if mode == "source": self.examples = get_sst2(split) + get_mnli(split) elif mode == "target": self.examples = get_sst2(split) if split == "val": self.examples = get_sst2("validation") if split == "test": self.examples = get_sst2("validation") def __getitem__(self, index) -> dict: return self.examples[index] def __len__(self) -> int: return len(self.examples) def __getitem__(self, index) -> dict: return self.examples[index] def __len__(self) -> int: return len(self.examples) def collate_fn(batch: dict) -> Tuple[torch.Tensor, torch.Tensor]: input = [i["input"] for i in batch] input = tokenizer(input, add_special_tokens=False, return_tensors="pt", padding=True) output = [i["output"] for i in batch] output = tokenizer(output, add_special_tokens=False, return_tensors="pt", padding=True).input_ids output[output == tokenizer.pad_token_id] = -100 task_ids = [i["task_id"] for i in batch] task_ids = torch.tensor(task_ids) return { "input_ids": input.input_ids, "attention_mask": input.attention_mask, "labels": output, "task_ids": task_ids, } train = DataLoader(MyDataset("train"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test"), shuffle=False, batch_size=8, collate_fn=collate_fn)<jupyter_output><empty_output><jupyter_text>source training<jupyter_code>from torch.optim.adamw import AdamW from transformers import get_cosine_schedule_with_warmup 
from tqdm import tqdm from sklearn.metrics import f1_score POSITIVE_TOKEN_ID = tokenizer(" positive", add_special_tokens=False)["input_ids"][0] NEGATIVE_TOKEN_ID = tokenizer(" negative", add_special_tokens=False)["input_ids"][0] def classify(batch): batch = send_to_device(batch) # we pass labels here since we need to generate and peft doesn't support generation yet. # No clue how to get around this scores = model(**batch).logits preds = [] for i in range(scores.shape[0]): if scores[i, 0, POSITIVE_TOKEN_ID] > scores[i, 0, NEGATIVE_TOKEN_ID]: preds.append(POSITIVE_TOKEN_ID) else: preds.append(NEGATIVE_TOKEN_ID) return preds @torch.inference_mode() def evaluate(model, data): loss = 0 preds = [] golds = [] for batch in tqdm(data): batch = send_to_device(batch) loss += model(**batch).loss golds.extend(batch["labels"][:, 0].tolist()) preds.extend(classify(batch)) return loss / len(val), f1_score(golds, preds, pos_label=POSITIVE_TOKEN_ID) optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before source training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_source/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss)<jupyter_output><empty_output><jupyter_text>target training<jupyter_code>train = DataLoader(MyDataset("train", "target"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn)<jupyter_output><empty_output><jupyter_text>create a fresh model<jupyter_code>peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=1, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.EXACT_SOURCE_TASK, prompt_tuning_init_state_dict_path="checkpoints_source/50000/adapter_model.bin", num_virtual_tokens=50, num_transformer_submodules=1, ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.cuda() optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before target training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_target/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss) # load last checkpoint for now from peft import set_peft_model_state_dict sd_6000 = torch.load("checkpoints_target/6000/adapter_model.bin") set_peft_model_state_dict(model, sd_6000) # evaluate val val_loss, f1 = evaluate(model, val) print( f""" final val loss = {val_loss} f1 = {f1}""" ) # evaluate test test_loss, f1 = evaluate(model, test) print( f""" final test loss = {test_loss} f1 = {f1}""" )<jupyter_output><empty_output>
peft/examples/conditional_generation/multitask_prompt_tuning.ipynb/0
{ "file_path": "peft/examples/conditional_generation/multitask_prompt_tuning.ipynb", "repo_id": "peft", "token_count": 3341 }
198
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import random from pathlib import Path import datasets import evaluate import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, load_dataset from huggingface_hub import HfApi from torch import nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler from peft import LoraConfig, TaskType, get_peft_model logger = get_logger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Training a PEFT model for Semantic Search task") parser.add_argument("--dataset_name", type=str, default=None, help="dataset name on HF hub") parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--sanity_test", action="store_true", help="Whether to enable sanity test.", ) parser.add_argument( "--use_peft", action="store_true", help="Whether to use PEFT.", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def save_model_hook(models, weights, output_dir): for i, model in enumerate(models): model.save_pretrained(output_dir, state_dict=weights[i]) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): while len(models) > 0: model = models.pop() # pop models so that they are not loaded again if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"): model.load_adapter(input_dir, model.active_adapter, is_trainable=True) class AutoModelForSentenceEmbedding(nn.Module): def __init__(self, model_name, tokenizer, normalize=True): super().__init__() self.model = AutoModel.from_pretrained( model_name ) # , quantizaton_config=BitsAndBytesConfig(load_in_8bit=True), device_map={"":0}) self.normalize = normalize self.tokenizer = tokenizer def forward(self, **kwargs): model_output = self.model(**kwargs) embeddings = self.mean_pooling(model_output, kwargs["attention_mask"]) if self.normalize: embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings def mean_pooling(self, model_output, attention_mask): token_embeddings = model_output[0] # First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def get_cosing_embeddings(query_embs, product_embs): return torch.sum(query_embs * product_embs, axis=1) def get_loss(cosine_score, labels): return torch.mean(torch.square(labels * (1 - 
cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0))) def main(): args = parse_args() accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps} if args.with_tracking: accelerator_kwargs["log_with"] = args.report_to accelerator_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(**accelerator_kwargs) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: api = HfApi(token=args.hub_token) # Create repo (repo_name from args or inferred) repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name repo_id = api.create_repo(repo_name, exist_ok=True).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # get the tokenizer tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # dataset download and preprocessing if args.sanity_test: train_dataset = load_dataset("smangrul/amazon_esci", split="train[:1024]") val_dataset = load_dataset("smangrul/amazon_esci", split="validation[:1024]") dataset = DatasetDict({"train": train_dataset, "validation": val_dataset}) else: dataset = load_dataset(args.dataset_name) def preprocess_function(examples): queries = examples["query"] result = tokenizer(queries, padding="max_length", max_length=70, truncation=True) result = {f"query_{k}": v for k, v in result.items()} products = examples["product_title"] result_products = tokenizer(products, padding="max_length", max_length=70, truncation=True) for k, v in result_products.items(): result[f"product_{k}"] = v result["labels"] = examples["relevance_label"] return result processed_datasets = dataset.map( preprocess_function, batched=True, remove_columns=dataset["train"].column_names, desc="Running tokenizer on dataset", ) # Log a few random samples from the training set: for index in random.sample(range(len(processed_datasets["train"])), 3): logger.info(f"Sample {index} of the training set: {processed_datasets['train'][index]}.") # base model model = AutoModelForSentenceEmbedding(args.model_name_or_path, tokenizer) if args.use_peft: # peft config and wrapping peft_config = LoraConfig( r=8, lora_alpha=16, bias="none", task_type=TaskType.FEATURE_EXTRACTION, target_modules=["key", "query", "value"], ) model = get_peft_model(model, peft_config) model.print_trainable_parameters() accelerator.print(model) # get dataloaders train_dataloader = DataLoader( processed_datasets["train"], shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size, pin_memory=True, ) eval_dataloader = DataLoader( processed_datasets["validation"], shuffle=False, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size, 
pin_memory=True, ) optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("peft_semantic_search", experiment_config) metric = evaluate.load("roc_auc") total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps if args.use_peft: # saving and loading checkpoints for resuming training accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) logger.info("***** Running training *****") logger.info(f" Num examples = {len(processed_datasets['train'])}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): with accelerator.accumulate(model): query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k}) product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k}) loss = get_loss(get_cosing_embeddings(query_embs, product_embs), batch["labels"]) total_loss += accelerator.reduce(loss.detach().float(), reduction="sum") accelerator.backward(loss) optimizer.step() lr_scheduler.step() model.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if (step + 1) % 100 == 0: logger.info(f"Step: {step+1}, Loss: {total_loss/(step+1)}") if args.with_tracking: accelerator.log({"train/loss": total_loss / (step + 1)}, step=completed_steps) if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps }" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() for step, batch in enumerate(eval_dataloader): with torch.no_grad(): query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k}) product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k}) prediction_scores = get_cosing_embeddings(query_embs, product_embs) prediction_scores, references = accelerator.gather_for_metrics((prediction_scores, batch["labels"])) metric.add_batch( prediction_scores=prediction_scores, references=references, ) result = metric.compute() result = {f"eval/{k}": v 
for k, v in result.items()} # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", result) if args.with_tracking: result["train/epoch_loss"] = total_loss.item() / len(train_dataloader) accelerator.log(result, step=completed_steps) if args.output_dir is not None: accelerator.wait_for_everyone() if accelerator.is_main_process: if isinstance(checkpointing_steps, str): accelerator.save_state(os.path.join(args.output_dir, f"epoch_{epoch}")) accelerator.unwrap_model(model).save_pretrained( args.output_dir, state_dict=accelerator.get_state_dict(accelerator.unwrap_model(model)) ) tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: commit_message = ( f"Training in progress epoch {epoch}" if epoch < args.num_train_epochs - 1 else "End of training" ) api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message=commit_message, run_as_future=True, ) accelerator.wait_for_everyone() accelerator.end_training() if __name__ == "__main__": main()
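A possible inference-time companion to this training script is sketched below. It is an assumption, not part of the script: it reuses the `AutoModelForSentenceEmbedding` wrapper defined above, and the base model name and adapter path are placeholders that must match what was used during training.

```python
# Hedged sketch: reload the trained LoRA adapter and rank products for a query by cosine similarity.
import torch
from transformers import AutoTokenizer
from peft import PeftModel

base_model_name = "sentence-transformers/all-MiniLM-L6-v2"  # placeholder: the base used for training
adapter_path = "semantic-search-output"                     # placeholder: the --output_dir used at training time

tokenizer = AutoTokenizer.from_pretrained(base_model_name)
base = AutoModelForSentenceEmbedding(base_model_name, tokenizer)  # wrapper class defined in this script
model = PeftModel.from_pretrained(base, adapter_path).eval()

def embed(texts):
    # Same preprocessing as training: pad/truncate to 70 tokens
    batch = tokenizer(texts, padding="max_length", max_length=70, truncation=True, return_tensors="pt")
    with torch.no_grad():
        return model(**batch)  # wrapper returns mean-pooled, L2-normalized embeddings

query_embs = embed(["running shoes for flat feet"])
product_embs = embed(["Motion-control running shoe", "Ceramic coffee mug"])
print(query_embs @ product_embs.T)  # higher score = more relevant product
```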
peft/examples/feature_extraction/peft_lora_embedding_semantic_search.py/0
{ "file_path": "peft/examples/feature_extraction/peft_lora_embedding_semantic_search.py", "repo_id": "peft", "token_count": 8771 }
199
<jupyter_start><jupyter_text>Using PEFT with timm `peft` allows us to train any model with LoRA as long as the layer type is supported. Since `Conv2D` is one of the supported layer types, it makes sense to test it on image models.In this short notebook, we will demonstrate this with an image classification task using [`timm`](https://huggingface.co/docs/timm/index). Imports Make sure that you have the latest version of `peft` installed. To ensure that, run this in your Python environment: python -m pip install --upgrade peft Also, ensure that `timm` is installed: python -m pip install --upgrade timm<jupyter_code>import timm import torch from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform import peft from datasets import load_dataset torch.manual_seed(0)<jupyter_output><empty_output><jupyter_text>Loading the pre-trained base model We use a small pretrained `timm` model, `PoolFormer`. Find more info on its [model card](https://huggingface.co/timm/poolformer_m36.sail_in1k).<jupyter_code>model_id_timm = "timm/poolformer_m36.sail_in1k"<jupyter_output><empty_output><jupyter_text>We tell `timm` that we deal with 3 classes, to ensure that the classification layer has the correct size.<jupyter_code>model = timm.create_model(model_id_timm, pretrained=True, num_classes=3)<jupyter_output><empty_output><jupyter_text>These are the transformations steps necessary to process the image.<jupyter_code>transform = create_transform(**resolve_data_config(model.pretrained_cfg, model=model))<jupyter_output><empty_output><jupyter_text>Data For this exercise, we use the "beans" dataset. More details on the dataset can be found on [its datasets page](https://huggingface.co/datasets/beans). For our purposes, what's important is that we have image inputs and the target we're trying to predict is one of three classes for each image.<jupyter_code>ds = load_dataset("beans") ds_train = ds["train"] ds_valid = ds["validation"] ds_train[0]["image"]<jupyter_output><empty_output><jupyter_text>We define a small processing function which is responsible for loading and transforming the images, as well as extracting the labels.<jupyter_code>def process(batch): x = torch.cat([transform(img).unsqueeze(0) for img in batch["image"]]) y = torch.tensor(batch["labels"]) return {"x": x, "y": y} ds_train.set_transform(process) ds_valid.set_transform(process) train_loader = torch.utils.data.DataLoader(ds_train, batch_size=32) valid_loader = torch.utils.data.DataLoader(ds_valid, batch_size=32)<jupyter_output><empty_output><jupyter_text>Training This is just a function that performs the train loop, nothing fancy happening.<jupyter_code>def train(model, optimizer, criterion, train_dataloader, valid_dataloader, epochs): for epoch in range(epochs): model.train() train_loss = 0 for batch in train_dataloader: xb, yb = batch["x"], batch["y"] xb, yb = xb.to(device), yb.to(device) outputs = model(xb) lsm = torch.nn.functional.log_softmax(outputs, dim=-1) loss = criterion(lsm, yb) train_loss += loss.detach().float() loss.backward() optimizer.step() optimizer.zero_grad() model.eval() valid_loss = 0 correct = 0 n_total = 0 for batch in valid_dataloader: xb, yb = batch["x"], batch["y"] xb, yb = xb.to(device), yb.to(device) with torch.no_grad(): outputs = model(xb) lsm = torch.nn.functional.log_softmax(outputs, dim=-1) loss = criterion(lsm, yb) valid_loss += loss.detach().float() correct += (outputs.argmax(-1) == yb).sum().item() n_total += len(yb) train_loss_total = (train_loss / 
len(train_dataloader)).item() valid_loss_total = (valid_loss / len(valid_dataloader)).item() valid_acc_total = correct / n_total print(f"{epoch=:<2} {train_loss_total=:.4f} {valid_loss_total=:.4f} {valid_acc_total=:.4f}")<jupyter_output><empty_output><jupyter_text>Selecting which layers to fine-tune with LoRA Let's take a look at the layers of our model. We only print the first 30, since there are quite a few:<jupyter_code>[(n, type(m)) for n, m in model.named_modules()][:30]<jupyter_output><empty_output><jupyter_text>Most of these layers are not good targets for LoRA, but we see a couple that should interest us. Their names are `'stages.0.blocks.0.mlp.fc1'`, etc. With a bit of regex, we can match them easily.Also, we should inspect the name of the classification layer, since we want to train that one too!<jupyter_code>[(n, type(m)) for n, m in model.named_modules()][-5:]<jupyter_output><empty_output><jupyter_text>config = peft.LoraConfig( r=8, target_modules=r".*\.mlp\.fc\d|head\.fc", ) Okay, this gives us all the information we need to fine-tune this model. With a bit of regex, we match the convolutional layers that should be targeted for LoRA. We also want to train the classification layer `'head.fc'` (without LoRA), so we add it to the `modules_to_save`.<jupyter_code>config = peft.LoraConfig(r=8, target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"])<jupyter_output><empty_output><jupyter_text>Finally, let's create the `peft` model, the optimizer and criterion, and we can get started. As shown below, less than 2% of the model's total parameters are updated thanks to `peft`.<jupyter_code>device = "cuda" if torch.cuda.is_available() else "cpu" peft_model = peft.get_peft_model(model, config).to(device) optimizer = torch.optim.Adam(peft_model.parameters(), lr=2e-4) criterion = torch.nn.CrossEntropyLoss() peft_model.print_trainable_parameters() %time train(peft_model, optimizer, criterion, train_loader, valid_dataloader=valid_loader, epochs=10)<jupyter_output>epoch=0 train_loss_total=1.2999 valid_loss_total=1.0624 valid_acc_total=0.4436 epoch=1 train_loss_total=1.0200 valid_loss_total=0.8906 valid_acc_total=0.7594 epoch=2 train_loss_total=0.8874 valid_loss_total=0.6894 valid_acc_total=0.8045 epoch=3 train_loss_total=0.7440 valid_loss_total=0.4797 valid_acc_total=0.8045 epoch=4 train_loss_total=0.6025 valid_loss_total=0.3419 valid_acc_total=0.8120 epoch=5 train_loss_total=0.4820 valid_loss_total=0.2589 valid_acc_total=0.8421 epoch=6 train_loss_total=0.3567 valid_loss_total=0.2101 valid_acc_total=0.8722 epoch=7 train_loss_total=0.2835 valid_loss_total=0.1385 valid_acc_total=0.9098 epoch=8 train_loss_total=0.1815 valid_loss_total=0.1108 valid_acc_total=0.9474 epoch=9 train_loss_total=0.1341 valid_loss_total=0.0785 valid_acc_total=0.9699 CPU times: user 4min 3s, sys: 36.3 s, total: 4min 40s Wall time: 3min 32s<jupyter_text>We get an accuracy of ~0.97, despite only training a tiny amount of parameters. That's a really nice result. Sharing the model through Hugging Face Hub Pushing the model to Hugging Face Hub If we want to share the fine-tuned weights with the world, we can upload them to Hugging Face Hub like this:<jupyter_code>user = "BenjaminB" # put your user name here model_name = "peft-lora-with-timm-model" model_id = f"{user}/{model_name}" peft_model.push_to_hub(model_id);<jupyter_output><empty_output><jupyter_text>As we can see, the adapter size is only 4.3 MB. The original model was 225 MB. That's a very big saving. 
Loading the model from HF Hub Now, it only takes one step to load the model from HF Hub. To do this, we can use `PeftModel.from_pretrained`, passing our base model and the model ID:<jupyter_code>base_model = timm.create_model(model_id_timm, pretrained=True, num_classes=3) loaded = peft.PeftModel.from_pretrained(base_model, model_id) x = ds_train[:1]["x"] y_peft = peft_model(x.to(device)) y_loaded = loaded(x) torch.allclose(y_peft.cpu(), y_loaded)<jupyter_output><empty_output><jupyter_text>Clean up Finally, as a clean up step, you may want to delete the repo.<jupyter_code>from huggingface_hub import delete_repo delete_repo(model_id)<jupyter_output><empty_output>
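One optional follow-up, not covered in the notebook: if the LoRA configuration supports it, the adapter can be merged back into the base timm model so that inference no longer needs the PEFT wrapper. A hedged sketch, reusing `loaded`, `x`, and `y_loaded` from the cells above (the output filename is a placeholder):

```python
# Merge the LoRA weights into the base model and sanity-check that outputs are unchanged.
merged = loaded.merge_and_unload()
merged.eval()

with torch.no_grad():
    y_merged = merged(x)
print(torch.allclose(y_loaded, y_merged, atol=1e-5))  # expected: True (up to numerical tolerance)

# Export a plain state dict that can be loaded without peft installed.
torch.save(merged.state_dict(), "poolformer_beans_lora_merged.pth")
```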
peft/examples/image_classification/image_classification_timm_peft_lora.ipynb/0
{ "file_path": "peft/examples/image_classification/image_classification_timm_peft_lora.ipynb", "repo_id": "peft", "token_count": 3067 }
200
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfApi from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoraConfig, get_peft_model # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) UNET_TARGET_MODULES = ["to_q", "to_v", "query", "value"] # , "ff.net.0.proj"] TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"] def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. 
If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") # lora args parser.add_argument("--use_lora", action="store_true", help="Whether to use Lora for parameter efficient tuning") parser.add_argument("--lora_r", type=int, default=8, help="Lora rank, only used if use_lora is True") parser.add_argument("--lora_alpha", type=int, default=32, help="Lora alpha, only used if use_lora is True") parser.add_argument("--lora_dropout", type=float, default=0.0, help="Lora dropout, only used if use_lora is True") parser.add_argument( "--lora_bias", type=str, default="none", help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora is True", ) parser.add_argument( "--lora_text_encoder_r", type=int, default=8, help="Lora rank for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_alpha", type=int, default=32, help="Lora alpha for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_dropout", type=float, default=0.0, help="Lora dropout for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_bias", type=str, default="none", help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora and `train_text_encoder` are True", ) parser.add_argument( "--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader." ) parser.add_argument( "--no_tracemalloc", default=False, action="store_true", help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. 
Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # 
print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt_ids"] = self.tokenizer( self.class_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def collate_fn(examples, with_prior_preservation=False): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) if args.report_to == "wandb": import wandb wandb.login(key=args.wandb_key) wandb.init(project=args.wandb_project_name) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. 
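# These synthetic class samples anchor the prior-preservation loss so the fine-tuned model keeps its notion of the generic class.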
if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: api = HfApi(token=args.hub_token) # Create repo (repo_name from args or inferred) repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name repo_id = api.create_repo(repo_name, exist_ok=True).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, ) # DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if args.use_lora: config = LoraConfig( r=args.lora_r, lora_alpha=args.lora_alpha, target_modules=UNET_TARGET_MODULES, lora_dropout=args.lora_dropout, bias=args.lora_bias, ) unet = get_peft_model(unet, config) unet.print_trainable_parameters() print(unet) 
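# Freeze the VAE; the text encoder stays frozen as well unless --train_text_encoder is set, in which case it can optionally receive its own LoRA adapter.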
vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) elif args.train_text_encoder and args.use_lora: config = LoraConfig( r=args.lora_text_encoder_r, lora_alpha=args.lora_text_encoder_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, lora_dropout=args.lora_text_encoder_dropout, bias=args.lora_text_encoder_bias, ) text_encoder = get_peft_model(text_encoder, config) text_encoder.print_trainable_parameters() print(text_encoder) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # below fails when using lora so commenting it out if args.train_text_encoder and not args.use_lora: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.num_dataloader_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
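# Only the modules that receive gradients need to be wrapped by accelerate; the frozen text encoder is deliberately left out of prepare().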
if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initialize automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = resume_global_step // num_update_steps_per_epoch resume_step = resume_global_step % num_update_steps_per_epoch # Only show the progress bar once on each machine.
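# The bar counts optimizer steps rather than dataloader iterations, so it advances once per gradient-accumulation cycle.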
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc: for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
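# prior_loss_weight trades off fidelity to the instance images against preserving the generic class prior.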
loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 # if global_step % args.checkpointing_steps == 0: # if accelerator.is_main_process: # save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") # accelerator.save_state(save_path) # logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if ( args.validation_prompt is not None and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0 ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision, ) # set `keep_fp32_wrapper` to True because we do not want to remove # mixed precision hooks while we are still training pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference if args.seed is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) else: generator = None images = [] for _ in range(args.num_validation_images): image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": import wandb tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() if global_step >= args.max_train_steps: break # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage if not args.no_tracemalloc: accelerator.print(f"GPU Memory before entering the train : {b2mb(tracemalloc.begin)}") accelerator.print(f"GPU Memory consumed at the end of the train (end-begin): {tracemalloc.used}") accelerator.print(f"GPU Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}") accelerator.print( f"GPU Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}") accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}") accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}") accelerator.print( f"CPU Total Peak Memory 
consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}" ) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: if args.use_lora: unwrapped_unet = accelerator.unwrap_model(unet) unwrapped_unet.save_pretrained( os.path.join(args.output_dir, "unet"), state_dict=accelerator.get_state_dict(unet) ) if args.train_text_encoder: unwrapped_text_encoder = accelerator.unwrap_model(text_encoder) unwrapped_text_encoder.save_pretrained( os.path.join(args.output_dir, "text_encoder"), state_dict=accelerator.get_state_dict(text_encoder), ) else: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", run_as_future=True, ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
peft/examples/lora_dreambooth/train_dreambooth.py/0
{ "file_path": "peft/examples/lora_dreambooth/train_dreambooth.py", "repo_id": "peft", "token_count": 20030 }
201
<jupyter_start><jupyter_text>Using FourierFT for sequence classification In this example, we fine-tune Roberta (base) on a sequence classification task using FourierFT. Imports<jupyter_code># To run this notebook, please run `pip install evaluate` to install additional dependencies not covered by PEFT. import torch from torch.optim import AdamW from torch.utils.data import DataLoader from peft import ( get_peft_model, FourierFTConfig, PeftType, ) import evaluate from datasets import load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, AutoConfig from tqdm import tqdm<jupyter_output>/home/zgaoat/anaconda3/envs/pr2/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Parameters<jupyter_code>batch_size = 32 model_name_or_path = "roberta-base" task = "mrpc" peft_type = PeftType.FOURIERFT device = "cuda" if torch.cuda.is_available() else "cpu" num_epochs = 5 # for better results, increase this number n_frequency = 1000 # for better results, increase this number scaling = 150.0 max_length = 512 torch.manual_seed(0) peft_config = FourierFTConfig( task_type="SEQ_CLS", n_frequency=n_frequency, target_modules=["query", "value"], scaling = scaling, ) head_lr = 6e-3 # the learning rate for the classification head for NLU tasks fft_lr = 6e-2 # the learning rate for the parameters other than the classification head (q,v in this case)<jupyter_output><empty_output><jupyter_text>Loading data<jupyter_code>if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")): padding_side = "left" else: padding_side = "right" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side) if getattr(tokenizer, "pad_token_id") is None: tokenizer.pad_token_id = tokenizer.eos_token_id datasets = load_dataset("glue", task) metric = evaluate.load("glue", task) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=max_length) return outputs tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )<jupyter_output><empty_output><jupyter_text>Preparing the FourierFT model<jupyter_code>model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None) model = get_peft_model(model, peft_config) model.print_trainable_parameters() head_param = list(map(id, model.classifier.parameters())) others_param = filter(lambda p: id(p) not in head_param, model.parameters()) optimizer = AdamW([ {"params": model.classifier.parameters(), "lr": head_lr}, {"params": others_param, "lr": fft_lr} ],weight_decay=0.) 
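# Two parameter groups: the randomly initialized classification head uses head_lr, while the FourierFT parameters on the query/value projections use fft_lr.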
# Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs), num_training_steps=(len(train_dataloader) * num_epochs), )<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>model.to(device) for epoch in range(num_epochs): model.train() for step, batch in enumerate(tqdm(train_dataloader)): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/115 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 100%|██████████| 115/115 [00:06<00:00, 19.03it/s] 100%|██████████| 13/13 [00:00<00:00, 41.72it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>account_id = ... # your Hugging Face Hub account ID model.push_to_hub(f"{account_id}/roberta-base-mrpc-peft-fourierft")<jupyter_output>/home/zgaoat/anaconda3/envs/pr2/lib/python3.11/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. warnings.warn(<jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoTokenizer peft_model_id = f"{account_id}/roberta-base-mrpc-peft-fourierft" config = PeftConfig.from_pretrained(peft_model_id) inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the FourierFT model inference_model = PeftModel.from_pretrained(inference_model, peft_model_id, config=config) inference_model.to(device) inference_model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = inference_model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(eval_metric)<jupyter_output>0%| | 0/13 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 100%|██████████| 13/13 [00:00<00:00, 43.06it/s]
peft/examples/sequence_classification/FourierFT.ipynb/0
{ "file_path": "peft/examples/sequence_classification/FourierFT.ipynb", "repo_id": "peft", "token_count": 2571 }
202
python train.py \ --seed 100 \ --model_name_or_path "mistralai/Mistral-7B-v0.1" \ --dataset_name "smangrul/ultrachat-10k-chatml" \ --chat_template_format "chatml" \ --add_special_tokens False \ --append_concat_token False \ --splits "train,test" \ --max_seq_len 2048 \ --num_train_epochs 1 \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ --evaluation_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ --hub_strategy "every_save" \ --bf16 True \ --packing True \ --learning_rate 1e-4 \ --lr_scheduler_type "cosine" \ --weight_decay 1e-4 \ --warmup_ratio 0.0 \ --max_grad_norm 1.0 \ --output_dir "mistral-sft-lora" \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --gradient_accumulation_steps 8 \ --gradient_checkpointing True \ --use_reentrant True \ --dataset_text_field "content" \ --use_peft_lora True \ --lora_r 8 \ --lora_alpha 16 \ --lora_dropout 0.1 \ --lora_target_modules "all-linear" \ --use_4bit_quantization True \ --use_nested_quant True \ --bnb_4bit_compute_dtype "bfloat16" \ --use_flash_attn True
peft/examples/sft/run_peft.sh/0
{ "file_path": "peft/examples/sft/run_peft.sh", "repo_id": "peft", "token_count": 458 }
203
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel from .lora import LoraConfig, LoraModel, LoftQConfig, LoraRuntimeConfig from .loha import LoHaConfig, LoHaModel from .lokr import LoKrConfig, LoKrModel from .ia3 import IA3Config, IA3Model from .adalora import AdaLoraConfig, AdaLoraModel from .boft import BOFTConfig, BOFTModel from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType from .prefix_tuning import PrefixEncoder, PrefixTuningConfig from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit from .oft import OFTConfig, OFTModel from .mixed import MixedModel from .poly import PolyConfig, PolyModel from .ln_tuning import LNTuningConfig, LNTuningModel from .vera import VeraConfig, VeraModel from .fourierft import FourierFTConfig, FourierFTModel from .xlora import XLoraConfig, XLoraModel from .hra import HRAConfig, HRAModel
peft/src/peft/tuners/__init__.py/0
{ "file_path": "peft/src/peft/tuners/__init__.py", "repo_id": "peft", "token_count": 534 }
204
#include <torch/torch.h> #include <vector> #include <iostream> #include <torch/extension.h> std::vector<at::Tensor> forward_fast_block_diag_cuda( at::Tensor input); std::vector<at::Tensor> forward_fast_block_diag( at::Tensor input ) { return forward_fast_block_diag_cuda(input); } std::vector<at::Tensor> backward_fast_block_diag_cuda( at::Tensor grad_output, at::Tensor input); std::vector<at::Tensor> backward_fast_block_diag( at::Tensor grad_output, at::Tensor input ) { return backward_fast_block_diag_cuda(grad_output, input); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("forward", &forward_fast_block_diag, "FAST BLOCK DIAG (CUDA)"); m.def("backward", &backward_fast_block_diag, "FAST BLOCK DIAG backward (CUDA)"); }
peft/src/peft/tuners/boft/fbd/fbd_cuda.cpp/0
{ "file_path": "peft/src/peft/tuners/boft/fbd/fbd_cuda.cpp", "repo_id": "peft", "token_count": 370 }
205
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import re import warnings from dataclasses import asdict, replace from enum import Enum from typing import Optional import torch from torch import nn from transformers.pytorch_utils import Conv1D from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import ( TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _freeze_adapter, _get_submodules, ) from .layer import Conv2d, IA3Layer, Linear class IA3Model(BaseTuner): """ Creates a Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638 Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. config ([`IA3Config`]): The configuration of the (IA)^3 model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The (IA)^3 model. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM, ia3Config >>> from peft import IA3Model, IA3Config >>> config = IA3Config( ... peft_type="IA3", ... task_type="SEQ_2_SEQ_LM", ... target_modules=["k", "v", "w0"], ... feedforward_modules=["w0"], ... ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> ia3_model = IA3Model(config, model) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`ia3Config`]): The configuration of the (IA)^3 model. 
""" prefix: str = "ia3_" def __init__(self, model, config, adapter_name): super().__init__(model, config, adapter_name) @staticmethod def _create_new_module(ia3_config, adapter_name, target, **kwargs): # avoid eager bnb import if is_bnb_available(): import bitsandbytes as bnb from .bnb import Linear8bitLt if is_bnb_4bit_available(): from .bnb import Linear4bit loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) is_feedforward = kwargs.pop("is_feedforward", False) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): eightbit_kwargs = kwargs.copy() eightbit_kwargs.update( { "has_fp16_weights": target_base_layer.state.has_fp16_weights, "memory_efficient_backward": target_base_layer.state.memory_efficient_backward, "threshold": target_base_layer.state.threshold, "index": target_base_layer.index, } ) new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs) elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target_base_layer.compute_dtype, "compress_statistics": target_base_layer.weight.compress_statistics, "quant_type": target_base_layer.weight.quant_type, } ) new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs) elif isinstance(target, torch.nn.Conv2d): new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = False new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs) elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True new_module = Linear( target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs ) else: raise ValueError( f"Target module {target} is not supported. " f"Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported." 
) return new_module @staticmethod def _check_target_module_exists(ia3_config, key): return check_target_module_exists(ia3_config, key) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False def _create_and_replace( self, ia3_config, adapter_name, target, target_name, parent, current_key, ): # check if target module is in feedforward_modules is_feedforward = self._check_target_module_feedforward(ia3_config, current_key) kwargs = { "fan_in_fan_out": ia3_config.fan_in_fan_out, "init_ia3_weights": ia3_config.init_ia3_weights, "is_feedforward": is_feedforward, "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), } if isinstance(target, IA3Layer): target.update_layer( adapter_name, ia3_config.init_ia3_weights, ) else: new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs) if adapter_name not in self.active_adapters: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) @staticmethod def _check_target_module_feedforward(ia3_config, key) -> bool: """ A helper private method that checks if the target module `key` matches with a feedforward module specified in `ia3_config` """ if isinstance(ia3_config.feedforward_modules, str): is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key)) else: is_feedforward = any(key.endswith(target_key) for target_key in ia3_config.feedforward_modules) return is_feedforward def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer # layers with base_layer don't need the weight to be copied, as they have a reference already if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if self.prefix in name: module.to(child.weight.device) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (IA3Layer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. 
""" self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, IA3Layer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[model_config["model_type"]] ) if peft_config.feedforward_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING: raise ValueError("Please specify `feedforward_modules` in `peft_config`") peft_config.feedforward_modules = set( TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge: bool = True, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ): r""" This method merges the (IA)^3 layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: safe_merge (`bool`, `optional`, defaults to `False`): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if getattr(self.model, "is_loaded_in_8bit", False): raise ValueError("Cannot merge ia3 layers when the model is loaded in 8-bit mode") if getattr(self.model, "is_loaded_in_4bit", False): raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode") self._unloading_checks(adapter_names) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] for key in key_list: try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` new_module = target.modules_to_save[target.active_adapter] if hasattr(new_module, "base_layer"): # check if the module is itself a tuner layer if merge: new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names) new_module = new_module.get_base_layer() setattr(parent, target_name, new_module) return self.model def merge_and_unload(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> torch.nn.Module: r""" This method merges the IA³ layers into the base model. 
This is needed if someone wants to use the base model as a standalone model. Args: safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" >>> model = PeftModel.from_pretrained(base_model, peft_model_id) >>> merged_model = model.merge_and_unload() ``` """ return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the IA³ modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, IA3Layer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] def _check_add_weighted_adapter(self, adapters: list[str]) -> tuple[str, str]: """ Helper function to check if the arguments to add_weighted_adapter are valid and compatible with the underlying model. 
""" # Validate existence of adapters for adapter in adapters: if adapter not in self.peft_config: raise ValueError(f"Adapter {adapter} does not exist") # Check for conflicting modules_to_save modules_to_save_wrappers = [module for module in self.modules() if isinstance(module, ModulesToSaveWrapper)] if any( sum(adapter in wrapper.modules_to_save for adapter in adapters) > 1 for wrapper in modules_to_save_wrappers ): raise ValueError("Cannot add weighted adapters targeting the same module with modules_to_save.") # Ensure all adapters have compatible target and feedforward module types target_module_types = {type(self.peft_config[adapter].target_modules) for adapter in adapters} feedforward_module_types = {type(self.peft_config[adapter].feedforward_modules) for adapter in adapters} if len(target_module_types) > 1 or len(feedforward_module_types) > 1: raise ValueError("All adapter configs should have the same type for target and feedforward modules.") # Combine target and feedforward modules if str in target_module_types: new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters) else: new_target_modules = set.union(*(self.peft_config[adapter].target_modules for adapter in adapters)) if str in feedforward_module_types: new_feedforward_modules = "|".join( f"({self.peft_config[adapter].feedforward_modules})" for adapter in adapters ) else: new_feedforward_modules = set.union( *(self.peft_config[adapter].feedforward_modules for adapter in adapters) ) return new_target_modules, new_feedforward_modules def add_weighted_adapter( self, adapters: list[str], weights: list[float], adapter_name: str, ) -> None: """ This method adds a new adapter by merging the given adapters with the given weights. Args: adapters (`list`): List of adapter names to be merged. weights (`list`): List of weights for each adapter. adapter_name (`str`): Name of the new adapter. """ if adapter_name in list(self.peft_config.keys()): return new_target_modules, new_feedforward_modules = self._check_add_weighted_adapter( adapters=adapters, ) self.peft_config[adapter_name] = replace( self.peft_config[adapters[0]], target_modules=new_target_modules, feedforward_modules=new_feedforward_modules, ) self.inject_adapter(self.model, adapter_name) # Do we really need that? _freeze_adapter(self.model, adapter_name) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, IA3Layer): if adapter_name in target.ia3_l: target_ia3_l = target.ia3_l[adapter_name] else: continue target_ia3_l.data = target_ia3_l.data.zero_() for adapter, weight in zip(adapters, weights): if adapter in target.ia3_l: current_adapter_ia3_l = target.ia3_l[adapter] else: continue target_ia3_l.data += current_adapter_ia3_l.data * weight
peft/src/peft/tuners/ia3/model.py/0
{ "file_path": "peft/src/peft/tuners/ia3/model.py", "repo_id": "peft", "token_count": 9266 }
206
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Any, Optional import bitsandbytes as bnb import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import dequantize_bnb_weight from peft.utils.other import transpose from .layer import LoraLayer if is_bnb_available(): class Linear8bitLt(torch.nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self.fan_in_fan_out = False self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Merge lora module to 8-bit linear may get different generations due to rounding errors." ) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB # Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8 # dequantization directly output = dequantize_bnb_weight(weight, state=state) if not self.use_dora[active_adapter]: w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data else: # handle dora # since output already includes scaling, set it to 1 here weight_norm = ( self.lora_magnitude_vector[active_adapter] .get_weight_norm(output, lora_data, scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. 
We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm w_data = dora_factor.view(-1, 1) * (output + lora_data) if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Unmerge lora module to 8-bit linear may get different generations due to rounding errors." ) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB output = dequantize_bnb_weight(weight, state=state) if not self.use_dora[active_adapter]: w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm w_data = output.data / dora_factor.view(-1, 1) - lora_data self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() def get_delta_weight(self, adapter): return ( transpose( self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False, ) * self.scaling[adapter] ) def _mixed_batch_forward( self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any ) -> torch.Tensor: # This is a special method that handles the case when users pass the argument `adapter_names`. This is an # extra argument that allows mixing different adapters in the same batch at inference time. 
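# Rows tagged "__base__" keep the plain base-layer output; every other sub-batch is routed through its own adapter's LoRA A/B pair and added back at the matching indices.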
result = self.base_layer(x, *args, **kwargs) unique_adapters = set(adapter_names) sub_batch_indices_list = [] for adapter in unique_adapters: sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) for i, active_adapter in enumerate(unique_adapters): if active_adapter == "__base__": continue if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype compute_dtype = lora_A.weight.dtype if x.dtype != compute_dtype: x = x.to(compute_dtype) # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear # layer output sub_batch = x[sub_batch_indices_list[i]] output = lora_B(lora_A(dropout(sub_batch))) * scaling if requires_conversion: output = output.to(expected_dtype) result[sub_batch_indices_list[i]] += output return result def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype compute_dtype = lora_A.weight.dtype if x.dtype != compute_dtype: x = x.to(compute_dtype) if not self.use_dora[active_adapter]: output = lora_B(lora_A(dropout(x))) * scaling else: x = dropout(x) output = self.lora_magnitude_vector[active_adapter]( x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer(), ) if requires_conversion: output = output.to(expected_dtype) result = result + output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target loaded_in_8bit = kwargs.get("loaded_in_8bit", False) if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): eightbit_kwargs = kwargs.copy() eightbit_kwargs.update( { "has_fp16_weights": target.state.has_fp16_weights, "memory_efficient_backward": target.state.memory_efficient_backward, "threshold": target.state.threshold, "index": target.index, } ) new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs) return new_module if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self.fan_in_fan_out = False self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Merge lora module to 4-bit linear may get different generations due to rounding errors." ) # Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930 weight = self.get_base_layer().weight kwargs = weight.__dict__ lora_data = self.get_delta_weight(active_adapter) output = dequantize_bnb_weight(weight, state=weight.quant_state) if not self.use_dora[active_adapter]: w_data = output + lora_data else: # handle dora # since output already includes scaling, set it to 1 here weight_norm = ( self.lora_magnitude_vector[active_adapter] .get_weight_norm(output, lora_data, scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm w_data = dora_factor.view(-1, 1) * (output + lora_data) if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) if "bnb_quantized" in kwargs: kwargs["bnb_quantized"] = False kwargs["requires_grad"] = False kwargs.pop("data", None) self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Unmerge lora module to 4-bit linear may get different generations due to rounding errors." ) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight kwargs = weight.__dict__ output = dequantize_bnb_weight(weight, state=weight.quant_state) if not self.use_dora[active_adapter]: w_data = output - lora_data else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm w_data = output.data / dora_factor.view(-1, 1) - lora_data if "bnb_quantized" in kwargs: kwargs["bnb_quantized"] = False kwargs["requires_grad"] = False kwargs.pop("data", None) self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device) def get_delta_weight(self, adapter): return ( transpose( self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False, ) * self.scaling[adapter] ) def _mixed_batch_forward( self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any ) -> torch.Tensor: # This is a special method that handles the case when users pass the argument `adapter_names`. This is an # extra argument that allows mixing different adapters in the same batch at inference time. result = self.base_layer(x, *args, **kwargs) unique_adapters = set(adapter_names) sub_batch_indices_list = [] for adapter in unique_adapters: sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) for i, active_adapter in enumerate(unique_adapters): if active_adapter == "__base__": continue if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear # layer output sub_batch = x[sub_batch_indices_list[i]] output = lora_B(lora_A(dropout(sub_batch))) * scaling if requires_conversion: output = output.to(expected_dtype) result[sub_batch_indices_list[i]] += output return result def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) # As per Tim Dettmers, for 4bit, we need to defensively clone here. 
# The reason is that in some cases, an error can occur that backprop # does not work on a manipulated view. This issue may be solved with # newer PyTorch versions but this would need extensive testing to be # sure. result = result.clone() for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) if not self.use_dora[active_adapter]: output = lora_B(lora_A(dropout(x))) * scaling else: x = dropout(x) output = self.lora_magnitude_vector[active_adapter]( x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer(), ) if requires_conversion: output = output.to(expected_dtype) result = result + output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target loaded_in_4bit = kwargs.get("loaded_in_4bit", False) if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target_base_layer.compute_dtype, "compress_statistics": target_base_layer.weight.compress_statistics, "quant_type": target_base_layer.weight.quant_type, } ) new_module = Linear4bit(target, adapter_name, **fourbit_kwargs) return new_module
peft/src/peft/tuners/lora/bnb.py/0
{ "file_path": "peft/src/peft/tuners/lora/bnb.py", "repo_id": "peft", "token_count": 12146 }
207
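The bnb.py module above wires LoRA adapters into bitsandbytes 8-bit and 4-bit linear layers via the `dispatch_bnb_8bit`/`dispatch_bnb_4bit` helpers. Below is a minimal, hedged sketch of how these dispatch paths are typically exercised; the checkpoint name and hyperparameters are illustrative assumptions, and an actual run requires a CUDA-capable machine with bitsandbytes installed.

```python
# Sketch: attach a LoRA adapter to an 8-bit quantized model so that the
# Linear8bitLt wrapper defined above is dispatched for the targeted modules.
# "facebook/opt-125m" and the LoRA hyperparameters are illustrative only.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

bnb_config = BitsAndBytesConfig(load_in_8bit=True)
base_model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m", quantization_config=bnb_config, device_map="auto"
)

lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base_model, lora_config)
model.print_trainable_parameters()

# Merging into quantized weights round-trips through dequantization, so it is
# slightly lossy -- hence the warnings emitted by merge()/unmerge() above.
merged = model.merge_and_unload()
```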
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Optional, Union from peft.tuners.lycoris_utils import LycorisConfig from peft.utils import PeftType @dataclass class OFTConfig(LycorisConfig): """ This is the configuration class to store the configuration of a [`OFTModel`]. Args: r (`int`): OFT rank. module_dropout (`int`): The dropout probability for disabling OFT modules during training. target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. init_weights (`bool`): Whether to perform initialization of OFT weights. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None`. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. coft (`bool`): Whether to use the constrained variant of OFT or not, off by default. eps (`float`): The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True. block_share (`bool`): Whether to share the OFT parameters between blocks or not. This is `False` by default. """ r: int = field(default=8, metadata={"help": "OFT rank"}) module_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for disabling OFT modules during training"} ) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with OFT." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the OFT layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." 
), }, ) layers_to_transform: Optional[Union[List[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform. If this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[str] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different from None and if the layer pattern is not in the common layers pattern." }, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from OFT layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` is randomly initialized and as such needs to be trainable and saved." }, ) coft: bool = field( default=False, metadata={"help": "Whether to use the constrained variant of OFT or not."}, ) eps: float = field( default=6e-5, metadata={ "help": "The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True." }, ) block_share: bool = field( default=False, metadata={"help": "Whether to share the OFT parameters between blocks or not."}, ) def __post_init__(self): self.peft_type = PeftType.OFT self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules )
peft/src/peft/tuners/oft/config.py/0
{ "file_path": "peft/src/peft/tuners/oft/config.py", "repo_id": "peft", "token_count": 2079 }
208
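The OFTConfig above follows the same LycorisConfig pattern as the other adapter configs. The following is a small, hedged usage sketch; the target module names are assumptions that hold for OPT-style models and will differ for other architectures.

```python
# Sketch: apply OFT to the attention projections of a causal LM.
# The checkpoint and target module names ("q_proj", "v_proj") are assumptions.
from transformers import AutoModelForCausalLM
from peft import OFTConfig, get_peft_model

config = OFTConfig(
    r=8,                      # OFT rank, see the config docstring above
    target_modules=["q_proj", "v_proj"],
    module_dropout=0.0,
    coft=False,               # set True for the constrained (COFT) variant
    block_share=False,        # set True to share parameters across blocks
)

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```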
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch from peft.utils.integrations import gather_params_ctx from .config import PromptTuningInit class PromptEmbedding(torch.nn.Module): """ The model to encode virtual tokens into prompt embeddings. Args: config ([`PromptTuningConfig`]): The configuration of the prompt embedding. word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model. **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding. Example: ```py >>> from peft import PromptEmbedding, PromptTuningConfig >>> config = PromptTuningConfig( ... peft_type="PROMPT_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... prompt_tuning_init="TEXT", ... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral", ... tokenizer_name_or_path="t5-base", ... ) >>> # t5_model.shared is the word embeddings of the base model >>> prompt_embedding = PromptEmbedding(config, t5_model.shared) ``` Input Shape: (`batch_size`, `total_virtual_tokens`) Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) """ def __init__(self, config, word_embeddings): super().__init__() total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim) if config.prompt_tuning_init == PromptTuningInit.TEXT and not config.inference_mode: from transformers import AutoTokenizer tokenizer_kwargs = config.tokenizer_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs) init_text = config.prompt_tuning_init_text init_token_ids = tokenizer(init_text)["input_ids"] # Trim or iterate until num_text_tokens matches total_virtual_tokens num_text_tokens = len(init_token_ids) if num_text_tokens > total_virtual_tokens: init_token_ids = init_token_ids[:total_virtual_tokens] elif num_text_tokens < total_virtual_tokens: num_reps = math.ceil(total_virtual_tokens / num_text_tokens) init_token_ids = init_token_ids * num_reps init_token_ids = init_token_ids[:total_virtual_tokens] init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device) with gather_params_ctx(word_embeddings.parameters()): word_embedding_weights = word_embeddings(init_token_ids).detach().clone() word_embedding_weights = word_embedding_weights.to(torch.float32) self.embedding.weight = torch.nn.Parameter(word_embedding_weights) def forward(self, indices): # Just get embeddings prompt_embeddings = self.embedding(indices) return prompt_embeddings
peft/src/peft/tuners/prompt_tuning/model.py/0
{ "file_path": "peft/src/peft/tuners/prompt_tuning/model.py", "repo_id": "peft", "token_count": 1486 }
209
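The PromptEmbedding module above is normally constructed indirectly through `get_peft_model` rather than instantiated by hand as in its docstring example. Here is a hedged end-to-end sketch of the TEXT initialization path; the checkpoint, init text, and token count are illustrative assumptions.

```python
# Sketch: prompt tuning with TEXT initialization. get_peft_model builds the
# PromptEmbedding shown above from the base model's word embeddings.
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model

config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    prompt_tuning_init=PromptTuningInit.TEXT,
    prompt_tuning_init_text="Classify the sentiment of this review:",
    num_virtual_tokens=16,
    tokenizer_name_or_path="facebook/opt-125m",
)

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model = get_peft_model(base_model, config)
model.print_trainable_parameters()  # only the virtual token embeddings train
```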
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from packaging import version from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) mlu_available = False if version.parse(accelerate.__version__) >= version.parse("0.29.0"): from accelerate.utils import is_mlu_available mlu_available = is_mlu_available() __all__ = [ "CONFIG_NAME", "EMBEDDING_LAYER_NAMES", "SAFETENSORS_WEIGHTS_NAME", "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", "TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING", "WEIGHTS_NAME", "INCLUDE_LINEAR_LAYERS_SHORTHAND", "bloom_model_postprocess_past_key_value", "starcoder_model_postprocess_past_key_value", ] # Get current device name based on available devices def infer_device() -> str: if torch.cuda.is_available(): return "cuda" elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): return "mps" elif mlu_available: return "mlu" elif is_xpu_available(): return "xpu" elif is_npu_available(): return "npu" return "cpu" def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True, gradient_checkpointing_kwargs=None): r""" Note this method only works for `transformers` models. This method wraps the entire protocol for preparing a model before running a training. 
This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model (`transformers.PreTrainedModel`): The loaded model from `transformers` use_gradient_checkpointing (`bool`, *optional*, defaults to `True`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): Keyword arguments to pass to the gradient checkpointing function, please refer to the documentation of `torch.utils.checkpoint.checkpoint` for more details about the arguments that you can pass to that method. Note this is only available in the latest transformers versions (> 4.34.1). """ loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq" is_aqlm_quantized = getattr(model, "quantization_method", None) == "aqlm" is_eetq_quantized = getattr(model, "quantization_method", None) == "eetq" is_hqq_quantized = getattr(model, "quantization_method", None) == "hqq" or getattr(model, "hqq_quantized", False) if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {} for name, param in model.named_parameters(): # freeze base model's layers param.requires_grad = False if not is_gptq_quantized and not is_aqlm_quantized and not is_eetq_quantized and not is_hqq_quantized: # cast all non INT8 parameters to fp32 for param in model.parameters(): if ( (param.dtype == torch.float16) or (param.dtype == torch.bfloat16) ) and param.__class__.__name__ != "Params4bit": param.data = param.data.to(torch.float32) if ( loaded_in_kbit or is_gptq_quantized or is_aqlm_quantized or is_eetq_quantized or is_hqq_quantized ) and use_gradient_checkpointing: # When having `use_reentrant=False` + gradient_checkpointing, there is no need for this hack if "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # To support older transformers versions, check if the model supports gradient_checkpointing_kwargs _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list( inspect.signature(model.gradient_checkpointing_enable).parameters ) if not _supports_gc_kwargs and len(gradient_checkpointing_kwargs) > 0: warnings.warn( "gradient_checkpointing_kwargs is not supported in this version of transformers. The passed kwargs will be ignored." " if you want to use that feature, please upgrade to the latest version of transformers.", FutureWarning, ) gc_enable_kwargs = ( {} if not _supports_gc_kwargs else {"gradient_checkpointing_kwargs": gradient_checkpointing_kwargs} ) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable(**gc_enable_kwargs) return model # copied from transformers.models.bart.modeling_bart def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids pad_token_id (`int`): The id of the `padding` token. decoder_start_token_id (`int`): The id of the `start` token. 
""" shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class ModulesToSaveWrapper(torch.nn.Module): def __init__(self, module_to_save, adapter_name): super().__init__() self.original_module = module_to_save self.modules_to_save = torch.nn.ModuleDict({}) self._active_adapter = adapter_name self._disable_adapters = False self.update(adapter_name) self.check_module() def check_module(self): """Perform some sanity checks on the module to ensure that it works""" # Try to anticipate some modules that users could try to target that would not work. # Note: It's not possible to check hasattr(module, "forward"), since that returns True for ModuleDict and # ModuleList, even though their forward methods cannot be called forbidden_classes = (torch.nn.ModuleDict, torch.nn.ModuleList, torch.nn.ParameterDict, torch.nn.ParameterList) if isinstance(self.original_module, forbidden_classes): cls_name = self.original_module.__class__ raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}") # local import to avoid circular import from peft.tuners.tuners_utils import BaseTunerLayer if isinstance(self.original_module, BaseTunerLayer): # e.g. applying modules_to_save to a lora layer makes no sense cls_name = self.original_module.__class__ raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}") @property def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters @property def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter @property def weight(self): if self.active_adapter not in self.modules_to_save: return self.original_module.weight return self.modules_to_save[self.active_adapter].weight def update(self, adapter_name): context_manager = nullcontext() for _, param in self.original_module.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): import deepspeed context_manager = deepspeed.zero.GatheredParameters(self.original_module.parameters(), modifier_rank=0) break with context_manager: self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)})) if hasattr(self.modules_to_save[adapter_name], "_hf_hook"): old_hook = self.modules_to_save[adapter_name]._hf_hook new_hook = self._create_new_hook(old_hook) remove_hook_from_module(self.modules_to_save[adapter_name]) add_hook_to_module(self.modules_to_save[adapter_name], new_hook) self.original_module.requires_grad_(False) if adapter_name == self.active_adapter: self.modules_to_save[adapter_name].requires_grad_(True) def _create_new_hook(self, old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! 
""" old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def forward(self, *args, **kwargs): if self.disable_adapters or (self.active_adapter not in self.modules_to_save): return self.original_module(*args, **kwargs) return self.modules_to_save[self.active_adapter](*args, **kwargs) def enable_adapters(self, enabled: bool): """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if self._disable_adapters is not enabled: # already in the desired state, do nothing return if enabled: self.original_module.requires_grad_(False) self.modules_to_save[self.active_adapter].requires_grad_(True) self._disable_adapters = False else: self.original_module.requires_grad_(True) self.modules_to_save.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_name: str): """Set the active adapter Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (str): The name of the adapter to set as active """ if adapter_name not in self.modules_to_save: raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}") self.modules_to_save[self.active_adapter].requires_grad_(False) self.modules_to_save[adapter_name].requires_grad_(True) self._active_adapter = adapter_name def _get_submodules(model, key): parent = model.get_submodule(".".join(key.split(".")[:-1])) target_name = key.split(".")[-1] target = model.get_submodule(key) return parent, target, target_name def _freeze_adapter(model, adapter_name): for n, p in model.named_parameters(): if adapter_name in n: p.requires_grad = False def _set_trainable(model, adapter_name): key_list = [key for key, _ in model.named_modules()] for key in key_list: target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save) if target_module_found: parent, target, target_name = _get_submodules(model, key) if isinstance(target, ModulesToSaveWrapper): target.update(adapter_name) target.set_adapter(target.active_adapter) else: new_module = ModulesToSaveWrapper(target, adapter_name) new_module.set_adapter(adapter_name) setattr(parent, target_name, new_module) def _set_adapter(model, adapter_name): def check_adapter_name(adapter_name): if isinstance(adapter_name, str): return adapter_name # adapter_name is a list of str if len(adapter_name) > 1: raise ValueError("Only one adapter can be set at a time for modules_to_save") elif len(adapter_name) == 0: raise ValueError("Please specify at least one adapter to set") adapter_name = adapter_name[0] return adapter_name for module in model.modules(): if isinstance(module, ModulesToSaveWrapper): # only check the adapter_name if we actually encounter a ModulesToSaveWrapper, otherwise we don't care adapter_name = check_adapter_name(adapter_name) # if the adapter is found in this module, set it as the active adapter, else disable the adapters of this # 
module if adapter_name in module.modules_to_save: module.set_adapter(adapter_name) else: module.enable_adapters(False) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads # For grouped-query attention, see #1901. if peft_config.peft_type == "PREFIX_TUNING" and "num_key_value_heads" in model_config: num_key_value_heads = model_config["num_key_value_heads"] peft_config.token_dim = peft_config.token_dim // peft_config.num_attention_heads * num_key_value_heads peft_config.num_attention_heads = num_key_value_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", peft_config.token_dim) return peft_config def fsdp_auto_wrap_policy(model): import functools import os from accelerate import FullyShardedDataParallelPlugin if hasattr(FullyShardedDataParallelPlugin, "get_module_class_from_name"): get_module_class_from_name = FullyShardedDataParallelPlugin.get_module_class_from_name else: from accelerate.utils.dataclasses import get_module_class_from_name from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder default_transformer_cls_names_to_wrap = ( ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else "" ) transformer_cls_names_to_wrap = os.environ.get( "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap ).split(",") transformer_cls_to_wrap = {PrefixEncoder, PromptEncoder, PromptEmbedding} for layer_class in transformer_cls_names_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception("Could not find the transformer layer class to wrap in the model.") else: transformer_cls_to_wrap.add(transformer_cls) def lambda_policy_fn(module): if ( len(list(module.named_children())) == 0 and getattr(module, "weight", None) is not None and module.weight.requires_grad ): return True return False lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn) transformer_wrap_policy = functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap, ) auto_wrap_policy = functools.partial(_or_policy, 
policies=[lambda_policy, transformer_wrap_policy]) return auto_wrap_policy def transpose(weight, fan_in_fan_out): if not fan_in_fan_out: return weight if isinstance(weight, torch.nn.Parameter): return torch.nn.Parameter(weight.T) return weight.T def _is_valid_match(key: str, target_key: str): """ Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key or the target_key is a submodule of key """ if key.endswith(target_key): if len(key) > len(target_key): return key.endswith("." + target_key) # must be a sub module return True return False def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int: """Get the batch size based on either input_ids or input_embeds Raises an ValueError if both are None. """ if (input_ids is None) and (inputs_embeds is None): raise ValueError("You have to provide either input_ids or inputs_embeds") if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] return batch_size def get_quantization_config(model: torch.nn.Module, method: str): """ Get the quantization config of the related quantization method """ if ( hasattr(model, "config") and hasattr(model.config, "quantization_config") and (getattr(model, "quantization_method", None) == method) ): return model.config.quantization_config return None def get_auto_gptq_quant_linear(gptq_quantization_config): """ Get the right AutoGPTQQuantLinear class based on the quantization config file """ if gptq_quantization_config is not None and is_auto_gptq_available(): from auto_gptq.utils.import_utils import dynamically_import_QuantLinear desc_act = gptq_quantization_config.desc_act group_size = gptq_quantization_config.group_size bits = gptq_quantization_config.bits if hasattr(gptq_quantization_config, "use_exllama"): use_exllama = gptq_quantization_config.use_exllama else: use_exllama = not gptq_quantization_config.disable_exllama if hasattr(gptq_quantization_config, "exllama_config"): exllama_version = gptq_quantization_config.exllama_config["version"] else: exllama_version = 1 AutoGPTQQuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=desc_act, group_size=group_size, bits=bits, disable_exllama=not (use_exllama and exllama_version == 1), disable_exllamav2=not (use_exllama and exllama_version == 2), ) return AutoGPTQQuantLinear return None def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. This method is the exact same copy of https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added it here manually to avoid import issue with old versions of transformers. """ if tensor.device.type == "xla" and is_torch_tpu_available(): # NOTE: xla tensors dont have storage # use some other unique id to distinguish. # this is a XLA tensor, it must be created using torch_xla's # device. 
So the following import is safe: import torch_xla unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) else: unique_id = storage_ptr(tensor) return tensor.device, unique_id, storage_size(tensor) def cast_mixed_precision_params(model, dtype): """ Cast all non-trainable parameters of the model to the given `dtype`. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing. The trainable parameters are cast to full precision. This is meant to reduce the GPU memory usage when using PEFT methods by using half-precision dtype for non-trainable parameters. Having the trainable parameters in full-precision preserves training stability when using automatic mixed-precision training. Args: model (`torch.nn.Module`): The model to cast the non-trainable parameters of. dtype (`torch.dtype`): The dtype to cast the non-trainable parameters to. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing. """ for p in model.parameters(): if not p.requires_grad: p.data = p.to(dtype) else: p.data = p.to(torch.float32) def str_to_bool(value: str) -> int: """ Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`; """ # same function as in accelerate.utils, which replaces the deprecated distutils.util.strtobool value = value.lower() if value in ("y", "yes", "t", "true", "on", "1"): return 1 elif value in ("n", "no", "f", "false", "off", "0"): return 0 else: raise ValueError(f"invalid truth value {value}") def check_file_exists_on_hf_hub(repo_id: str, filename: str, **kwargs) -> Optional[bool]: """Check if a file exists on HF Hub. If the check was not successful, returns None instead of erroring. Respects offline mode if set. """ exists: Optional[bool] = None if str_to_bool(os.environ.get("HF_HUB_OFFLINE", "0")): # user set offline mode, cannot check return exists try: exists = file_exists(repo_id, filename, **kwargs) except (HFValidationError, EntryNotFoundError): # error, exists stays None pass except Exception as e: warnings.warn( f"Unable to fetch remote file due to the following error {e} - silently ignoring the lookup" f" for the file {filename} in {repo_id}." ) return exists
peft/src/peft/utils/other.py/0
{ "file_path": "peft/src/peft/utils/other.py", "repo_id": "peft", "token_count": 10670 }
210
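Among the helpers in utils/other.py above, `prepare_model_for_kbit_training` and `infer_device` are the ones users most often call directly. Below is a hedged sketch of the typical preparation flow for a quantized base model; the checkpoint and hyperparameters are illustrative assumptions and a real run needs bitsandbytes and a CUDA device for 4-bit loading.

```python
# Sketch: typical use of the helpers defined above when fine-tuning a
# quantized model with LoRA.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from peft.utils import infer_device

device = infer_device()  # "cuda", "mps", "mlu", "xpu", "npu" or "cpu"

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)

# Freezes the base weights, upcasts non-quantized params to fp32 and enables
# gradient checkpointing, as implemented above.
model = prepare_model_for_kbit_training(
    model,
    use_gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
)

model = get_peft_model(model, LoraConfig(r=8, target_modules=["q_proj", "v_proj"]))
model.print_trainable_parameters()
```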
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import torch from diffusers import StableDiffusionPipeline from transformers import AutoModelForCausalLM, AutoTokenizer from peft import LoraConfig, get_peft_model from peft.helpers import check_if_peft_model, rescale_adapter_scale from peft.tuners.lora.layer import LoraLayer class TestCheckIsPeftModel: def test_valid_hub_model(self): result = check_if_peft_model("peft-internal-testing/gpt2-lora-random") assert result is True def test_invalid_hub_model(self): result = check_if_peft_model("gpt2") assert result is False def test_nonexisting_hub_model(self): result = check_if_peft_model("peft-internal-testing/non-existing-model") assert result is False def test_local_model_valid(self, tmp_path): model = AutoModelForCausalLM.from_pretrained("gpt2") config = LoraConfig() model = get_peft_model(model, config) model.save_pretrained(tmp_path / "peft-gpt2-valid") result = check_if_peft_model(tmp_path / "peft-gpt2-valid") assert result is True def test_local_model_invalid(self, tmp_path): model = AutoModelForCausalLM.from_pretrained("gpt2") model.save_pretrained(tmp_path / "peft-gpt2-invalid") result = check_if_peft_model(tmp_path / "peft-gpt2-invalid") assert result is False def test_local_model_broken_config(self, tmp_path): with open(tmp_path / "adapter_config.json", "w") as f: f.write('{"foo": "bar"}') result = check_if_peft_model(tmp_path) assert result is False def test_local_model_non_default_name(self, tmp_path): model = AutoModelForCausalLM.from_pretrained("gpt2") config = LoraConfig() model = get_peft_model(model, config, adapter_name="other") model.save_pretrained(tmp_path / "peft-gpt2-other") # no default adapter here result = check_if_peft_model(tmp_path / "peft-gpt2-other") assert result is False # with adapter name result = check_if_peft_model(tmp_path / "peft-gpt2-other" / "other") assert result is True class TestScalingAdapters: @pytest.fixture(scope="class") def tokenizer(self): return AutoTokenizer.from_pretrained("facebook/opt-125m") def get_scale_from_modules(self, model): layer_to_scale_map = {} for name, module in model.named_modules(): if isinstance(module, LoraLayer): layer_to_scale_map[name] = module.scaling return layer_to_scale_map def test_rescale_adapter_scale(self, tokenizer): model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["k_proj", "v_proj"], lora_dropout=0.1, bias="none", init_lora_weights=False, ) model = get_peft_model(model, lora_config) model.eval() inputs = tokenizer("hello world", return_tensors="pt") with torch.no_grad(): logits_before_scaling = model(**inputs).logits scales_before_scaling = self.get_scale_from_modules(model) with rescale_adapter_scale(model=model, multiplier=0.5): scales_during_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert scales_before_scaling[key] != scales_during_scaling[key] with torch.no_grad(): logits_during_scaling 
= model(**inputs).logits assert not torch.allclose(logits_before_scaling, logits_during_scaling) scales_after_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert scales_before_scaling[key] == scales_after_scaling[key] with torch.no_grad(): logits_after_scaling = model(**inputs).logits assert torch.allclose(logits_before_scaling, logits_after_scaling) def test_wrong_scaling_datatype(self): model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["k_proj", "v_proj"], lora_dropout=0.1, bias="none", init_lora_weights=False, ) model = get_peft_model(model, lora_config) # we expect a type error here because of the wrong datatype of multiplier multiplier = "a" with pytest.raises(TypeError, match=f"Argument multiplier should be of type float, got {type(multiplier)}"): with rescale_adapter_scale(model=model, multiplier=multiplier): pass def test_not_lora_model(self): model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") # we expect a value error here because the model # does not have lora layers with pytest.raises(ValueError, match="scaling is only supported for models with `LoraLayer`s"): with rescale_adapter_scale(model=model, multiplier=0.5): pass def test_scaling_set_to_zero(self, tokenizer): base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") inputs = tokenizer("hello world", return_tensors="pt") base_model.eval() with torch.no_grad(): logits_base_model = base_model(**inputs).logits lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["k_proj", "v_proj"], lora_dropout=0.1, bias="none", init_lora_weights=False, ) lora_model = get_peft_model(base_model, lora_config) lora_model.eval() with rescale_adapter_scale(model=lora_model, multiplier=0.0): with torch.no_grad(): logits_lora_model = lora_model(**inputs).logits assert torch.allclose(logits_base_model, logits_lora_model) def test_diffusers_pipeline(self): model_id = "hf-internal-testing/tiny-stable-diffusion-torch" pipeline = StableDiffusionPipeline.from_pretrained(model_id) text_encoder_kwargs = { "r": 8, "lora_alpha": 32, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "lora_dropout": 0.0, "bias": "none", } unet_kwargs = { "r": 8, "lora_alpha": 32, "target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"], "lora_dropout": 0.0, "bias": "none", } # Instantiate text_encoder adapter config_text_encoder = LoraConfig(**text_encoder_kwargs) pipeline.text_encoder = get_peft_model(pipeline.text_encoder, config_text_encoder) # Instantiate unet adapter config_unet = LoraConfig(**unet_kwargs) pipeline.unet = get_peft_model(pipeline.unet, config_unet) text_scales_before_scaling = self.get_scale_from_modules(pipeline.text_encoder) unet_scales_before_scaling = self.get_scale_from_modules(pipeline.unet) with rescale_adapter_scale(model=pipeline.text_encoder, multiplier=0.5), rescale_adapter_scale( model=pipeline.unet, multiplier=0.5 ): text_scales_during_scaling = self.get_scale_from_modules(pipeline.text_encoder) unet_scales_during_scaling = self.get_scale_from_modules(pipeline.unet) for key in text_scales_before_scaling.keys(): assert text_scales_before_scaling[key] != text_scales_during_scaling[key] for key in unet_scales_before_scaling.keys(): assert unet_scales_before_scaling[key] != unet_scales_during_scaling[key] text_scales_after_scaling = self.get_scale_from_modules(pipeline.text_encoder) unet_scales_after_scaling =
self.get_scale_from_modules(pipeline.unet) for key in text_scales_before_scaling.keys(): assert text_scales_before_scaling[key] == text_scales_after_scaling[key] for key in unet_scales_before_scaling.keys(): assert unet_scales_before_scaling[key] == unet_scales_after_scaling[key] def test_transformers_pipeline(self, tmp_path, tokenizer): # this uses a transformers model that loads the adapter directly model_id = "facebook/opt-125m" model = AutoModelForCausalLM.from_pretrained(model_id) config = LoraConfig(init_lora_weights=False) model = get_peft_model(model, config) model.save_pretrained(tmp_path / "opt-lora") del model # load directly into transformers model model = AutoModelForCausalLM.from_pretrained(model_id) model.load_adapter(tmp_path / "opt-lora") inputs = tokenizer("hello world", return_tensors="pt") model = model.eval() with torch.no_grad(): logits_before_scaling = model(**inputs).logits scales_before_scaling = self.get_scale_from_modules(model) with rescale_adapter_scale(model=model, multiplier=0.5): scales_during_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert scales_before_scaling[key] != scales_during_scaling[key] with torch.no_grad(): logits_during_scaling = model(**inputs).logits assert not torch.allclose(logits_before_scaling, logits_during_scaling) scales_after_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert scales_before_scaling[key] == scales_after_scaling[key] with torch.no_grad(): logits_after_scaling = model(**inputs).logits assert torch.allclose(logits_before_scaling, logits_after_scaling) def test_multi_adapters(self, tokenizer): model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["k_proj", "v_proj"], lora_dropout=0.1, bias="none", init_lora_weights=False, ) model = get_peft_model(model, lora_config) inputs = tokenizer("hello world", return_tensors="pt") # add another adapter and activate it model.add_adapter("other", lora_config) model.set_adapter("other") scales_before_scaling = self.get_scale_from_modules(model) model.eval() with torch.no_grad(): logits_before = model(**inputs).logits with rescale_adapter_scale(model=model, multiplier=0.5): scales_during_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert scales_before_scaling[key] != scales_during_scaling[key] with torch.no_grad(): logits_during = model(**inputs).logits assert not torch.allclose(logits_before, logits_during) scales_after_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert scales_before_scaling[key] == scales_after_scaling[key] with torch.no_grad(): logits_after = model(**inputs).logits assert torch.allclose(logits_before, logits_after) def test_rank_alpha_pattern(self, tokenizer): model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["k_proj", "v_proj"], lora_dropout=0.1, bias="none", init_lora_weights=False, rank_pattern={"k_proj": 2}, alpha_pattern={"k_proj": 8}, ) model = get_peft_model(model, lora_config) model.eval() inputs = tokenizer("hello world", return_tensors="pt") with torch.no_grad(): logits_before_scaling = model(**inputs).logits scales_before_scaling = self.get_scale_from_modules(model) with rescale_adapter_scale(model=model, multiplier=0.5): scales_during_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert
scales_before_scaling[key] != scales_during_scaling[key] with torch.no_grad(): logits_during_scaling = model(**inputs).logits assert not torch.allclose(logits_before_scaling, logits_during_scaling) scales_after_scaling = self.get_scale_from_modules(model) for key in scales_before_scaling.keys(): assert scales_before_scaling[key] == scales_after_scaling[key] with torch.no_grad(): logits_after_scaling = model(**inputs).logits assert torch.allclose(logits_before_scaling, logits_after_scaling) def test_merging_adapter(self, tokenizer): model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["k_proj", "v_proj"], lora_dropout=0.1, bias="none", init_lora_weights=False, ) model = get_peft_model(model, lora_config) model.eval() inputs = tokenizer("hello world", return_tensors="pt") with rescale_adapter_scale(model=model, multiplier=0.5): with torch.no_grad(): logits_unmerged_scaling = model(**inputs).logits model = model.merge_and_unload() with torch.no_grad(): logits_merged_scaling = model(**inputs).logits assert torch.allclose(logits_merged_scaling, logits_unmerged_scaling, atol=1e-4, rtol=1e-4)
peft/tests/test_helpers.py/0
{ "file_path": "peft/tests/test_helpers.py", "repo_id": "peft", "token_count": 6577 }
211
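The tests above exercise `rescale_adapter_scale`, a context manager that temporarily multiplies the scaling of every LoRA layer and restores it on exit. A minimal, hedged sketch of the usage pattern the tests verify; the checkpoint and multiplier are illustrative assumptions.

```python
# Sketch: temporarily scale a LoRA adapter's contribution at inference time,
# mirroring what the tests above assert.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model
from peft.helpers import rescale_adapter_scale

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model = get_peft_model(model, LoraConfig(init_lora_weights=False)).eval()

inputs = tokenizer("hello world", return_tensors="pt")
with torch.no_grad(), rescale_adapter_scale(model=model, multiplier=0.5):
    # inside the context every LoraLayer.scaling is halved
    half_strength_logits = model(**inputs).logits

with torch.no_grad():
    full_strength_logits = model(**inputs).logits  # scalings restored on exit
```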
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import pickle import re import shutil import tempfile from collections import OrderedDict from dataclasses import replace import pytest import torch import yaml from diffusers import StableDiffusionPipeline from packaging import version from safetensors.torch import save_file from peft import ( AdaLoraConfig, BOFTConfig, FourierFTConfig, HRAConfig, IA3Config, LNTuningConfig, LoHaConfig, LoKrConfig, LoraConfig, PeftModel, PeftType, PrefixTuningConfig, PromptEncoderConfig, PromptLearningConfig, PromptTuningConfig, VeraConfig, get_peft_model, get_peft_model_state_dict, prepare_model_for_kbit_training, ) from peft.tuners.lora import LoraLayer from peft.utils import _get_submodules, infer_device from .testing_utils import get_state_dict CONFIG_TESTING_KWARGS = ( # IA³ { "target_modules": None, "feedforward_modules": None, }, # LoRA { "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", }, # prefix tuning { "num_virtual_tokens": 10, }, # prompt encoder { "num_virtual_tokens": 10, "encoder_hidden_size": 32, }, # prompt tuning { "num_virtual_tokens": 10, }, # AdaLoRA { "target_modules": None, }, # BOFT { "target_modules": None, }, # VeRA { "r": 8, "target_modules": None, "vera_dropout": 0.05, "projection_prng_key": 0xFF, "d_initial": 0.1, "save_projection": True, "bias": "none", }, # FourierFT { "n_frequency": 10, "target_modules": None, }, # HRA { "target_modules": None, }, ) CLASSES_MAPPING = { "ia3": (IA3Config, CONFIG_TESTING_KWARGS[0]), "lora": (LoraConfig, CONFIG_TESTING_KWARGS[1]), "prefix_tuning": (PrefixTuningConfig, CONFIG_TESTING_KWARGS[2]), "prompt_encoder": (PromptEncoderConfig, CONFIG_TESTING_KWARGS[3]), "prompt_tuning": (PromptTuningConfig, CONFIG_TESTING_KWARGS[4]), "adalora": (AdaLoraConfig, CONFIG_TESTING_KWARGS[5]), "boft": (BOFTConfig, CONFIG_TESTING_KWARGS[6]), "vera": (VeraConfig, CONFIG_TESTING_KWARGS[7]), "fourierft": (FourierFTConfig, CONFIG_TESTING_KWARGS[8]), "hra": (HRAConfig, CONFIG_TESTING_KWARGS[9]), } # Adapted from https://github.com/huggingface/transformers/blob/48327c57182fdade7f7797d1eaad2d166de5c55b/src/transformers/activations.py#LL166C7-L166C22 class ClassInstantier(OrderedDict): def __getitem__(self, key, *args, **kwargs): # check if any of the kwargs is inside the config class kwargs if any(kwarg in self[key][1] for kwarg in kwargs): new_config_kwargs = self[key][1].copy() new_config_kwargs.update(kwargs) return (self[key][0], new_config_kwargs) return super().__getitem__(key, *args, **kwargs) def get_grid_parameters(self, grid_parameters, filter_params_func=None): r""" Returns a list of all possible combinations of the parameters in the config classes. Args: grid_parameters (`dict`): A dictionary containing the parameters to be tested. There should be at least the key "model_ids" which contains a list of model ids to be tested. 
The other keys should be the name of the config class post-fixed with "_kwargs" and the value should be a dictionary containing the parameters to be tested for that config class. filter_params_func (`callable`, `optional`): A function that takes a list of tuples and returns a list of tuples. This function is used to filter out the tests that need, for example, to be skipped. Returns: generated_tests (`list`): A list of tuples containing the name of the test, the model id, the config class and the config class kwargs. """ generated_tests = [] model_list = grid_parameters["model_ids"] task_type = grid_parameters["task_type"] if "task_type" in grid_parameters else None for model_id in model_list: for key, value in self.items(): if f"{key}_kwargs" in grid_parameters: peft_configs = [] current_peft_config = value[1].copy() for current_key, current_value in grid_parameters[f"{key}_kwargs"].items(): for kwarg in current_value: current_peft_config.update({current_key: kwarg}) if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs.append(current_peft_config.copy()) else: current_peft_config = value[1].copy() if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs = [current_peft_config] for peft_config in peft_configs: generated_tests.append((f"test_{model_id}_{key}", model_id, value[0], peft_config)) if filter_params_func is not None: generated_tests = filter_params_func(generated_tests) return generated_tests PeftTestConfigManager = ClassInstantier(CLASSES_MAPPING) class PeftCommonTester: r""" A large testing suite for testing common functionality of the PEFT models. Attributes: torch_device (`torch.device`): The device on which the tests will be run. transformers_class (`transformers.PreTrainedModel`): The transformers class that is being tested.
""" torch_device = infer_device() transformers_class = None def prepare_inputs_for_common(self): raise NotImplementedError def check_modelcard(self, tmp_dirname, model): # check the generated README.md filename = os.path.join(tmp_dirname, "README.md") assert os.path.exists(filename) with open(filename, encoding="utf-8") as f: readme = f.read() metainfo = re.search(r"---\n(.*?)\n---", readme, re.DOTALL).group(1) dct = yaml.safe_load(metainfo) assert dct["library_name"] == "peft" if hasattr(model, "config"): assert dct["base_model"] == model.config.to_dict()["_name_or_path"] else: # a custom model assert "base_model" not in dct def check_config_json(self, tmp_dirname, model): # check the generated config.json filename = os.path.join(tmp_dirname, "adapter_config.json") assert os.path.exists(filename) with open(filename, encoding="utf-8") as f: config = json.load(f) if hasattr(model, "config"): # custom models don't have a config attribute assert config["base_model_name_or_path"] == model.config.to_dict()["_name_or_path"] def _test_model_attr(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) assert hasattr(model, "save_pretrained") assert hasattr(model, "from_pretrained") assert hasattr(model, "push_to_hub") def _test_adapter_name(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter") correctly_converted = False for n, _ in model.named_parameters(): if "test-adapter" in n: correctly_converted = True break assert correctly_converted def _test_prepare_for_training(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) assert not dummy_output.requires_grad # load with `prepare_model_for_kbit_training` model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = prepare_model_for_kbit_training(model) for param in model.parameters(): assert not param.requires_grad config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) assert dummy_output.requires_grad def _test_save_pretrained(self, model_id, config_cls, config_kwargs, safe_serialization=True): # ensure that the weights are randomly initialized if issubclass(config_cls, LoraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_lora_weights"] = False if issubclass(config_cls, IA3Config): config_kwargs = config_kwargs.copy() config_kwargs["init_ia3_weights"] = False if issubclass(config_cls, VeraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_weights"] = False model = 
self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: if safe_serialization: model.save_pretrained(tmp_dirname) else: model.save_pretrained(tmp_dirname, safe_serialization=False) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal if issubclass(config_cls, PromptEncoderConfig): # For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load # adapter-specific weights for comparison. # TODO: is this expected? state_dict = get_peft_model_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True) else: state_dict = get_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True) # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin" # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) self.check_modelcard(tmp_dirname, model) self.check_config_json(tmp_dirname, model) def _test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs, safe_serialization=True): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return pytest.skip(f"Test not applicable for {config_cls}") # ensure that the weights are randomly initialized if issubclass(config_cls, LoraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_lora_weights"] = False elif issubclass(config_cls, IA3Config): config_kwargs = config_kwargs.copy() config_kwargs["init_ia3_weights"] = False elif hasattr(config_cls, "init_weights"): config_kwargs["init_weights"] = False model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) new_adapter_config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model.add_adapter("new_adapter", new_adapter_config) with tempfile.TemporaryDirectory() as tmp_dirname: if safe_serialization: model.save_pretrained(tmp_dirname) else: model.save_pretrained(tmp_dirname, safe_serialization=False) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) new_adapter_dir = os.path.join(tmp_dirname, "new_adapter") model_from_pretrained.load_adapter(new_adapter_dir, "new_adapter") # check if the state dicts are equal if issubclass(config_cls, PromptEncoderConfig): # For prompt encoding, when loading the whole state_dict, there are differences, 
therefore, only load # adapter-specific weights for comparison. # TODO: is this expected? state_dict = get_peft_model_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True) else: state_dict = get_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin" # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)) assert os.path.exists(os.path.join(new_adapter_dir, target_adapter_filename)) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) assert os.path.exists(os.path.join(new_adapter_dir, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) assert not os.path.exists(os.path.join(new_adapter_dir, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) assert not os.path.exists(os.path.join(new_adapter_dir, "config.json")) self.check_modelcard(tmp_dirname, model) self.check_config_json(tmp_dirname, model) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, selected_adapters=["default"]) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) assert "default" in model_from_pretrained.peft_config.keys() assert "new_adapter" not in model_from_pretrained.peft_config.keys() def _test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls(base_model_name_or_path=model_id, **config_kwargs) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained( model_from_pretrained, tmp_dirname, is_trainable=False, config=config ) assert model_from_pretrained.peft_config["default"].inference_mode assert model_from_pretrained.peft_config["default"] is config def _test_merge_layers_fp16(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig, IA3Config, AdaLoraConfig, LoHaConfig, LoKrConfig): # Merge layers only supported for LoRA and IA³ return pytest.skip(f"Test not applicable for {config_cls}") if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") if (self.torch_device in ["cpu"]) and (version.parse(torch.__version__) <= version.parse("2.1")): self.skipTest("PyTorch 2.1 not supported for Half of addmm_impl_cpu_ ") model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.float16) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(device=self.torch_device, 
dtype=torch.float16) model.eval() # This should simply work _ = model.merge_and_unload() def _test_merge_layers_nan(self, model_id, config_cls, config_kwargs): if config_cls not in ( LoraConfig, IA3Config, AdaLoraConfig, LoHaConfig, LoKrConfig, VeraConfig, FourierFTConfig, ): # Merge layers only supported for LoRA and IA³ return if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() model.eval() # This should work logits_unmerged = model(**dummy_input)[0] model = model.merge_and_unload() logits_merged = model(**dummy_input)[0] assert torch.allclose(logits_unmerged, logits_merged, atol=1e-3, rtol=1e-3) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) for name, module in model.named_parameters(): if ( "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name or "vera_lambda" in name or "fourierft_spectrum" in name ): module.data[0] = torch.nan with pytest.raises( ValueError, match="NaNs detected in the merged weights. The adapter default seems to be broken" ): model = model.merge_and_unload(safe_merge=True) for name, module in model.named_parameters(): if ( "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name or "vera_lambda" in name or "fourierft_spectrum" in name ): module.data[0] = torch.inf with pytest.raises( ValueError, match="NaNs detected in the merged weights. The adapter default seems to be broken" ): model = model.merge_and_unload(safe_merge=True) def _test_merge_layers(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if issubclass(config_cls, BOFTConfig): return pytest.skip(f"Test not applicable for {config_cls}") if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() model.eval() logits = model(**dummy_input)[0] model.merge_adapter() logits_merged = model(**dummy_input)[0] model.unmerge_adapter() logits_unmerged = model(**dummy_input)[0] model = model.merge_and_unload() logits_merged_unloaded = model(**dummy_input)[0] atol, rtol = 1e-4, 1e-4 if self.torch_device in ["mlu"]: atol, rtol = 1e-3, 1e-3 # MLU if config.peft_type == "ADALORA": # AdaLoRA is a bit flaky on CI, but this cannot be reproduced locally atol, rtol = 1e-2, 1e-2 if (config.peft_type == "IA3") and (model_id == "Conv2d"): # for some reason, the IA³ Conv2d introduces a larger error atol, rtol = 0.3, 0.01 assert torch.allclose(logits, logits_merged, atol=atol, rtol=rtol) assert torch.allclose(logits, logits_unmerged, atol=atol, rtol=rtol) assert torch.allclose(logits, logits_merged_unloaded, atol=atol, rtol=rtol) # For this test to work, weights should not be initialized to identity transform (e.g. # init_lora_weights should be False). 
transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) logits_transformers = transformers_model(**dummy_input)[0] assert not torch.allclose(logits_merged, logits_transformers, atol=1e-10, rtol=1e-10) # test that the logits are identical after a save-load-roundtrip if hasattr(model, "save_pretrained"): # model is a transformers model tmp_dirname = tempfile.mkdtemp() # note: not using the context manager here because it fails on Windows CI for some reason try: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(tmp_dirname).to(self.torch_device) finally: try: shutil.rmtree(tmp_dirname) except PermissionError: # windows error pass else: # model is not a transformers model model_from_pretrained = pickle.loads(pickle.dumps(model)) logits_merged_from_pretrained = model_from_pretrained(**dummy_input)[0] assert torch.allclose(logits_merged, logits_merged_from_pretrained, atol=atol, rtol=rtol) def _test_merge_layers_multi(self, model_id, config_cls, config_kwargs): supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.HRA, ] if ("gpt2" in model_id.lower()) and (config_cls == IA3Config): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() model.eval() with torch.inference_mode(): logits_adapter_1 = model(**dummy_input)[0] model.add_adapter("adapter-2", config) model.set_adapter("adapter-2") model.eval() with torch.inference_mode(): logits_adapter_2 = model(**dummy_input)[0] assert not torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3) model.set_adapter("default") with torch.inference_mode(): logits_adapter_1_after_set = model(**dummy_input)[0] assert torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3) model_copy = copy.deepcopy(model) model_copy_2 = copy.deepcopy(model) model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"]) with torch.inference_mode(): logits_merged_all = model_merged_all(**dummy_input)[0] assert not torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3) assert not torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3) model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"]) with torch.inference_mode(): logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0] assert torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3) model_merged_adapter_default = model_copy_2.merge_and_unload(adapter_names=["default"]) with torch.inference_mode(): logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0] assert torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3) def _test_merge_layers_is_idempotent(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) model.eval() torch.manual_seed(0) model.merge_adapter() logits_0 = model(**self.prepare_inputs_for_testing())[0] # merging again should not change anything # 
also check warning: with pytest.warns(UserWarning, match="All adapters are already merged, nothing to do"): model.merge_adapter() logits_1 = model(**self.prepare_inputs_for_testing())[0] assert torch.allclose(logits_0, logits_1, atol=1e-6, rtol=1e-6) def _test_safe_merge(self, model_id, config_cls, config_kwargs): torch.manual_seed(0) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = model.to(self.torch_device).eval() inputs = self.prepare_inputs_for_testing() logits_base = model(**inputs)[0] model = get_peft_model(model, config).eval() logits_peft = model(**inputs)[0] atol, rtol = 1e-6, 1e-6 # default # Initializing with LN tuning cannot be configured to change the outputs (unlike init_lora_weights=False) if not issubclass(config_cls, LNTuningConfig): # sanity check that the logits are different assert not torch.allclose(logits_base, logits_peft, atol=atol, rtol=rtol) model_unloaded = model.merge_and_unload(safe_merge=True) logits_unloaded = model_unloaded(**inputs)[0] if self.torch_device in ["mlu"]: atol, rtol = 1e-3, 1e-3 # MLU # check that the logits are the same after unloading assert torch.allclose(logits_peft, logits_unloaded, atol=atol, rtol=rtol) # Ensure that serializing with safetensors works, there was an error when weights were not contiguous with tempfile.TemporaryDirectory() as tmp_dirname: # serializing with torch.save works torch.save(model_unloaded.state_dict(), os.path.join(tmp_dirname, "model.bin")) # serializing with safetensors works save_file(model_unloaded.state_dict(), os.path.join(tmp_dirname, "model.safetensors")) def _test_mixed_adapter_batches(self, model_id, config_cls, config_kwargs): # Test for mixing different adapters in a single batch by passing the adapter_names argument if config_cls not in (LoraConfig,): return pytest.skip(f"Mixed adapter batches not supported for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) torch.manual_seed(0) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_name="adapter0").eval() model.add_adapter("adapter1", config) model = model.to(self.torch_device).eval() dummy_input = self.prepare_inputs_for_testing() # ensure that we have at least 3 samples for this test dummy_input = {k: torch.cat([v for _ in range(3)]) for k, v in dummy_input.items()} with torch.inference_mode(): with model.disable_adapter(): output_base = model(**dummy_input)[0] logits_base = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] model.set_adapter("adapter0") with torch.inference_mode(): output_adapter0 = model(**dummy_input)[0] logits_adapter0 = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] model.set_adapter("adapter1") with torch.inference_mode(): output_adapter1 = model(**dummy_input)[0] logits_adapter1 = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] atol, rtol = 1e-4, 1e-4 # sanity check that there are enough outputs and that they are different assert len(output_base) == len(output_adapter0) == len(output_adapter1) >= 3 assert len(logits_base) == len(logits_adapter0) == len(logits_adapter1) >= 3 assert not torch.allclose(output_base, output_adapter0, atol=atol, rtol=rtol) assert not torch.allclose(output_base, output_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(output_adapter0, output_adapter1, atol=atol, rtol=rtol) assert not 
torch.allclose(logits_base, logits_adapter0, atol=atol, rtol=rtol) assert not torch.allclose(logits_base, logits_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(logits_adapter0, logits_adapter1, atol=atol, rtol=rtol) # alternate between base model, adapter0, and adapter1 adapters = ["__base__", "adapter0", "adapter1"] dummy_input["adapter_names"] = [adapters[i % 3] for i in (range(len(dummy_input["input_ids"])))] with torch.inference_mode(): output_mixed = model(**dummy_input)[0] logits_mixed = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] assert torch.allclose(output_base[::3], output_mixed[::3], atol=atol, rtol=rtol) assert torch.allclose(output_adapter0[1::3], output_mixed[1::3], atol=atol, rtol=rtol) assert torch.allclose(output_adapter1[2::3], output_mixed[2::3], atol=atol, rtol=rtol) assert torch.allclose(logits_base[::3], logits_mixed[::3], atol=atol, rtol=rtol) assert torch.allclose(logits_adapter0[1::3], logits_mixed[1::3], atol=atol, rtol=rtol) assert torch.allclose(logits_adapter1[2::3], logits_mixed[2::3], atol=atol, rtol=rtol) def _test_generate(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `generate` works _ = model.generate(**inputs) def _test_generate_pos_args(self, model_id, config_cls, config_kwargs, raises_err: bool): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() if raises_err: with pytest.raises(TypeError): # check if `generate` raises an error if positional arguments are passed _ = model.generate(inputs["input_ids"]) else: # check if `generate` works if positional arguments are passed _ = model.generate(inputs["input_ids"]) def _test_generate_half_prec(self, model_id, config_cls, config_kwargs): if config_cls not in (IA3Config, LoraConfig, PrefixTuningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if self.torch_device == "mps": # BFloat16 is not supported on MPS return pytest.skip("BFloat16 is not supported on MPS") model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.bfloat16) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask) def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs): if config_cls not in (PrefixTuningConfig,): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.half() assert model.base_model_torch_dtype == torch.float16 def _test_training(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if (config_cls == AdaLoraConfig) and ("roberta" in 
model_id.lower()): # TODO: no gradients on the "dense" layer, other layers work, not sure why self.skipTest("AdaLora with RoBERTa does not work correctly") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() parameter_prefix = model.prefix for n, param in model.named_parameters(): if (parameter_prefix in n) or ("modules_to_save" in n): assert param.grad is not None else: assert param.grad is None def _test_inference_safetensors(self, model_id, config_cls, config_kwargs): if (config_cls == PrefixTuningConfig) and ("deberta" in model_id.lower()): # TODO: raises an error: # TypeError: DebertaModel.forward() got an unexpected keyword argument 'past_key_values' self.skipTest("DeBERTa with PrefixTuning does not work correctly") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() # set to eval mode, since things like dropout can affect the output otherwise model.eval() logits = model(**inputs)[0][0] with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, safe_serialization=True) assert "adapter_model.safetensors" in os.listdir(tmp_dirname) assert "adapter_model.bin" not in os.listdir(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] assert torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4) def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig,): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, layers_to_transform=[0], **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() nb_trainable = 0 for n, param in model.named_parameters(): if "lora" in n: assert param.grad is not None nb_trainable += 1 else: assert param.grad is None with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] assert torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) nb_trainable_all = 0 for n, param in model.named_parameters(): if "lora" in n: nb_trainable_all += 1 assert nb_trainable < nb_trainable_all def _test_training_gradient_checkpointing(self, model_id, 
config_cls, config_kwargs): if issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if (config_cls == AdaLoraConfig) and ("roberta" in model_id.lower()): # TODO: no gradients on the "dense" layer, other layers work, not sure why self.skipTest("AdaLora with RoBERTa does not work correctly") model = self.transformers_class.from_pretrained(model_id) if not getattr(model, "supports_gradient_checkpointing", False): return pytest.skip(f"Model {model_id} does not support gradient checkpointing") model.gradient_checkpointing_enable() config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() for n, param in model.named_parameters(): if model.prefix in n: assert param.grad is not None else: assert param.grad is None def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig,): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) _ = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname, device_map={"": "cpu"}).to( self.torch_device ) def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs): if not issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() # check that prompt encoder has grads for param in model.prompt_encoder.parameters(): assert param.grad is not None def _test_delete_adapter(self, model_id, config_cls, config_kwargs): supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.VERA, PeftType.FOURIERFT, PeftType.HRA, ] # IA3 does not support deleting adapters yet, but it just needs to be added # AdaLora does not support multiple adapters config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return pytest.skip(f"Test not applicable for {config.peft_type}") model = self.transformers_class.from_pretrained(model_id) adapter_to_delete = "delete_me" model = get_peft_model(model, config) model.add_adapter(adapter_to_delete, config) model.set_adapter(adapter_to_delete) model = model.to(self.torch_device) model.delete_adapter(adapter_to_delete) assert adapter_to_delete not in model.peft_config assert model.active_adapters == ["default"] key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", []) for attr in attributes_to_check: assert adapter_to_delete not in getattr(target, attr) # check that we 
can also delete the last remaining adapter model.delete_adapter("default") assert "default" not in model.peft_config assert model.active_adapters == [] input = self.prepare_inputs_for_testing() # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter model.base_model(**input) # should not raise an error def _test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs): # same as test_delete_adapter, but this time an inactive adapter is deleted supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.FOURIERFT, PeftType.HRA, ] # IA3 does not support deleting adapters yet, but it just needs to be added # AdaLora does not support multiple adapters config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return pytest.skip(f"Test not applicable for {config.peft_type}") model = self.transformers_class.from_pretrained(model_id) adapter_to_delete = "delete_me" model = get_peft_model(model, config) model.add_adapter(adapter_to_delete, config) # "delete_me" is added but not activated model = model.to(self.torch_device) model.delete_adapter(adapter_to_delete) assert adapter_to_delete not in model.peft_config assert model.active_adapters == ["default"] key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", []) for attr in attributes_to_check: assert adapter_to_delete not in getattr(target, attr) # check that we can also delete the last remaining adapter model.delete_adapter("default") assert "default" not in model.peft_config assert model.active_adapters == [] input = self.prepare_inputs_for_testing() # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter model.base_model(**input) # should not raise an error def _test_unload_adapter(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) if config.peft_type not in ("LORA", "ADALORA", "IA3", "BOFT", "VERA", "FOURIERFT", "HRA"): with pytest.raises(AttributeError): model = model.unload() else: dummy_input = self.prepare_inputs_for_testing() logits_with_adapter = model(**dummy_input)[0] transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) logits_transformers = transformers_model(**dummy_input)[0] model.eval() model = model.unload() logits_unload = model(**dummy_input)[0] assert not torch.allclose(logits_with_adapter, logits_unload, atol=1e-10, rtol=1e-10) assert torch.allclose(logits_transformers, logits_unload, atol=1e-4, rtol=1e-4) def _test_weighted_combination_of_adapters_lora(self, model, config, adapter_list, weight_list): model.add_adapter(adapter_list[1], config) model.add_adapter(adapter_list[2], replace(config, r=20)) model = model.to(self.torch_device) # test re-weighting single adapter model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting") # test svd re-weighting with multiple adapters model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_svd_reweighting") # test ties_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], 
weight_list[1:], "multi_adapter_ties_svd_reweighting", combination_type="ties_svd", density=0.5, ) # test dare_linear_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_linear_svd_reweighting", combination_type="dare_linear_svd", density=0.5, ) # test dare_ties_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_ties_svd_reweighting", combination_type="dare_ties_svd", density=0.5, ) # test magnitude_prune_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_magnitude_prune_svd_reweighting", combination_type="magnitude_prune_svd", density=0.5, ) # test cat re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_cat_reweighting", combination_type="cat" ) # test linear re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_linear_reweighting", combination_type="linear" ) # test ties re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_ties_reweighting", combination_type="ties", density=0.5 ) # test dare_linear re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_dare_linear_reweighting", combination_type="dare_linear", density=0.5, ) # test dare_ties re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_dare_ties_reweighting", combination_type="dare_ties", density=0.5, ) # test magnitude_prune re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_magnitude_prune_reweighting", combination_type="magnitude_prune", density=0.5, ) # test linear re-weighting with multiple adapters with only first adapter having non zero weight model.add_weighted_adapter( adapter_list[:2], [weight_list[0], 0], "multi_adapter_linear_reweighting_single_enabled", combination_type="linear", ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_linear_reweighting_uneven_r", combination_type="linear", ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_ties_reweighting_uneven_r", combination_type="ties", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_linear_reweighting_uneven_r", combination_type="dare_linear", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_ties_reweighting_uneven_r", combination_type="dare_ties", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_magnitude_prune_reweighting_uneven_r", combination_type="magnitude_prune", density=0.5, ) new_adapters = [ "single_adapter_reweighting", "multi_adapter_svd_reweighting", "multi_adapter_ties_svd_reweighting", "multi_adapter_dare_linear_svd_reweighting", "multi_adapter_dare_ties_svd_reweighting", "multi_adapter_magnitude_prune_svd_reweighting", "multi_adapter_cat_reweighting", "multi_adapter_linear_reweighting", "multi_adapter_linear_reweighting_single_enabled", "multi_adapter_ties_reweighting", "multi_adapter_dare_linear_reweighting", 
"multi_adapter_dare_ties_reweighting", "multi_adapter_magnitude_prune_reweighting", ] for new_adapter in new_adapters: assert new_adapter in model.peft_config key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) if isinstance(target, LoraLayer): for adapter_name in new_adapters: if "single" in adapter_name: new_delta_weight = target.get_delta_weight(adapter_name) weighted_original_delta_weights = target.get_delta_weight(adapter_list[0]) * weight_list[0] assert torch.allclose(new_delta_weight, weighted_original_delta_weights, atol=1e-4, rtol=1e-4) elif "svd" in adapter_name: assert target.r[adapter_name] == 20 elif "linear" in adapter_name: assert target.r[adapter_name] == 8 elif "cat" in adapter_name: assert target.r[adapter_name] == 28 dummy_input = self.prepare_inputs_for_testing() model.eval() for adapter_name in new_adapters: # ensuring new adapters pass the forward loop model.set_adapter(adapter_name) assert model.active_adapter == adapter_name assert model.active_adapters == [adapter_name] model(**dummy_input)[0] def _test_weighted_combination_of_adapters_ia3(self, model, config, adapter_list, weight_list): model.add_adapter(adapter_list[1], config) model.add_adapter(adapter_list[2], config) model = model.to(self.torch_device) # test re-weighting single adapter model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting") # test re-weighting with multiple adapters model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_reweighting") new_adapters = [ "single_adapter_reweighting", "multi_adapter_reweighting", ] for new_adapter in new_adapters: assert new_adapter in model.peft_config dummy_input = self.prepare_inputs_for_testing() model.eval() for adapter_name in new_adapters: # ensuring new adapters pass the forward loop model.set_adapter(adapter_name) assert model.active_adapter == adapter_name assert model.active_adapters == [adapter_name] model(**dummy_input)[0] def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return pytest.skip(f"Test not applicable for {config_cls}") if model_id.endswith("qwen2"): # Qwen2 fails with weighted adapter combinations using SVD return pytest.skip(f"Test does not work with model {model_id}") adapter_list = ["adapter1", "adapter_2", "adapter_3"] weight_list = [0.5, 1.5, 1.5] # Initialize the config config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if not isinstance(config, (LoraConfig, IA3Config)): # This test is only applicable for Lora and IA3 configs return pytest.skip(f"Test not applicable for {config}") model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_list[0]) if isinstance(config, LoraConfig): self._test_weighted_combination_of_adapters_lora(model, config, adapter_list, weight_list) elif isinstance(config, IA3Config): self._test_weighted_combination_of_adapters_ia3(model, config, adapter_list, weight_list) else: pytest.skip(f"Test not applicable for {config}") def _test_disable_adapter(self, model_id, config_cls, config_kwargs): task_type = config_kwargs.get("task_type") if (task_type == "SEQ_2_SEQ_LM") and (config_cls in (PromptTuningConfig, PromptEncoderConfig)): self.skipTest("Seq2Seq + prompt tuning/prompt encoder does not work with disabling adapters") def get_output(model): # helper function that works with different 
model types torch.manual_seed(0) if hasattr(model, "generate"): # let's check the scores, not the output ids, since the latter can easily be identical even if the # weights are slightly changed output = model.generate(**input, return_dict_in_generate=True, output_scores=True).scores[0] # take element 0, as output is a tuple else: output = model(**input) if hasattr(output, "images"): # for SD import numpy as np img = output.images[0] return torch.from_numpy(np.array(img)) return output # initialize model model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) # output from BASE MODEL input = self.prepare_inputs_for_testing() output_before = get_output(model) # output from PEFT MODEL if hasattr(self, "instantiate_sd_peft"): # SD models are instantiated differently peft_model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) else: config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) peft_model = get_peft_model(model, config) output_peft = get_output(peft_model) # first check trivial case is not true that peft does not affect the output; for this to work, init_lora_weight # must be False if isinstance(peft_model, StableDiffusionPipeline): # for SD, check that most pixels have different values assert (output_before != output_peft).float().mean() > 0.8 else: assert not torch.allclose(output_before, output_peft) # output with DISABLED ADAPTER if isinstance(peft_model, StableDiffusionPipeline): with peft_model.unet.disable_adapter(): with peft_model.text_encoder.disable_adapter(): output_peft_disabled = get_output(peft_model) # for SD, very rarely, a pixel can differ assert (output_before != output_peft_disabled).float().mean() < 1e-4 else: with peft_model.disable_adapter(): output_peft_disabled = get_output(peft_model) assert torch.allclose(output_before, output_peft_disabled, atol=1e-6, rtol=1e-6) # after leaving the disable_adapter context, the output should be the same as with enabled adapter again # see #1501 output_peft_after_disabled = get_output(peft_model) assert torch.allclose(output_peft, output_peft_after_disabled, atol=1e-6, rtol=1e-6) # TODO: add tests to check if disabling adapters works after calling merge_adapter def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs): # When trying to add multiple adapters with bias in Lora, AdaLora or BOFTConfig, an error should be # raised. Also, the peft model should not be left in a half-initialized state. 
if not issubclass(config_cls, (LoraConfig, AdaLoraConfig, BOFTConfig)): return pytest.skip(f"Test not applicable for {config_cls}") config_kwargs = config_kwargs.copy() config_kwargs["bias"] = "all" config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, "adapter0") if config_cls == LoraConfig or config_cls == AdaLoraConfig: with pytest.raises(ValueError): model.add_adapter("adapter1", replace(config, r=20)) if config_cls == BOFTConfig: with pytest.raises(ValueError): model.add_adapter("adapter1", replace(config, boft_block_num=1, boft_block_size=0)) # (superficial) test that the model is not left in a half-initialized state when adding an adapter fails assert "adapter1" not in model.peft_config assert "adapter1" not in model.base_model.peft_config def _test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs): # https://github.com/huggingface/peft/issues/727 model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter").to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() inputs_embeds = model.get_input_embeddings()(dummy_input["input_ids"]) # just check that no error is raised model.forward(inputs_embeds=inputs_embeds)
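# ---------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): one possible way a concrete
# test module could drive PeftCommonTester together with PeftTestConfigManager. The model
# id, the task type and the extra LoRA kwargs below are placeholders chosen for the
# example; the class name deliberately does not start with "Test" so pytest will not
# collect it from this common module.
from transformers import AutoModelForCausalLM


_EXAMPLE_GRID = {
    "model_ids": ["hf-internal-testing/tiny-random-OPTForCausalLM"],  # placeholder model id
    "task_type": "CAUSAL_LM",
    "lora_kwargs": {"init_lora_weights": [False]},  # expands into additional LoRA-only cases
}


class ExampleDecoderTester(PeftCommonTester):
    transformers_class = AutoModelForCausalLM

    def prepare_inputs_for_testing(self):
        # The common tester only needs a dict of tensors it can feed to the model.
        input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
        attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
        return {"input_ids": input_ids, "attention_mask": attention_mask}

    # Each generated tuple is (test_name, model_id, config_cls, config_kwargs).
    @pytest.mark.parametrize(
        "test_name,model_id,config_cls,config_kwargs",
        PeftTestConfigManager.get_grid_parameters(_EXAMPLE_GRID),
    )
    def example_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
        self._test_merge_layers(model_id, config_cls, config_kwargs)
# ---------------------------------------------------------------------------------------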
peft/tests/testing_common.py/0
{ "file_path": "peft/tests/testing_common.py", "repo_id": "peft", "token_count": 29970 }
212
import argparse
import hashlib
import os

import mxnet as mx
import gluoncv
import torch
from timm import create_model

parser = argparse.ArgumentParser(description='Convert from MXNet')
parser.add_argument('--model', default='all', type=str, metavar='MODEL',
                    help='Name of model to convert (default: "all")')


def convert(mxnet_name, torch_name):
    # download and load the pre-trained model
    net = gluoncv.model_zoo.get_model(mxnet_name, pretrained=True)

    # create corresponding torch model
    torch_net = create_model(torch_name)

    mxp = [(k, v) for k, v in net.collect_params().items() if 'running' not in k]
    torchp = list(torch_net.named_parameters())
    torch_params = {}

    # convert parameters
    # NOTE: we are relying on the fact that the order of parameters
    # are usually exactly the same between these models, thus no key name mapping
    # is necessary. Asserts will trip if this is not the case.
    for (tn, tv), (mn, mv) in zip(torchp, mxp):
        m_split = mn.split('_')
        t_split = tn.split('.')
        print(t_split, m_split)
        print(tv.shape, mv.shape)

        # ensure ordering of BN params match since their sizes are not specific
        if m_split[-1] == 'gamma':
            assert t_split[-1] == 'weight'
        if m_split[-1] == 'beta':
            assert t_split[-1] == 'bias'

        # ensure shapes match
        assert all(t == m for t, m in zip(tv.shape, mv.shape))

        torch_tensor = torch.from_numpy(mv.data().asnumpy())
        torch_params[tn] = torch_tensor

    # convert buffers (batch norm running stats)
    mxb = [(k, v) for k, v in net.collect_params().items() if any(x in k for x in ['running_mean', 'running_var'])]
    torchb = [(k, v) for k, v in torch_net.named_buffers() if 'num_batches' not in k]
    for (tn, tv), (mn, mv) in zip(torchb, mxb):
        print(tn, mn)
        print(tv.shape, mv.shape)

        # ensure ordering of BN params match since their sizes are not specific
        if 'running_var' in tn:
            assert 'running_var' in mn
        if 'running_mean' in tn:
            assert 'running_mean' in mn

        torch_tensor = torch.from_numpy(mv.data().asnumpy())
        torch_params[tn] = torch_tensor

    torch_net.load_state_dict(torch_params)
    torch_filename = './%s.pth' % torch_name
    torch.save(torch_net.state_dict(), torch_filename)
    with open(torch_filename, 'rb') as f:
        sha_hash = hashlib.sha256(f.read()).hexdigest()
    final_filename = os.path.splitext(torch_filename)[0] + '-' + sha_hash[:8] + '.pth'
    os.rename(torch_filename, final_filename)
    print("=> Saved converted model to '{}, SHA256: {}'".format(final_filename, sha_hash))


def map_mx_to_torch_model(mx_name):
    torch_name = mx_name.lower()
    if torch_name.startswith('se_'):
        torch_name = torch_name.replace('se_', 'se')
    elif torch_name.startswith('senet_'):
        torch_name = torch_name.replace('senet_', 'senet')
    elif torch_name.startswith('inceptionv3'):
        torch_name = torch_name.replace('inceptionv3', 'inception_v3')
    torch_name = 'gluon_' + torch_name
    return torch_name


ALL = ['resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', 'resnet101_v1b', 'resnet152_v1b',
       'resnet50_v1c', 'resnet101_v1c', 'resnet152_v1c',
       'resnet50_v1d', 'resnet101_v1d', 'resnet152_v1d',
       #'resnet50_v1e', 'resnet101_v1e', 'resnet152_v1e',
       'resnet50_v1s', 'resnet101_v1s', 'resnet152_v1s',
       'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
       'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnext101_64x4d',
       'senet_154', 'inceptionv3']


def main():
    args = parser.parse_args()
    if not args.model or args.model == 'all':
        for mx_model in ALL:
            torch_model = map_mx_to_torch_model(mx_model)
            convert(mx_model, torch_model)
    else:
        mx_model = args.model
        torch_model = map_mx_to_torch_model(mx_model)
        convert(mx_model, torch_model)


if __name__ == '__main__':
    main()
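# ---------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original script): the same conversion that
# `python convert_from_mxnet.py --model resnet50_v1c` performs can also be driven
# programmatically. The model name below is simply one entry from the ALL list, and the
# converted .pth file lands in the current working directory exactly as in main().
def example_convert_single_model():
    mx_name = 'resnet50_v1c'                     # a GluonCV model zoo name from ALL
    torch_name = map_mx_to_torch_model(mx_name)  # -> 'gluon_resnet50_v1c'
    convert(mx_name, torch_name)                 # writes ./gluon_resnet50_v1c-<sha8>.pth
# ---------------------------------------------------------------------------------------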
pytorch-image-models/convert/convert_from_mxnet.py/0
{ "file_path": "pytorch-image-models/convert/convert_from_mxnet.py", "repo_id": "pytorch-image-models", "token_count": 1786 }
213
# HRNet

**HRNet**, or **High-Resolution Net**, is a general purpose convolutional neural network for tasks like semantic segmentation, object detection and image classification. It is able to maintain high resolution representations through the whole process. We start from a high-resolution convolution stream, gradually add high-to-low resolution convolution streams one by one, and connect the multi-resolution streams in parallel. The resulting network consists of several (\\( 4 \\) in the paper) stages and the \\( n \\)th stage contains \\( n \\) streams corresponding to \\( n \\) resolutions. The authors conduct repeated multi-resolution fusions by exchanging the information across the parallel streams over and over.

## How do I use this model on an image?

To load a pretrained model:

```py
>>> import timm
>>> model = timm.create_model('hrnet_w18', pretrained=True)
>>> model.eval()
```

To load and preprocess the image:

```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)

>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```py
>>> import torch
>>> with torch.no_grad():
...     out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
...     categories = [s.strip() for s in f.readlines()]

>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
...     print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `hrnet_w18`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```py
>>> model = timm.create_model('hrnet_w18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
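If you would rather not adapt the recipe scripts, a bare-bones loop along the lines of the sketch below also works for training or finetuning. Note that `train_loader` is assumed to be any PyTorch `DataLoader` yielding preprocessed `(image, label)` batches and the optimizer settings are illustrative only; the recipe scripts additionally handle augmentation, learning-rate scheduling, EMA and mixed precision for you.

```py
>>> import torch
>>> model = timm.create_model('hrnet_w18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
>>> model.train()
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> for images, labels in train_loader:  # train_loader is a placeholder DataLoader
...     optimizer.zero_grad()
...     loss = criterion(model(images), labels)
...     loss.backward()
...     optimizer.step()
```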
## Citation ```BibTeX @misc{sun2019highresolution, title={High-Resolution Representations for Labeling Pixels and Regions}, author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, year={2019}, eprint={1904.04514}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: HRNet Paper: Title: Deep High-Resolution Representation Learning for Visual Recognition URL: https://paperswithcode.com/paper/190807919 Models: - Name: hrnet_w18 In Collection: HRNet Metadata: FLOPs: 5547205500 Parameters: 21300000 File Size: 85718883 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18 Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L800 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.76% Top 5 Accuracy: 93.44% - Name: hrnet_w18_small In Collection: HRNet Metadata: FLOPs: 2071651488 Parameters: 13190000 File Size: 52934302 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18_small Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L790 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.34% Top 5 Accuracy: 90.68% - Name: hrnet_w18_small_v2 In Collection: HRNet Metadata: FLOPs: 3360023160 Parameters: 15600000 File Size: 62682879 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w18_small_v2 Epochs: 100 Layers: 18 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L795 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.11% Top 5 Accuracy: 92.41% - Name: hrnet_w30 In Collection: HRNet Metadata: FLOPs: 10474119492 Parameters: 37710000 File Size: 151452218 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w30 Epochs: 100 Layers: 30 Crop Pct: '0.875' 
Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L805 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.21% Top 5 Accuracy: 94.22% - Name: hrnet_w32 In Collection: HRNet Metadata: FLOPs: 11524528320 Parameters: 41230000 File Size: 165547812 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs Training Time: 60 hours ID: hrnet_w32 Epochs: 100 Layers: 32 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L810 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.45% Top 5 Accuracy: 94.19% - Name: hrnet_w40 In Collection: HRNet Metadata: FLOPs: 16381182192 Parameters: 57560000 File Size: 230899236 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w40 Epochs: 100 Layers: 40 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L815 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.93% Top 5 Accuracy: 94.48% - Name: hrnet_w44 In Collection: HRNet Metadata: FLOPs: 19202520264 Parameters: 67060000 File Size: 268957432 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w44 Epochs: 100 Layers: 44 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L820 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.89% Top 5 Accuracy: 94.37% - Name: hrnet_w48 In Collection: HRNet Metadata: FLOPs: 22285865760 Parameters: 77470000 File Size: 310603710 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs Training Time: 80 hours ID: hrnet_w48 Epochs: 100 Layers: 48 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: 
https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L825 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.32% Top 5 Accuracy: 94.51% - Name: hrnet_w64 In Collection: HRNet Metadata: FLOPs: 37239321984 Parameters: 128060000 File Size: 513071818 Architecture: - Batch Normalization - Convolution - ReLU - Residual Connection Tasks: - Image Classification Training Techniques: - Nesterov Accelerated Gradient - Weight Decay Training Data: - ImageNet Training Resources: 4x NVIDIA V100 GPUs ID: hrnet_w64 Epochs: 100 Layers: 64 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/hrnet.py#L830 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.46% Top 5 Accuracy: 94.65% -->
pytorch-image-models/hfdocs/source/models/hrnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/hrnet.mdx", "repo_id": "pytorch-image-models", "token_count": 5056 }
214
# RegNetY **RegNetY** is a convolutional network design space of simple, regular models, parameterised by depth \\( d \\), initial width \\( w\_{0} > 0 \\), and slope \\( w\_{a} > 0 \\); it generates a different block width \\( u\_{j} \\) for each block \\( j < d \\). The key restriction for the RegNet family of models is that block widths follow a linear parameterisation (the design space only contains models with this linear structure): \\( u\_{j} = w\_{0} + w\_{a}\cdot{j} \\) (a short code sketch of this width rule is given just before the citation at the end of this page). For **RegNetX** the authors add further restrictions: \\( b = 1 \\) (the bottleneck ratio), \\( 12 \leq d \leq 28 \\), and \\( w\_{m} \geq 2 \\) (the width multiplier). For **RegNetY** they make one further change, which is to include [Squeeze-and-Excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('regnety_002', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 prediction class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `regnety_002`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('regnety_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
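The linear width rule above can be sketched in a few lines of NumPy. This is an illustrative simplification of the design-space rule from the paper (including its quantisation of widths to multiples of 8 via the width multiplier \\( w\_{m} \\)), not the exact timm implementation, and the example parameters only approximate those of `regnety_002`:

```py
>>> import numpy as np

>>> def regnet_widths(w_0, w_a, w_m, depth, q=8):
...     u = w_0 + w_a * np.arange(depth)              # continuous widths u_j = w_0 + w_a * j
...     s = np.round(np.log(u / w_0) / np.log(w_m))   # quantisation exponents per block
...     w = w_0 * np.power(w_m, s)                    # quantised widths
...     return (q * np.round(w / q)).astype(int)      # round to multiples of q

>>> regnet_widths(w_0=24, w_a=36.44, w_m=2.49, depth=13)
```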
## Citation ```BibTeX @misc{radosavovic2020designing, title={Designing Network Design Spaces}, author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, year={2020}, eprint={2003.13678}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: RegNetY Paper: Title: Designing Network Design Spaces URL: https://paperswithcode.com/paper/designing-network-design-spaces Models: - Name: regnety_002 In Collection: RegNetY Metadata: FLOPs: 255754236 Parameters: 3160000 File Size: 12782926 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_002 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L409 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 70.28% Top 5 Accuracy: 89.55% - Name: regnety_004 In Collection: RegNetY Metadata: FLOPs: 515664568 Parameters: 4340000 File Size: 17542753 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_004 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L415 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.02% Top 5 Accuracy: 91.76% - Name: regnety_006 In Collection: RegNetY Metadata: FLOPs: 771746928 Parameters: 6060000 File Size: 24394127 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_006 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L421 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.27% Top 5 Accuracy: 92.53% - Name: regnety_008 In Collection: RegNetY Metadata: FLOPs: 1023448952 Parameters: 6260000 File Size: 25223268 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image 
Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_008 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L427 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.32% Top 5 Accuracy: 93.07% - Name: regnety_016 In Collection: RegNetY Metadata: FLOPs: 2070895094 Parameters: 11200000 File Size: 45115589 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_016 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L433 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.87% Top 5 Accuracy: 93.73% - Name: regnety_032 In Collection: RegNetY Metadata: FLOPs: 4081118714 Parameters: 19440000 File Size: 78084523 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_032 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L439 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.01% Top 5 Accuracy: 95.91% - Name: regnety_040 In Collection: RegNetY Metadata: FLOPs: 5105933432 Parameters: 20650000 File Size: 82913909 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_040 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L445 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.23% Top 5 Accuracy: 94.64% - Name: regnety_064 In Collection: RegNetY Metadata: FLOPs: 8167730444 Parameters: 30580000 File Size: 122751416 Architecture: - 
1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_064 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L451 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.73% Top 5 Accuracy: 94.76% - Name: regnety_080 In Collection: RegNetY Metadata: FLOPs: 10233621420 Parameters: 39180000 File Size: 157124671 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_080 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L457 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.87% Top 5 Accuracy: 94.83% - Name: regnety_120 In Collection: RegNetY Metadata: FLOPs: 15542094856 Parameters: 51820000 File Size: 207743949 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_120 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L463 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.38% Top 5 Accuracy: 95.12% - Name: regnety_160 In Collection: RegNetY Metadata: FLOPs: 20450196852 Parameters: 83590000 File Size: 334916722 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_160 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 512 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L469 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_160-d64013cd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: 
Top 1 Accuracy: 80.28% Top 5 Accuracy: 94.97% - Name: regnety_320 In Collection: RegNetY Metadata: FLOPs: 41492618394 Parameters: 145050000 File Size: 580891965 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - ReLU - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA V100 GPUs ID: regnety_320 Epochs: 100 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 5.0e-05 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/regnet.py#L475 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.8% Top 5 Accuracy: 95.25% -->
pytorch-image-models/hfdocs/source/models/regnety.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/regnety.mdx", "repo_id": "pytorch-image-models", "token_count": 6770 }
215
# SWSL ResNeXt A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width. The models in this collection utilise semi-weakly supervised learning to improve their performance. The approach brings important gains to standard architectures for image, video and fine-grained classification. Please note the CC-BY-NC 4.0 license on these weights; they are for non-commercial use only. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('swsl_resnext101_32x16d', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 prediction class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `swsl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('swsl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-00546, author = {I.
Zeki Yalniz and Herv{\'{e}} J{\'{e}}gou and Kan Chen and Manohar Paluri and Dhruv Mahajan}, title = {Billion-scale semi-supervised learning for image classification}, journal = {CoRR}, volume = {abs/1905.00546}, year = {2019}, url = {http://arxiv.org/abs/1905.00546}, archivePrefix = {arXiv}, eprint = {1905.00546}, timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: SWSL ResNext Paper: Title: Billion-scale semi-supervised learning for image classification URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for Models: - Name: swsl_resnext101_32x16d In Collection: SWSL ResNext Metadata: FLOPs: 46623691776 Parameters: 194030000 File Size: 777518664 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x16d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L1009 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.34% Top 5 Accuracy: 96.84% - Name: swsl_resnext101_32x4d In Collection: SWSL ResNext Metadata: FLOPs: 10298145792 Parameters: 44180000 File Size: 177341913 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x4d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L987 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.22% Top 5 Accuracy: 96.77% - Name: swsl_resnext101_32x8d In Collection: SWSL ResNext Metadata: FLOPs: 21180417024 Parameters: 88790000 File Size: 356056638 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext101_32x8d LR: 0.0015 Epochs: 30 Layers: 101 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L998 Weights: 
https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.27% Top 5 Accuracy: 97.17% - Name: swsl_resnext50_32x4d In Collection: SWSL ResNext Metadata: FLOPs: 5472648192 Parameters: 25030000 File Size: 100428550 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Grouped Convolution - Max Pooling - ReLU - ResNeXt Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - IG-1B-Targeted - ImageNet Training Resources: 64x GPUs ID: swsl_resnext50_32x4d LR: 0.0015 Epochs: 30 Layers: 50 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L976 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.17% Top 5 Accuracy: 96.23% -->
pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/swsl-resnext.mdx", "repo_id": "pytorch-image-models", "token_count": 3472 }
216
# Scripts Train, validation, inference, and checkpoint cleaning scripts are included in the github root folder. Scripts are not currently packaged in the pip release. The training and validation scripts evolved from early versions of the [PyTorch Imagenet Examples](https://github.com/pytorch/examples). I have added significant functionality over time, including CUDA specific performance enhancements based on [NVIDIA's APEX Examples](https://github.com/NVIDIA/apex/tree/master/examples). ## Training Script The variety of training args is large and not all combinations of options (or even individual options) have been fully tested. For the training dataset argument, specify the base folder that contains the `train` and `validation` sub-folders. To train an SE-ResNet34 on ImageNet, locally distributed, 4 GPUs, one process per GPU w/ cosine schedule, random-erasing prob of 50% and per-pixel random value: ```bash ./distributed_train.sh 4 /data/imagenet --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4 ``` <Tip> It is recommended to use PyTorch 1.9+ w/ PyTorch native AMP and DDP instead of APEX AMP. --amp defaults to native AMP as of timm ver 0.4.3. --apex-amp will force use of APEX components if they are installed. </Tip> ## Validation / Inference Scripts Validation and inference scripts are similar in usage. One outputs metrics on a validation set and the other outputs topk class ids in a csv. Specify the folder containing validation images, not the base folder as in the training script. To validate with the model's pretrained weights (if they exist): ```bash python validate.py /imagenet/validation/ --model seresnext26_32x4d --pretrained ``` To run inference from a checkpoint: ```bash python inference.py /imagenet/validation/ --model mobilenetv3_large_100 --checkpoint ./output/train/model_best.pth.tar ``` ## Training Examples ### EfficientNet-B2 with RandAugment - 80.4 top-1, 95.1 top-5 These params are for dual Titan RTX cards with NVIDIA Apex installed: ```bash ./distributed_train.sh 2 /imagenet/ --model efficientnet_b2 -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .016 ``` ### MixNet-XL with RandAugment - 80.5 top-1, 94.9 top-5 These params are for dual Titan RTX cards with NVIDIA Apex installed: ```bash ./distributed_train.sh 2 /imagenet/ --model mixnet_xl -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .969 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.3 --amp --lr .016 --dist-bn reduce ``` ### SE-ResNeXt-26-D and SE-ResNeXt-26-T These hparams (or similar) work well for a wide range of ResNet architectures; it is generally a good idea to increase the epoch count as the model size increases, i.e. approx 180-200 for ResNe(X)t50, and 220+ for larger models. Increase batch size and LR proportionally for better GPUs or with AMP enabled.
These params were for 2 1080Ti cards: ```bash ./distributed_train.sh 2 /imagenet/ --model seresnext26t_32x4d --lr 0.1 --warmup-epochs 5 --epochs 160 --weight-decay 1e-4 --sched cosine --reprob 0.4 --remode pixel -b 112 ``` ### EfficientNet-B3 with RandAugment - 81.5 top-1, 95.7 top-5 The training of this model started with the same command line as EfficientNet-B2 w/ RA above. After almost three weeks of training, the process crashed. The results weren't looking amazing, so I resumed the training several times with tweaks to a few params (increase RE prob, decrease rand-aug, increase ema-decay). Nothing looked great. I ended up averaging the best checkpoints from all restarts (see the checkpoint averaging example at the end of this page). The result is mediocre at default res/crop but oddly performs much better with a full image test crop of 1.0. ### EfficientNet-B0 with RandAugment - 77.7 top-1, 95.3 top-5 [Michael Klachko](https://github.com/michaelklachko) achieved these results with the command line for B2 adapted for a larger batch size, with the recommended B0 dropout rate of 0.2. ```bash ./distributed_train.sh 2 /imagenet/ --model efficientnet_b0 -b 384 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .048 ``` ### ResNet50 with JSD loss and RandAugment (clean + 2x RA augs) - 79.04 top-1, 94.39 top-5 Trained on two older 1080Ti cards, this took a while. Only a slightly better (and not statistically significant) ImageNet validation result than my first good AugMix training of 78.99. However, these weights are more robust on tests with ImageNetV2, ImageNet-Sketch, etc. Unlike my first AugMix runs, I've enabled SplitBatchNorm, disabled random erasing on the clean split, and cranked up random erasing prob on the 2 augmented paths. ```bash ./distributed_train.sh 2 /imagenet -b 64 --model resnet50 --sched cosine --epochs 200 --lr 0.05 --amp --remode pixel --reprob 0.6 --aug-splits 3 --aa rand-m9-mstd0.5-inc1 --resplit --split-bn --jsd --dist-bn reduce ``` ### EfficientNet-ES (EdgeTPU-Small) with RandAugment - 78.066 top-1, 93.926 top-5 Trained by [Andrew Lavin](https://github.com/andravin) with 8 V100 cards. Model EMA was not used; the final checkpoint is the average of the 8 best checkpoints during training. ```bash ./distributed_train.sh 8 /imagenet --model efficientnet_es -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 ``` ### MobileNetV3-Large-100 - 75.766 top-1, 92.542 top-5 ```bash ./distributed_train.sh 2 /imagenet/ --model mobilenetv3_large_100 -b 512 --sched step --epochs 600 --decay-epochs 2.4 --decay-rate .973 --opt rmsproptf --opt-eps .001 -j 7 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-path 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 --lr-noise 0.42 0.9 ``` ### ResNeXt-50 32x4d w/ RandAugment - 79.762 top-1, 94.60 top-5 These params will also work well for SE-ResNeXt-50 and SK-ResNeXt-50 and likely 101. I used them for the SK-ResNeXt-50 32x4d that I trained with 2 GPUs using a slightly higher LR per effective batch size (lr=0.18, b=192 per GPU). The cmd line below is tuned for 8 GPU training.
```bash ./distributed_train.sh 8 /imagenet --model resnext50_32x4d --lr 0.6 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce ```
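Several of the results above average the best checkpoints from one or more runs. The repository root includes an `avg_checkpoints.py` script for this; the invocation below is illustrative only (the run folder is a placeholder and the flags should be checked against `python avg_checkpoints.py --help`):

```bash
python avg_checkpoints.py --input ./output/train/<your_run_folder> --output avg_checkpoint.pth -n 8
```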
pytorch-image-models/hfdocs/source/training_script.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/training_script.mdx", "repo_id": "pytorch-image-models", "token_count": 2320 }
217
""" Dataset Factory Hacked together by / Copyright 2021, Ross Wightman """ import os from typing import Optional from torchvision.datasets import CIFAR100, CIFAR10, MNIST, KMNIST, FashionMNIST, ImageFolder try: from torchvision.datasets import Places365 has_places365 = True except ImportError: has_places365 = False try: from torchvision.datasets import INaturalist has_inaturalist = True except ImportError: has_inaturalist = False try: from torchvision.datasets import QMNIST has_qmnist = True except ImportError: has_qmnist = False try: from torchvision.datasets import ImageNet has_imagenet = True except ImportError: has_imagenet = False from .dataset import IterableImageDataset, ImageDataset _TORCH_BASIC_DS = dict( cifar10=CIFAR10, cifar100=CIFAR100, mnist=MNIST, kmnist=KMNIST, fashion_mnist=FashionMNIST, ) _TRAIN_SYNONYM = dict(train=None, training=None) _EVAL_SYNONYM = dict(val=None, valid=None, validation=None, eval=None, evaluation=None) def _search_split(root, split): # look for sub-folder with name of split in root and use that if it exists split_name = split.split('[')[0] try_root = os.path.join(root, split_name) if os.path.exists(try_root): return try_root def _try(syn): for s in syn: try_root = os.path.join(root, s) if os.path.exists(try_root): return try_root return root if split_name in _TRAIN_SYNONYM: root = _try(_TRAIN_SYNONYM) elif split_name in _EVAL_SYNONYM: root = _try(_EVAL_SYNONYM) return root def create_dataset( name: str, root: Optional[str] = None, split: str = 'validation', search_split: bool = True, class_map: dict = None, load_bytes: bool = False, is_training: bool = False, download: bool = False, batch_size: int = 1, num_samples: Optional[int] = None, seed: int = 42, repeats: int = 0, input_img_mode: str = 'RGB', **kwargs, ): """ Dataset factory method In parentheses after each arg are the type of dataset supported for each arg, one of: * folder - default, timm folder (or tar) based ImageDataset * torch - torchvision based datasets * HFDS - Hugging Face Datasets * TFDS - Tensorflow-datasets wrapper in IterabeDataset interface via IterableImageDataset * WDS - Webdataset * all - any of the above Args: name: dataset name, empty is okay for folder based datasets root: root folder of dataset (all) split: dataset split (all) search_split: search for split specific child fold from root so one can specify `imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder) class_map: specify class -> index mapping via text file or dict (folder) load_bytes: load data, return images as undecoded bytes (folder) download: download dataset if not present and supported (HFDS, TFDS, torch) is_training: create dataset in train mode, this is different from the split. For Iterable / TDFS it enables shuffle, ignored for other datasets. (TFDS, WDS) batch_size: batch size hint for (TFDS, WDS) seed: seed for iterable datasets (TFDS, WDS) repeats: dataset repeats per iteration i.e. epoch (TFDS, WDS) input_img_mode: Input image color conversion mode e.g. 
'RGB', 'L' (folder, TFDS, WDS, HFDS) **kwargs: other args to pass to dataset Returns: Dataset object """ kwargs = {k: v for k, v in kwargs.items() if v is not None} name = name.lower() if name.startswith('torch/'): name = name.split('/', 2)[-1] torch_kwargs = dict(root=root, download=download, **kwargs) if name in _TORCH_BASIC_DS: ds_class = _TORCH_BASIC_DS[name] use_train = split in _TRAIN_SYNONYM ds = ds_class(train=use_train, **torch_kwargs) elif name == 'inaturalist' or name == 'inat': assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist' target_type = 'full' split_split = split.split('/') if len(split_split) > 1: target_type = split_split[0].split('_') if len(target_type) == 1: target_type = target_type[0] split = split_split[-1] if split in _TRAIN_SYNONYM: split = '2021_train' elif split in _EVAL_SYNONYM: split = '2021_valid' ds = INaturalist(version=split, target_type=target_type, **torch_kwargs) elif name == 'places365': assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.' if split in _TRAIN_SYNONYM: split = 'train-standard' elif split in _EVAL_SYNONYM: split = 'val' ds = Places365(split=split, **torch_kwargs) elif name == 'qmnist': assert has_qmnist, 'Please update to a newer PyTorch and torchvision for QMNIST dataset.' use_train = split in _TRAIN_SYNONYM ds = QMNIST(train=use_train, **torch_kwargs) elif name == 'imagenet': assert has_imagenet, 'Please update to a newer PyTorch and torchvision for ImageNet dataset.' if split in _EVAL_SYNONYM: split = 'val' ds = ImageNet(split=split, **torch_kwargs) elif name == 'image_folder' or name == 'folder': # in case torchvision ImageFolder is preferred over timm ImageDataset for some reason if search_split and os.path.isdir(root): # look for split specific sub-folder in root root = _search_split(root, split) ds = ImageFolder(root, **kwargs) else: assert False, f"Unknown torchvision dataset {name}" elif name.startswith('hfds/'): # NOTE right now, HF datasets default arrow format is a random-access Dataset, # There will be a IterableDataset variant too, TBD ds = ImageDataset( root, reader=name, split=split, class_map=class_map, input_img_mode=input_img_mode, **kwargs, ) elif name.startswith('hfids/'): ds = IterableImageDataset( root, reader=name, split=split, class_map=class_map, is_training=is_training, download=download, batch_size=batch_size, num_samples=num_samples, repeats=repeats, seed=seed, input_img_mode=input_img_mode, **kwargs ) elif name.startswith('tfds/'): ds = IterableImageDataset( root, reader=name, split=split, class_map=class_map, is_training=is_training, download=download, batch_size=batch_size, num_samples=num_samples, repeats=repeats, seed=seed, input_img_mode=input_img_mode, **kwargs ) elif name.startswith('wds/'): ds = IterableImageDataset( root, reader=name, split=split, class_map=class_map, is_training=is_training, batch_size=batch_size, num_samples=num_samples, repeats=repeats, seed=seed, input_img_mode=input_img_mode, **kwargs ) else: # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future if search_split and os.path.isdir(root): # look for split specific sub-folder in root root = _search_split(root, split) ds = ImageDataset( root, reader=name, class_map=class_map, load_bytes=load_bytes, input_img_mode=input_img_mode, **kwargs, ) return ds
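# Illustrative usage sketch (not part of the original module); `create_dataset` is re-exported from
# `timm.data` and the paths below are placeholders:
#
#   from timm.data import create_dataset
#   # folder-based ImageNet-style dataset, the empty name selects the default timm folder reader
#   train_ds = create_dataset('', root='/data/imagenet', split='train', is_training=True)
#   # torchvision CIFAR-10, downloaded on first use
#   cifar_ds = create_dataset('torch/cifar10', root='./data', split='train', download=True)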
pytorch-image-models/timm/data/dataset_factory.py/0
{ "file_path": "pytorch-image-models/timm/data/dataset_factory.py", "repo_id": "pytorch-image-models", "token_count": 3864 }
218
""" A dataset reader that reads single tarfile based datasets This reader can read datasets consisting if a single tarfile containing images. I am planning to deprecated it in favour of ParerImageInTar. Hacked together by / Copyright 2020 Ross Wightman """ import os import tarfile from timm.utils.misc import natural_key from .class_map import load_class_map from .img_extensions import get_img_extensions from .reader import Reader def extract_tarinfo(tarfile, class_to_idx=None, sort=True): extensions = get_img_extensions(as_set=True) files = [] labels = [] for ti in tarfile.getmembers(): if not ti.isfile(): continue dirname, basename = os.path.split(ti.path) label = os.path.basename(dirname) ext = os.path.splitext(basename)[1] if ext.lower() in extensions: files.append(ti) labels.append(label) if class_to_idx is None: unique_labels = set(labels) sorted_labels = list(sorted(unique_labels, key=natural_key)) class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx] if sort: tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) return tarinfo_and_targets, class_to_idx class ReaderImageTar(Reader): """ Single tarfile dataset where classes are mapped to folders within tar NOTE: This class is being deprecated in favour of the more capable ReaderImageInTar that can operate on folders of tars or tars in tars. """ def __init__(self, root, class_map=''): super().__init__() class_to_idx = None if class_map: class_to_idx = load_class_map(class_map, root) assert os.path.isfile(root) self.root = root with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx) self.imgs = self.samples self.tarfile = None # lazy init in __getitem__ def __getitem__(self, index): if self.tarfile is None: self.tarfile = tarfile.open(self.root) tarinfo, target = self.samples[index] fileobj = self.tarfile.extractfile(tarinfo) return fileobj, target def __len__(self): return len(self.samples) def _filename(self, index, basename=False, absolute=False): filename = self.samples[index][0].name if basename: filename = os.path.basename(filename) return filename
pytorch-image-models/timm/data/readers/reader_image_tar.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_image_tar.py", "repo_id": "pytorch-image-models", "token_count": 1071 }
219
""" Bottleneck Self Attention (Bottleneck Transformers) Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 @misc{2101.11605, Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, Title = {Bottleneck Transformers for Visual Recognition}, Year = {2021}, } Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 This impl is a WIP but given that it is based on the ref gist likely not too far off. Hacked together by / Copyright 2021 Ross Wightman """ from typing import List import torch import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): """ Compute relative logits along one dimension As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 Args: q: (batch, heads, height, width, dim) rel_k: (2 * width - 1, dim) permute_mask: permute output dim according to this """ B, H, W, dim = q.shape x = (q @ rel_k.transpose(-1, -2)) x = x.reshape(-1, W, 2 * W -1) # pad to shift from relative to absolute indexing x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, W - 1]) # reshape and slice out the padded elements x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) x = x_pad[:, :W, W - 1:] # reshape and tile x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): """ Relative Position Embedding As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 """ def __init__(self, feat_size, dim_head, scale): super().__init__() self.height, self.width = to_2tuple(feat_size) self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) def forward(self, q): B, HW, _ = q.shape # relative logits in width dimension. q = q.reshape(B, self.height, self.width, -1) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) # relative logits in height dimension. q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, HW, HW) return rel_logits class BottleneckAttn(nn.Module): """ Bottleneck Attention Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 The internal dimensions of the attention module are controlled by the interaction of several arguments. 
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query and key (qk) dimensions are determined by * num_heads * dim_head if dim_head is not None * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). num_heads (int): parallel attention heads (default: 4) dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) qkv_bias (bool): add bias to q, k, and v projections scale_pos_embed (bool): scale the position embedding as well as Q @ K """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): super().__init__() assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' dim_out = dim_out or dim assert dim_out % num_heads == 0 self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** -0.5 self.scale_pos_embed = scale_pos_embed self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) # NOTE I'm only supporting relative pos embedding for now self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): B, C, H, W = x.shape _assert(H == self.pos_embed.height, '') _assert(W == self.pos_embed.width, '') x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) if self.scale_pos_embed: attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W else: attn = (q @ k) * self.scale + self.pos_embed(q) attn = attn.softmax(dim=-1) out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W out = self.pool(out) return out
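# Illustrative usage sketch (not part of the original module); sizes below are example values only.
# Note that a fixed feat_size matching the input spatial dims (H, W) is required:
#
#   attn = BottleneckAttn(dim=256, dim_out=256, feat_size=(16, 16), num_heads=4)
#   x = torch.randn(2, 256, 16, 16)
#   out = attn(x)  # -> torch.Size([2, 256, 16, 16]); with stride=2 the output is avg-pooled to 8x8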
pytorch-image-models/timm/layers/bottleneck_attn.py/0
{ "file_path": "pytorch-image-models/timm/layers/bottleneck_attn.py", "repo_id": "pytorch-image-models", "token_count": 2907 }
220
""" Filter Response Norm in PyTorch Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737 Hacked together by / Copyright 2021 Ross Wightman """ import torch import torch.nn as nn from .create_act import create_act_layer from .trace_utils import _assert def inv_instance_rms(x, eps: float = 1e-5): rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) return rms.expand(x.shape) class FilterResponseNormTlu2d(nn.Module): def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_): super(FilterResponseNormTlu2d, self).__init__() self.apply_act = apply_act # apply activation (non-linearity) self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.tau is not None: nn.init.zeros_(self.tau) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x class FilterResponseNormAct2d(nn.Module): def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_): super(FilterResponseNormAct2d, self).__init__() if act_layer is not None and apply_act: self.act = create_act_layer(act_layer, inplace=inplace) else: self.act = nn.Identity() self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return self.act(x)
pytorch-image-models/timm/layers/filter_response_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/filter_response_norm.py", "repo_id": "pytorch-image-models", "token_count": 1182 }
221
from typing import Optional import torch from torch import nn from torch import nn, Tensor from torch.nn.modules.transformer import _get_activation_fn def add_ml_decoder_head(model): if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50 model.global_pool = nn.Identity() del model.fc num_classes = model.num_classes num_features = model.num_features model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): # EfficientNet model.global_pool = nn.Identity() del model.classifier num_classes = model.num_classes num_features = model.num_features model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): # hasattr(model, 'head') del model.head num_classes = model.num_classes num_features = model.num_features model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features) else: print("Model code-writing is not aligned currently with ml-decoder") exit(-1) if hasattr(model, 'drop_rate'): # Ml-Decoder has inner dropout model.drop_rate = 0 return model class TransformerDecoderLayerOptimal(nn.Module): def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu", layer_norm_eps=1e-5) -> None: super(TransformerDecoderLayerOptimal, self).__init__() self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = torch.nn.functional.relu super(TransformerDecoderLayerOptimal, self).__setstate__(state) def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor: tgt = tgt + self.dropout1(tgt) tgt = self.norm1(tgt) tgt2 = self.multihead_attn(tgt, memory, memory)[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return tgt # class ExtrapClasses(object): # def __init__(self, num_queries: int, group_size: int): # self.num_queries = num_queries # self.group_size = group_size # # def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap: # torch.Tensor): # # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size) # h = h[..., None].repeat(1, 1, 1, self.group_size) # torch.Size([bs, 5, 768, groups]) # w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size)) # out = (h * w).sum(dim=2) + class_embed_b # out = out.view((h.shape[0], self.group_size * self.num_queries)) # return out class MLDecoder(nn.Module): def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): super(MLDecoder, self).__init__() embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups if 
embed_len_decoder > num_classes: embed_len_decoder = num_classes self.embed_len_decoder = embed_len_decoder # switching to 768 initial embeddings decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) # decoder decoder_dropout = 0.1 num_layers_decoder = 1 dim_feedforward = 2048 layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, dim_feedforward=dim_feedforward, dropout=decoder_dropout) self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) # non-learnable queries self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) self.query_embed.requires_grad_(False) # group fully-connected self.num_classes = num_classes self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) self.duplicate_pooling = torch.nn.Parameter( torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) torch.nn.init.xavier_normal_(self.duplicate_pooling) torch.nn.init.constant_(self.duplicate_pooling_bias, 0) def forward(self, x): if len(x.shape) == 4: # [bs,2048, 7,7] embedding_spatial = x.flatten(2).transpose(1, 2) else: # [bs, 197,468] embedding_spatial = x embedding_spatial_786 = self.embed_standart(embedding_spatial) embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) bs = embedding_spatial_786.shape[0] query_embed = self.query_embed.weight # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768] h = h.transpose(0, 1) out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) for i in range(self.embed_len_decoder): # group FC h_i = h[:, i, :] w_i = self.duplicate_pooling[i, :, :] out_extrap[:, i, :] = torch.matmul(h_i, w_i) h_out = out_extrap.flatten(1)[:, :self.num_classes] h_out += self.duplicate_pooling_bias logits = h_out return logits
pytorch-image-models/timm/layers/ml_decoder.py/0
{ "file_path": "pytorch-image-models/timm/layers/ml_decoder.py", "repo_id": "pytorch-image-models", "token_count": 3048 }
222
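A minimal usage sketch for the ML-Decoder head defined in the file above (not part of the original source). It assumes the module is importable from the path shown, i.e. timm.layers.ml_decoder; the backbone name and label count are illustrative.

import torch
import timm
from timm.layers.ml_decoder import add_ml_decoder_head

# Replace the pooling + fc classifier of a ResNet-50 with an MLDecoder head.
model = timm.create_model('resnet50', pretrained=False, num_classes=80)
model = add_ml_decoder_head(model)

# MLDecoder consumes the unpooled [B, C, H, W] feature map and emits one logit per label.
logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 80])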
""" Split BatchNorm A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through a separate BN layer. The first split is passed through the parent BN layers with weight/bias keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' namespace. This allows easily removing the auxiliary BN layers after training to efficiently achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, 'Disentangled Learning via An Auxiliary BN' Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn class SplitBatchNorm2d(torch.nn.BatchNorm2d): def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, num_splits=2): super().__init__(num_features, eps, momentum, affine, track_running_stats) assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' self.num_splits = num_splits self.aux_bn = nn.ModuleList([ nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) def forward(self, input: torch.Tensor): if self.training: # aux BN only relevant while training split_size = input.shape[0] // self.num_splits assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" split_input = input.split(split_size) x = [super().forward(split_input[0])] for i, a in enumerate(self.aux_bn): x.append(a(split_input[i + 1])) return torch.cat(x, dim=0) else: return super().forward(input) def convert_splitbn_model(module, num_splits=2): """ Recursively traverse module and its children to replace all instances of ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. Args: module (torch.nn.Module): input module num_splits: number of separate batchnorm layers to split input across Example:: >>> # model is an instance of torch.nn.Module >>> model = timm.models.convert_splitbn_model(model, num_splits=2) """ mod = module if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): return module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): mod = SplitBatchNorm2d( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, num_splits=num_splits) mod.running_mean = module.running_mean mod.running_var = module.running_var mod.num_batches_tracked = module.num_batches_tracked if module.affine: mod.weight.data = module.weight.data.clone().detach() mod.bias.data = module.bias.data.clone().detach() for aux in mod.aux_bn: aux.running_mean = module.running_mean.clone() aux.running_var = module.running_var.clone() aux.num_batches_tracked = module.num_batches_tracked.clone() if module.affine: aux.weight.data = module.weight.data.clone().detach() aux.bias.data = module.bias.data.clone().detach() for name, child in module.named_children(): mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) del module return mod
pytorch-image-models/timm/layers/split_batchnorm.py/0
{ "file_path": "pytorch-image-models/timm/layers/split_batchnorm.py", "repo_id": "pytorch-image-models", "token_count": 1394 }
223
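A minimal sketch (not part of the original source) of convert_splitbn_model from the file above, as used for AdvProp-style auxiliary BatchNorm training. The toy network and batch layout are illustrative assumptions.

import torch
import torch.nn as nn
from timm.layers.split_batchnorm import convert_splitbn_model

net = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU())
net = convert_splitbn_model(net, num_splits=2)  # BatchNorm2d layers -> SplitBatchNorm2d

net.train()  # the batch split only applies in training mode
clean = torch.randn(4, 3, 32, 32)  # first half of the batch updates the main BN stats
adv = torch.randn(4, 3, 32, 32)    # second half updates the auxiliary BN stats
out = net(torch.cat([clean, adv], dim=0))  # batch size must be evenly divisible by num_splits
print(out.shape)  # torch.Size([8, 16, 32, 32])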
import os from typing import Any, Dict, Optional, Union from urllib.parse import urlsplit from timm.layers import set_layer_config from ._helpers import load_checkpoint from ._hub import load_model_config_from_hf from ._pretrained import PretrainedCfg from ._registry import is_model, model_entrypoint, split_model_name_tag __all__ = ['parse_model_name', 'safe_model_name', 'create_model'] def parse_model_name(model_name: str): if model_name.startswith('hf_hub'): # NOTE for backwards compat, deprecate hf_hub use model_name = model_name.replace('hf_hub', 'hf-hub') parsed = urlsplit(model_name) assert parsed.scheme in ('', 'timm', 'hf-hub') if parsed.scheme == 'hf-hub': # FIXME may use fragment as revision, currently `@` in URI path return parsed.scheme, parsed.path else: model_name = os.path.split(parsed.path)[-1] return 'timm', model_name def safe_model_name(model_name: str, remove_source: bool = True): # return a filename / path safe model name def make_safe(name): return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') if remove_source: model_name = parse_model_name(model_name)[-1] return make_safe(model_name) def create_model( model_name: str, pretrained: bool = False, pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]] = None, pretrained_cfg_overlay: Optional[Dict[str, Any]] = None, checkpoint_path: str = '', scriptable: Optional[bool] = None, exportable: Optional[bool] = None, no_jit: Optional[bool] = None, **kwargs, ): """Create a model. Lookup model's entrypoint function and pass relevant args to create a new model. <Tip> **kwargs will be passed through entrypoint fn to ``timm.models.build_model_with_cfg()`` and then the model class __init__(). kwargs values set to None are pruned before passing. </Tip> Args: model_name: Name of model to instantiate. pretrained: If set to `True`, load pretrained ImageNet-1k weights. pretrained_cfg: Pass in an external pretrained_cfg for model. pretrained_cfg_overlay: Replace key-values in base pretrained_cfg with these. checkpoint_path: Path of checkpoint to load _after_ the model is initialized. scriptable: Set layer config so that model is jit scriptable (not working for all models yet). exportable: Set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet). no_jit: Set layer config so that model doesn't utilize jit scripted layers (so far activations only). Keyword Args: drop_rate (float): Classifier dropout rate for training. drop_path_rate (float): Stochastic depth drop rate for training. global_pool (str): Classifier global pooling type. Example: ```py >>> from timm import create_model >>> # Create a MobileNetV3-Large model with no pretrained weights. >>> model = create_model('mobilenetv3_large_100') >>> # Create a MobileNetV3-Large model with pretrained weights. >>> model = create_model('mobilenetv3_large_100', pretrained=True) >>> model.num_classes 1000 >>> # Create a MobileNetV3-Large model with pretrained weights and a new head with 10 classes. >>> model = create_model('mobilenetv3_large_100', pretrained=True, num_classes=10) >>> model.num_classes 10 ``` """ # Parameters that aren't supported by all models or are intended to only override model defaults if set # should default to None in command line args/cfg. Remove them if they are present and not set so that # non-supporting models don't break and default args remain in effect. 
kwargs = {k: v for k, v in kwargs.items() if v is not None} model_source, model_name = parse_model_name(model_name) if model_source == 'hf-hub': assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.' # For model names specified in the form `hf-hub:path/architecture_name@revision`, # load model weights + pretrained_cfg from Hugging Face hub. pretrained_cfg, model_name, model_args = load_model_config_from_hf(model_name) if model_args: for k, v in model_args.items(): kwargs.setdefault(k, v) else: model_name, pretrained_tag = split_model_name_tag(model_name) if pretrained_tag and not pretrained_cfg: # a valid pretrained_cfg argument takes priority over tag in model name pretrained_cfg = pretrained_tag if not is_model(model_name): raise RuntimeError('Unknown model (%s)' % model_name) create_fn = model_entrypoint(model_name) with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): model = create_fn( pretrained=pretrained, pretrained_cfg=pretrained_cfg, pretrained_cfg_overlay=pretrained_cfg_overlay, **kwargs, ) if checkpoint_path: load_checkpoint(model, checkpoint_path) return model
pytorch-image-models/timm/models/_factory.py/0
{ "file_path": "pytorch-image-models/timm/models/_factory.py", "repo_id": "pytorch-image-models", "token_count": 1944 }
224
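A short sketch (not part of the original source) of the model-name forms handled by create_model above: a plain architecture name, a name with an embedded pretrained tag, and a pretrained_cfg_overlay. The names are illustrative and no weights are downloaded here.

import timm

# plain architecture name, randomly initialized
m = timm.create_model('resnet50', pretrained=False, num_classes=10)

# architecture name with a pretrained tag, separated via split_model_name_tag
m = timm.create_model('resnet50.a1_in1k', pretrained=False)

# overlay individual keys onto the resolved pretrained_cfg without replacing it wholesale
m = timm.create_model('resnet50', pretrained=False, pretrained_cfg_overlay=dict(input_size=(3, 160, 160)))

# An 'hf-hub:'-prefixed name would instead resolve its config (and weights when
# pretrained=True) through load_model_config_from_hf.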
""" Bring-Your-Own-Blocks Network A flexible network w/ dataclass based config for stacking those NN blocks. This model is currently used to implement the following networks: GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 RepVGG - repvgg_* Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT MobileOne - mobileone_* Paper: `MobileOne: An Improved One millisecond Mobile Backbone` - https://arxiv.org/abs/2206.04040 Code and weights: https://github.com/apple/ml-mobileone, licensed MIT In all cases the models have been modified to fit within the design of ByobNet. I've remapped the original weights and verified accuracies. For GPU Efficient nets, I used the original names for the blocks since they were for the most part the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. A significant number of different network archs can be implemented here, including variants of the above nets that include attention. Hacked together by / copyright Ross Wightman, 2021. """ import math from dataclasses import dataclass, field, replace from functools import partial from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import ( ClassifierHead, NormMlpClassifierHead, ConvNormAct, BatchNormAct2d, EvoNorm2dS0a, AttentionPool2d, RotAttentionPool2d, DropPath, AvgPool2dSame, create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple, ) from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] @dataclass class ByoBlockCfg: type: Union[str, nn.Module] d: int # block depth (number of block repeats in stage) c: int # number of output channels for each block in stage s: int = 2 # stride of stage (first block) gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 br: float = 1. # bottleneck-ratio of blocks in stage # NOTE: these config items override the model cfgs that are applied to all blocks by default attn_layer: Optional[str] = None attn_kwargs: Optional[Dict[str, Any]] = None self_attn_layer: Optional[str] = None self_attn_kwargs: Optional[Dict[str, Any]] = None block_kwargs: Optional[Dict[str, Any]] = None @dataclass class ByoModelCfg: blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] 
downsample: str = 'conv1x1' stem_type: str = '3x3' stem_pool: Optional[str] = 'maxpool' stem_chs: Union[int, List[int], Tuple[int, ...]] = 32 width_factor: float = 1.0 num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 zero_init_last: bool = True # zero init last weight (usually bn) in residual path fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation # layer config act_layer: str = 'relu' norm_layer: str = 'batchnorm' aa_layer: str = '' # Head config head_hidden_size: Optional[int] = None # feat dim of MLP head or AttentionPool output head_type: str = 'classifier' # Block config # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there attn_layer: Optional[str] = None attn_kwargs: dict = field(default_factory=lambda: dict()) self_attn_layer: Optional[str] = None self_attn_kwargs: dict = field(default_factory=lambda: dict()) block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0): c = (64, 128, 256, 512) group_size = 0 if groups > 0: group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) return bcfg def _mobileone_bcfg(d=(2, 8, 10, 1), wf=(1., 1., 1., 1.), se_blocks=(), num_conv_branches=1): c = (64, 128, 256, 512) prev_c = min(64, c[0] * wf[0]) se_blocks = se_blocks or (0,) * len(d) bcfg = [] for d, c, w, se in zip(d, c, wf, se_blocks): scfg = [] for i in range(d): out_c = c * w bk = dict(num_conv_branches=num_conv_branches) ak = {} if i >= d - se: ak['attn_layer'] = 'se' scfg += [ByoBlockCfg(type='one', d=1, c=prev_c, gs=1, block_kwargs=bk, **ak)] # depthwise block scfg += [ByoBlockCfg( type='one', d=1, c=out_c, gs=0, block_kwargs=dict(kernel_size=1, **bk), **ak)] # pointwise block prev_c = out_c bcfg += [scfg] return bcfg def interleave_blocks( types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs, ) -> Tuple[ByoBlockCfg]: """ interleave 2 block types in stack """ assert len(types) == 2 if isinstance(every, int): every = list(range(0 if first else every, d, every + 1)) if not every: every = [d - 1] set(every) blocks = [] for i in range(d): block_type = types[1] if i in every else types[0] blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] return tuple(blocks) def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: if not isinstance(stage_blocks_cfg, Sequence): stage_blocks_cfg = (stage_blocks_cfg,) block_cfgs = [] for i, cfg in enumerate(stage_blocks_cfg): block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] return block_cfgs def num_groups(group_size, channels): if not group_size: # 0 or None return 1 # normal conv with 1 group else: # NOTE group_size == 1 -> depthwise conv assert channels % group_size == 0 return channels // group_size @dataclass class LayerFn: conv_norm_act: Callable = ConvNormAct norm_act: Callable = BatchNormAct2d act: Callable = nn.ReLU attn: Optional[Callable] = None self_attn: Optional[Callable] = None class DownsampleAvg(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: int = 1, apply_act: bool = False, layers: LayerFn = None, ): """ AvgPool Downsampling as in 'D' ResNet variants.""" super(DownsampleAvg, self).__init__() layers = layers or LayerFn() avg_stride = stride if dilation == 1 else 1 if stride > 1 or 
dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) def forward(self, x): return self.conv(self.pool(x)) def create_shortcut( downsample_type: str, in_chs: int, out_chs: int, stride: int, dilation: Tuple[int, int], layers: LayerFn, **kwargs, ): assert downsample_type in ('avg', 'conv1x1', '') if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: if not downsample_type: return None # no shortcut elif downsample_type == 'avg': return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) else: return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) else: return nn.Identity() # identity shortcut class BasicBlock(nn.Module): """ ResNet Basic Block - kxk + kxk """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), group_size: Optional[int] = None, bottle_ratio: float = 1.0, downsample: str = 'avg', attn_last: bool = True, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(BasicBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act( mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_layer=drop_block, apply_act=False, ) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.attn(x) x = self.conv2_kxk(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class BottleneckBlock(nn.Module): """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1 """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1., group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = False, linear_out: bool = False, extra_conv: bool = False, bottle_in: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(BottleneckBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.conv2_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) if extra_conv: self.conv2b_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups) else: self.conv2b_kxk = nn.Identity() self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv3_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.conv2b_kxk(x) x = self.attn(x) x = self.conv3_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class DarkBlock(nn.Module): """ DarkNet-like (1x1 + 3x3 w/ stride) block The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models. This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats). If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1) for more optimal compute. 
""" def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = True, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(DarkBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act( mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, ) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.attn(x) x = self.conv2_kxk(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class EdgeBlock(nn.Module): """ EdgeResidual-like (3x3 + 1x1) block A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed. Very similar to the EfficientNet Edge-Residual block but this block it ends with activations, is intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs. FIXME is there a more common 3x3 + 1x1 conv block to name this after? """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = False, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(EdgeBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_kxk = layers.conv_norm_act( in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.attn(x) x = self.conv2_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class RepVggBlock(nn.Module): """ RepVGG Block. Adapted from impl at https://github.com/DingXiaoH/RepVGG """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = '', layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., inference_mode: bool = False ): super(RepVggBlock, self).__init__() self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d( in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True, ) else: self.reparam_conv = None use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None self.conv_kxk = layers.conv_norm_act( in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, ) self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): # NOTE this init overrides that base model init with specific changes for the block type for m in self.modules(): if isinstance(m, nn.BatchNorm2d): nn.init.normal_(m.weight, .1, .1) nn.init.normal_(m.bias, 0, .1) if hasattr(self.attn, 'reset_parameters'): self.attn.reset_parameters() def forward(self, x): if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) if self.identity is None: x = self.conv_1x1(x) + self.conv_kxk(x) else: identity = self.identity(x) x = self.conv_1x1(x) + self.conv_kxk(x) x = self.drop_path(x) # not in the paper / official impl, experimental x += identity x = self.attn(x) # no attn in the paper / official impl, experimental return self.act(x) def reparameterize(self): """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. 
""" if self.reparam_conv is not None: return kernel, bias = self._get_kernel_bias() self.reparam_conv = nn.Conv2d( in_channels=self.conv_kxk.conv.in_channels, out_channels=self.conv_kxk.conv.out_channels, kernel_size=self.conv_kxk.conv.kernel_size, stride=self.conv_kxk.conv.stride, padding=self.conv_kxk.conv.padding, dilation=self.conv_kxk.conv.dilation, groups=self.conv_kxk.conv.groups, bias=True, ) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias # Delete un-used branches for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_1x1') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to obtain re-parameterized kernel and bias. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 """ # get weights and bias of scale branch kernel_1x1 = 0 bias_1x1 = 0 if self.conv_1x1 is not None: kernel_1x1, bias_1x1 = self._fuse_bn_tensor(self.conv_1x1) # Pad scale branch kernel to match conv branch kernel size. pad = self.conv_kxk.conv.kernel_size[0] // 2 kernel_1x1 = torch.nn.functional.pad(kernel_1x1, [pad, pad, pad, pad]) # get weights and bias of skip branch kernel_identity = 0 bias_identity = 0 if self.identity is not None: kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) # get weights and bias of conv branches kernel_conv, bias_conv = self._fuse_bn_tensor(self.conv_kxk) kernel_final = kernel_conv + kernel_1x1 + kernel_identity bias_final = bias_conv + bias_1x1 + bias_identity return kernel_final, bias_final def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to fuse batchnorm layer with preceeding conv layer. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 """ if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk.conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk.conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk.conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std class MobileOneBlock(nn.Module): """ MobileOne building block. This block has a multi-branched architecture at train-time and plain-CNN style architecture at inference time For more details, please refer to our paper: `An Improved One millisecond Mobile Backbone` - https://arxiv.org/pdf/2206.04040.pdf """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, # unused group_size: Optional[int] = None, downsample: str = '', # unused inference_mode: bool = False, num_conv_branches: int = 1, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ) -> None: """ Construct a MobileOneBlock module. 
""" super(MobileOneBlock, self).__init__() self.num_conv_branches = num_conv_branches self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d( in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True) else: self.reparam_conv = None # Re-parameterizable skip connection use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None # Re-parameterizable conv branches convs = [] for _ in range(self.num_conv_branches): convs.append(layers.conv_norm_act( in_chs, out_chs, kernel_size=kernel_size, stride=stride, groups=groups, apply_act=False)) self.conv_kxk = nn.ModuleList(convs) # Re-parameterizable scale branch self.conv_scale = None if kernel_size > 1: self.conv_scale = layers.conv_norm_act( in_chs, out_chs, kernel_size=1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Apply forward pass. """ # Inference mode forward pass. if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) # Multi-branched train-time forward pass. # Skip branch output identity_out = 0 if self.identity is not None: identity_out = self.identity(x) # Scale branch output scale_out = 0 if self.conv_scale is not None: scale_out = self.conv_scale(x) # Other branches out = scale_out for ck in self.conv_kxk: out += ck(x) out = self.drop_path(out) out += identity_out return self.act(self.attn(out)) def reparameterize(self): """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. """ if self.reparam_conv is not None: return kernel, bias = self._get_kernel_bias() self.reparam_conv = nn.Conv2d( in_channels=self.conv_kxk[0].conv.in_channels, out_channels=self.conv_kxk[0].conv.out_channels, kernel_size=self.conv_kxk[0].conv.kernel_size, stride=self.conv_kxk[0].conv.stride, padding=self.conv_kxk[0].conv.padding, dilation=self.conv_kxk[0].conv.dilation, groups=self.conv_kxk[0].conv.groups, bias=True) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias # Delete un-used branches for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_scale') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to obtain re-parameterized kernel and bias. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 """ # get weights and bias of scale branch kernel_scale = 0 bias_scale = 0 if self.conv_scale is not None: kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale) # Pad scale branch kernel to match conv branch kernel size. 
pad = self.conv_kxk[0].conv.kernel_size[0] // 2 kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) # get weights and bias of skip branch kernel_identity = 0 bias_identity = 0 if self.identity is not None: kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) # get weights and bias of conv branches kernel_conv = 0 bias_conv = 0 for ix in range(self.num_conv_branches): _kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix]) kernel_conv += _kernel bias_conv += _bias kernel_final = kernel_conv + kernel_scale + kernel_identity bias_final = bias_conv + bias_scale + bias_identity return kernel_final, bias_final def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to fuse batchnorm layer with preceeding conv layer. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 """ if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk[0].conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk[0].conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk[0].conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std class SelfAttnBlock(nn.Module): """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1., group_size: Optional[int] = None, downsample: str = 'avg', extra_conv: bool = False, linear_out: bool = False, bottle_in: bool = False, post_attn_na: bool = True, feat_size: Optional[Tuple[int, int]] = None, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(SelfAttnBlock, self).__init__() assert layers is not None mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) if extra_conv: self.conv2_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) stride = 1 # striding done via conv if enabled else: self.conv2_kxk = nn.Identity() opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) # FIXME need to dilate self attn to have dilated network support, moop moop self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv3_1x1.bn.weight) if hasattr(self.self_attn, 'reset_parameters'): self.self_attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.self_attn(x) x = self.post_attn(x) x = self.conv3_1x1(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) _block_registry = dict( basic=BasicBlock, bottle=BottleneckBlock, dark=DarkBlock, edge=EdgeBlock, rep=RepVggBlock, one=MobileOneBlock, self_attn=SelfAttnBlock, ) def register_block(block_type:str, block_fn: nn.Module): _block_registry[block_type] = block_fn def create_block(block: Union[str, nn.Module], **kwargs): if isinstance(block, (nn.Module, partial)): return block(**kwargs) assert block in _block_registry, f'Unknown block type ({block}' return _block_registry[block](**kwargs) class Stem(nn.Sequential): def __init__( self, in_chs: int, out_chs: Union[int, List[int], Tuple[int, ...]], kernel_size: int = 3, stride: int = 4, pool: str = 'maxpool', num_rep: int = 3, num_act: Optional[int] = None, chs_decay: float = 0.5, layers: LayerFn = None, ): super().__init__() assert stride in (2, 4) layers = layers or LayerFn() if isinstance(out_chs, (list, tuple)): num_rep = len(out_chs) stem_chs = out_chs else: stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] self.stride = stride self.feature_info = [] # track intermediate features prev_feat = '' stem_strides = [2] + [1] * (num_rep - 1) if stride == 4 and not pool: # set last conv in stack to be strided if stride == 4 and no pooling layer stem_strides[-1] = 2 num_act = num_rep if num_act is None else num_act # if num_act < num_rep, first convs in stack won't have bn + act stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act prev_chs = in_chs curr_stride = 1 last_feat_idx = -1 for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): layer_fn = layers.conv_norm_act if na else create_conv2d conv_name = f'conv{i + 1}' if i > 0 and s > 1: last_feat_idx = i - 1 self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) prev_chs = ch curr_stride *= s prev_feat = conv_name if pool: pool = pool.lower() assert pool in ('max', 'maxpool', 'avg', 'avgpool', 'max2', 'avg2') last_feat_idx = i self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) if pool == 'max2': self.add_module('pool', nn.MaxPool2d(2)) elif pool == 'avg2': self.add_module('pool', nn.AvgPool2d(2)) elif 'max' in pool: self.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) elif 'avg' in pool: self.add_module('pool', nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False)) curr_stride *= 2 prev_feat = 'pool' self.last_feat_idx = last_feat_idx if last_feat_idx >= 0 else None self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) assert curr_stride == stride def forward_intermediates(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: intermediate: Optional[torch.Tensor] = None for i, m in enumerate(self): x = m(x) if self.last_feat_idx is not None and i == self.last_feat_idx: 
intermediate = x return x, intermediate def create_byob_stem( in_chs: int, out_chs: int, stem_type: str = '', pool_type: str = '', feat_prefix: str = 'stem', layers: LayerFn = None, ): layers = layers or LayerFn() assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', 'one', '7x7', '3x3') if 'quad' in stem_type: # based on NFNet stem, stack of 4 3x3 convs num_act = 2 if 'quad2' in stem_type else None stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) elif 'tiered' in stem_type: # 3x3 stack of 3 convs as in my ResNet-T stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) elif 'deep' in stem_type: # 3x3 stack of 3 convs as in ResNet-D stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) elif 'rep' in stem_type: stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) elif 'one' in stem_type: stem = MobileOneBlock(in_chs, out_chs, kernel_size=3, stride=2, layers=layers) elif '7x7' in stem_type: # 7x7 stem conv as in ResNet if pool_type: stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) else: if isinstance(out_chs, (tuple, list)): stem = Stem(in_chs, out_chs, 3, pool=pool_type, layers=layers) else: # 3x3 stem conv as in RegNet is the default if pool_type: stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) else: stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) if isinstance(stem, Stem): feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] else: feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix, stage=0)] return stem, feature_info def reduce_feat_size(feat_size, stride=2): return None if feat_size is None else tuple([s // stride for s in feat_size]) def override_kwargs(block_kwargs, model_kwargs): """ Override model level attn/self-attn/block kwargs w/ block level NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs for the block if set to anything that isn't None. i.e. 
an empty block_kwargs dict will remove kwargs set at model level for that block """ out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs return out_kwargs or {} # make sure None isn't returned def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ): layer_fns = block_kwargs['layers'] # override attn layer / args with block local config attn_set = block_cfg.attn_layer is not None if attn_set or block_cfg.attn_kwargs is not None: # override attn layer config if attn_set and not block_cfg.attn_layer: # empty string for attn_layer type will disable attn for this block attn_layer = None else: attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) attn_layer = block_cfg.attn_layer or model_cfg.attn_layer attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None layer_fns = replace(layer_fns, attn=attn_layer) # override self-attn layer / args with block local cfg self_attn_set = block_cfg.self_attn_layer is not None if self_attn_set or block_cfg.self_attn_kwargs is not None: # override attn layer config if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == '' # empty string for self_attn_layer type will disable attn for this block self_attn_layer = None else: self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \ if self_attn_layer is not None else None layer_fns = replace(layer_fns, self_attn=self_attn_layer) block_kwargs['layers'] = layer_fns # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs)) def create_byob_stages( cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], feat_size: Optional[int] = None, layers: Optional[LayerFn] = None, block_kwargs_fn: Optional[Callable] = update_block_kwargs, ): layers = layers or LayerFn() feature_info = [] block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] dilation = 1 net_stride = stem_feat['reduction'] prev_chs = stem_feat['num_chs'] prev_feat = stem_feat stages = [] for stage_idx, stage_block_cfgs in enumerate(block_cfgs): stride = stage_block_cfgs[0].s if stride != 1 and prev_feat: feature_info.append(prev_feat) if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 blocks = [] for block_idx, block_cfg in enumerate(stage_block_cfgs): out_chs = make_divisible(block_cfg.c * cfg.width_factor) group_size = block_cfg.gs if isinstance(group_size, Callable): group_size = group_size(out_chs, block_idx) block_kwargs = dict( # Blocks used in this model must accept these arguments in_chs=prev_chs, out_chs=out_chs, stride=stride if block_idx == 0 else 1, dilation=(first_dilation, dilation), group_size=group_size, bottle_ratio=block_cfg.br, downsample=cfg.downsample, drop_path_rate=dpr[stage_idx][block_idx], layers=layers, ) if block_cfg.type in ('self_attn',): # add feat_size arg for blocks that support/need it block_kwargs['feat_size'] = feat_size block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) blocks += 
[create_block(block_cfg.type, **block_kwargs)] first_dilation = dilation prev_chs = out_chs if stride > 1 and block_idx == 0: feat_size = reduce_feat_size(feat_size, stride) stages += [nn.Sequential(*blocks)] prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}', stage=stage_idx + 1) feature_info.append(prev_feat) return nn.Sequential(*stages), feature_info, feat_size def get_layer_fns(cfg: ByoModelCfg, allow_aa: bool = True): act = get_act_layer(cfg.act_layer) norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act) if cfg.aa_layer and allow_aa: conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act, aa_layer=cfg.aa_layer) else: conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act) attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) return layer_fn class ByobNet(nn.Module): """ 'Bring-your-own-blocks' Net A flexible network backbone that allows building model stem + blocks via dataclass cfg definition w/ factory functions for module instantiation. Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act). """ def __init__( self, cfg: ByoModelCfg, num_classes: int = 1000, in_chans: int = 3, global_pool: Optional[str] = None, output_stride: int = 32, img_size: Optional[Union[int, Tuple[int, int]]] = None, drop_rate: float = 0., drop_path_rate: float =0., zero_init_last: bool = True, **kwargs, ): """ Args: cfg: Model architecture configuration. num_classes: Number of classifier classes. in_chans: Number of input channels. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). img_size: Image size for fixed image size models (i.e. self-attn). drop_rate: Classifier dropout rate. drop_path_rate: Stochastic depth drop-path rate. zero_init_last: Zero-init last weight of residual path. **kwargs: Extra kwargs overlayed onto cfg. 
""" super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg stem_layers = get_layer_fns(cfg, allow_aa=False) # keep aa off for stem-layers stage_layers = get_layer_fns(cfg) if cfg.fixed_input_size: assert img_size is not None, 'img_size argument is required for fixed input size model' feat_size = to_2tuple(img_size) if img_size is not None else None self.feature_info = [] if isinstance(cfg.stem_chs, (list, tuple)): stem_chs = [int(round(c * cfg.width_factor)) for c in cfg.stem_chs] else: stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) self.stem, stem_feat = create_byob_stem( in_chs=in_chans, out_chs=stem_chs, stem_type=cfg.stem_type, pool_type=cfg.stem_pool, layers=stem_layers, ) self.feature_info.extend(stem_feat[:-1]) feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) self.stages, stage_feat, feat_size = create_byob_stages( cfg, drop_path_rate, output_stride, stem_feat[-1], layers=stage_layers, feat_size=feat_size, ) self.feature_info.extend(stage_feat[:-1]) reduction = stage_feat[-1]['reduction'] prev_chs = stage_feat[-1]['num_chs'] if cfg.num_features: self.num_features = int(round(cfg.width_factor * cfg.num_features)) self.final_conv = stage_layers.conv_norm_act(prev_chs, self.num_features, 1) else: self.num_features = prev_chs self.final_conv = nn.Identity() self.feature_info += [ dict(num_chs=self.num_features, reduction=reduction, module='final_conv', stage=len(self.stages))] self.stage_ends = [f['stage'] for f in self.feature_info] self.head_hidden_size = self.num_features assert cfg.head_type in ('', 'classifier', 'mlp', 'attn_abs', 'attn_rot') if cfg.head_type == 'mlp': if global_pool is None: global_pool = 'avg' self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=cfg.head_hidden_size, pool_type=global_pool, norm_layer=cfg.norm_layer, act_layer=cfg.act_layer, drop_rate=self.drop_rate, ) self.head_hidden_size = self.head.hidden_size elif cfg.head_type == 'attn_abs': if global_pool is None: global_pool = 'token' assert global_pool in ('', 'token') self.head = AttentionPool2d( self.num_features, embed_dim=cfg.head_hidden_size, out_features=num_classes, feat_size=feat_size, pool_type=global_pool, drop_rate=self.drop_rate, qkv_separate=True, ) self.head_hidden_size = self.head.embed_dim elif cfg.head_type =='attn_rot': if global_pool is None: global_pool = 'token' assert global_pool in ('', 'token') self.head = RotAttentionPool2d( self.num_features, embed_dim=cfg.head_hidden_size, out_features=num_classes, ref_feat_size=feat_size, pool_type=global_pool, drop_rate=self.drop_rate, qkv_separate=True, ) self.head_hidden_size = self.head.embed_dim else: if global_pool is None: global_pool = 'avg' assert cfg.head_hidden_size is None self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) self.global_pool = global_pool # init weights named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), (r'^final_conv', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] 
= None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, exclude_final_conv: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features exclude_final_conv: Exclude final_conv from last intermediate Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] # forward pass feat_idx = 0 # stem is index 0 if hasattr(self.stem, 'forward_intermediates'): # returns last intermediate features in stem (before final stride in stride > 2 stems) x, x_inter = self.stem.forward_intermediates(x) else: x, x_inter = self.stem(x), None if feat_idx in take_indices: intermediates.append(x if x_inter is None else x_inter) last_idx = self.stage_ends[-1] if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index] for stage in stages: feat_idx += 1 x = stage(x) if not exclude_final_conv and feat_idx == last_idx: # default feature_info for this model uses final_conv as the last feature output (if present) x = self.final_conv(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates if exclude_final_conv and feat_idx == last_idx: x = self.final_conv(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0 if max_index < self.stage_ends[-1]: self.final_conv = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.final_conv(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name='', zero_init_last=False): if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.BatchNorm2d): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights(zero_init_last=zero_init_last) model_cfgs = dict( gernet_l=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), ), stem_chs=32, stem_pool=None, num_features=2560, ), gernet_m=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), ), stem_chs=32, stem_pool=None, num_features=2560, ), gernet_s=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), ), stem_chs=13, stem_pool=None, num_features=1920, ), repvgg_a0=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(0.75, 0.75, 0.75, 2.5)), stem_type='rep', stem_chs=48, ), repvgg_a1=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1, 1, 1, 2.5)), stem_type='rep', stem_chs=64, ), repvgg_a2=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), stem_type='rep', stem_chs=64, ), repvgg_b0=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), stem_type='rep', stem_chs=64, ), repvgg_b1=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), stem_type='rep', stem_chs=64, ), repvgg_b1g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_b2=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), stem_type='rep', stem_chs=64, ), repvgg_b2g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_b3=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), stem_type='rep', stem_chs=64, ), repvgg_b3g4=ByoModelCfg( 
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_d2se=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(8, 14, 24, 1), wf=(2.5, 2.5, 2.5, 5.)), stem_type='rep', stem_chs=64, attn_layer='se', attn_kwargs=dict(rd_ratio=0.0625, rd_divisor=1), ), # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks # DW convs in last block, 2048 pre-FC, silu act resnet51q=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), ), stem_chs=128, stem_type='quad2', stem_pool=None, num_features=2048, act_layer='silu', ), # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act resnet61q=ByoModelCfg( blocks=( ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), ), stem_chs=128, stem_type='quad', stem_pool=None, num_features=2048, act_layer='silu', block_kwargs=dict(extra_conv=True), ), # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, # and a tiered stem w/ maxpool resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', ), gcresnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca', ), seresnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='se', ), eca_resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='eca', ), bat_resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='bat', attn_kwargs=dict(block_size=8) ), # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool resnet32ts=ByoModelCfg( blocks=( 
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=0, act_layer='silu', ), # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool resnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', ), # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat # and a tiered stem w/ no maxpool gcresnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='gca', ), seresnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='se', ), eca_resnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='eca', ), gcresnet50t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', attn_layer='gca', ), gcresnext50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca', ), # experimental models, closer to a RegNetZ than a ResNet. 
Similar to EfficientNets but w/ groups instead of DW regnetz_b16=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_c16=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d32=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d8=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_e8=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=2048, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), # experimental EvoNorm configs regnetz_b16_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_c16_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d8_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, 
br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), ), stem_chs=64, stem_type='deep', stem_pool='', downsample='', num_features=1792, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), mobileone_s0=ByoModelCfg( blocks=_mobileone_bcfg(wf=(0.75, 1.0, 1.0, 2.), num_conv_branches=4), stem_type='one', stem_chs=48, ), mobileone_s1=ByoModelCfg( blocks=_mobileone_bcfg(wf=(1.5, 1.5, 2.0, 2.5)), stem_type='one', stem_chs=64, ), mobileone_s2=ByoModelCfg( blocks=_mobileone_bcfg(wf=(1.5, 2.0, 2.5, 4.0)), stem_type='one', stem_chs=64, ), mobileone_s3=ByoModelCfg( blocks=_mobileone_bcfg(wf=(2.0, 2.5, 3.0, 4.0)), stem_type='one', stem_chs=64, ), mobileone_s4=ByoModelCfg( blocks=_mobileone_bcfg(wf=(3.0, 3.5, 3.5, 4.0), se_blocks=(0, 0, 5, 1)), stem_type='one', stem_chs=64, ), resnet50_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet101_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=23, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet50x4_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=4, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=6, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=10, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=2048, s=2, br=0.25), ), width_factor=1.25, stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet50x16_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=6, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=8, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=18, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=8, c=2048, s=2, br=0.25), ), width_factor=1.5, stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet50x64_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=15, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=36, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=10, c=2048, s=2, br=0.25), ), width_factor=2.0, stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet50_mlp=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_hidden_size=1024, head_type='mlp', ), test_byobnet=ByoModelCfg( blocks=( ByoBlockCfg(type='edge', d=1, c=32, s=2, gs=0, br=0.5), ByoBlockCfg(type='dark', d=1, c=64, s=2, gs=0, br=0.5), ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=1, c=256, s=2, gs=64, br=0.25), ), 
stem_chs=24, downsample='avg', stem_pool='', act_layer='relu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), ), ) for k in ('resnet50_clip', 'resnet101_clip', 'resnet50x4_clip', 'resnet50x16_clip', 'resnet50x64_clip'): model_cfgs[k + '_gap'] = replace(model_cfgs[k], head_type='classifier') def _convert_openai_clip( state_dict: Dict[str, torch.Tensor], model: ByobNet, prefix: str = 'visual.', ) -> Dict[str, torch.Tensor]: model_has_attn_pool = isinstance(model.head, (RotAttentionPool2d, AttentionPool2d)) import re def _stage_sub(m): stage_idx = int(m.group(1)) - 1 layer_idx, layer_type, layer_id = int(m.group(2)), m.group(3), int(m.group(4)) prefix_str = f'stages.{stage_idx}.{layer_idx}.' id_map = {1: 'conv1_1x1.', 2: 'conv2_kxk.', 3: 'conv3_1x1.'} suffix_str = id_map[layer_id] + layer_type return prefix_str + suffix_str def _down_sub(m): stage_idx = int(m.group(1)) - 1 layer_idx, layer_id = int(m.group(2)), int(m.group(3)) return f'stages.{stage_idx}.{layer_idx}.shortcut.' + ('conv.conv' if layer_id == 0 else 'conv.bn') out_dict = {} for k, v in state_dict.items(): if not k.startswith(prefix): continue k = re.sub(rf'{prefix}conv([0-9])', r'stem.conv\1.conv', k) k = re.sub(rf'{prefix}bn([0-9])', r'stem.conv\1.bn', k) k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.([a-z]+)([0-9])', _stage_sub, k) k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.downsample\.([0-9])', _down_sub, k) if k.startswith(f'{prefix}attnpool'): if not model_has_attn_pool: continue k = k.replace(prefix + 'attnpool', 'head') #'attn_pool') k = k.replace('positional_embedding', 'pos_embed') k = k.replace('q_proj', 'q') k = k.replace('k_proj', 'k') k = k.replace('v_proj', 'v') k = k.replace('c_proj', 'proj') out_dict[k] = v return out_dict def checkpoint_filter_fn( state_dict: Dict[str, torch.Tensor], model: ByobNet ): if 'visual.conv1.weight' in state_dict: state_dict = _convert_openai_clip(state_dict, model) return state_dict def _create_byobnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant], pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs } def _cfgr(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # GPU-Efficient (ResNet) weights 'gernet_s.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_m.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_l.idstcv_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), # RepVGG weights 'repvgg_a0.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a1.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a2.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b0.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1.rvgg_in1k': _cfg( 
hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b3.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b3g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_d2se.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, ), # experimental ResNet configs 'resnet51q.ra2_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet61q.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), # ResNeXt-26 models with different attention in Bottleneck blocks 'resnext26ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_resnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'bat_resnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', min_input_size=(3, 256, 256)), # ResNet-32 / 33 models with different attention in Bottleneck blocks 'resnet32ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth', test_input_size=(3, 288, 288), 
test_crop_pct=1.0), 'eca_resnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet50t.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext50ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), # custom `timm` specific RegNetZ inspired models w/ different sizing from paper 'regnetz_b16.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.94, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'regnetz_c16.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_d32.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_e8.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_b16_evos.untrained': _cfgr( first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.95, test_input_size=(3, 288, 288)), 'regnetz_c16_evos.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8_evos.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'mobileone_s0.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.875, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s1.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s2.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s3.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 
'mobileone_s4.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), # original attention pool head variants 'resnet50_clip.openai': _cfgr( hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj', ), 'resnet101_clip.openai': _cfgr( hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj', ), 'resnet50x4_clip.openai': _cfgr( hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=640, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 288, 288), pool_size=(9, 9), classifier='head.proj', ), 'resnet50x16_clip.openai': _cfgr( hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=768, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 384, 384), pool_size=(12, 12), classifier='head.proj', ), 'resnet50x64_clip.openai': _cfgr( hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 448, 448), pool_size=(14, 14), classifier='head.proj', ), # avg-pool w/ optional standard classifier head variants 'resnet50_clip_gap.openai': _cfgr( hf_hub_id='timm/resnet50_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7), ), 'resnet101_clip_gap.openai': _cfgr( hf_hub_id='timm/resnet101_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7), ), 'resnet50x4_clip_gap.openai': _cfgr( hf_hub_id='timm/resnet50x4_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 288, 288), pool_size=(9, 9), ), 'resnet50x16_clip_gap.openai': _cfgr( hf_hub_id='timm/resnet50x16_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), ), 'resnet50x64_clip_gap.openai': _cfgr( hf_hub_id='timm/resnet50x64_clip.openai', hf_hub_filename='open_clip_pytorch_model.bin', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 448, 448), pool_size=(14, 14), ), 'resnet50_mlp.untrained': _cfgr( input_size=(3, 256, 256), pool_size=(8, 8), ), 'test_byobnet.r160_in1k': _cfgr( hf_hub_id='timm/', first_conv='stem.conv', input_size=(3, 160, 160), crop_pct=0.875, pool_size=(5, 5), ), }) @register_model def gernet_l(pretrained=False, **kwargs) -> ByobNet: """ GEResNet-Large (GENet-Large from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) @register_model def gernet_m(pretrained=False, **kwargs) -> ByobNet: """ GEResNet-Medium (GENet-Normal from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) @register_model def gernet_s(pretrained=False, **kwargs) -> ByobNet: """ EResNet-Small (GENet-Small 
from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) @register_model def repvgg_a0(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A0 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a0', pretrained=pretrained, **kwargs) @register_model def repvgg_a1(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A1 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a1', pretrained=pretrained, **kwargs) @register_model def repvgg_a2(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A2 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) @register_model def repvgg_b0(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B0 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) @register_model def repvgg_b1(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B1 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) @register_model def repvgg_b1g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B1g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b2(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B2 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) @register_model def repvgg_b2g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B2g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b3(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B3 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) @register_model def repvgg_b3g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B3g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) @register_model def repvgg_d2se(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-D2se `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_d2se', pretrained=pretrained, **kwargs) @register_model def resnet51q(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) @register_model def resnet61q(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) @register_model def resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) @register_model def gcresnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) @register_model def seresnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) @register_model def 
eca_resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) @register_model def bat_resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) @register_model def resnet32ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) @register_model def resnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) @register_model def seresnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) @register_model def eca_resnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet50t(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) @register_model def gcresnext50ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) @register_model def regnetz_b16(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs) @register_model def regnetz_c16(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs) @register_model def regnetz_d32(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs) @register_model def regnetz_d8(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs) @register_model def regnetz_e8(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs) @register_model def regnetz_b16_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_c16_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_d8_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs) @register_model def mobileone_s0(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s0', pretrained=pretrained, **kwargs) @register_model def mobileone_s1(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s1', pretrained=pretrained, **kwargs) @register_model def mobileone_s2(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s2', pretrained=pretrained, **kwargs) @register_model def mobileone_s3(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s3', pretrained=pretrained, **kwargs) @register_model def mobileone_s4(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs) @register_model def resnet50_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50 CLIP image tower """ return _create_byobnet('resnet50_clip', pretrained=pretrained, 
**kwargs) @register_model def resnet101_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-101 CLIP image tower """ return _create_byobnet('resnet101_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x4_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x4 CLIP image tower """ return _create_byobnet('resnet50x4_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x16_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x16 CLIP image tower """ return _create_byobnet('resnet50x16_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x64_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x64 CLIP image tower """ return _create_byobnet('resnet50x64_clip', pretrained=pretrained, **kwargs) @register_model def resnet50_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet101_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-101 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet101_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x4_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x4 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50x4_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x16_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x16 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50x16_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x64_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x64 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50x64_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50_mlp(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet50_mlp', pretrained=pretrained, **kwargs) @register_model def test_byobnet(pretrained=False, **kwargs) -> ByobNet: """ Minimal test ResNet (BYOB based) model. """ return _create_byobnet('test_byobnet', pretrained=pretrained, **kwargs)
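# Usage sketch (illustrative only, not part of the upstream byobnet.py): a minimal example of
# exercising the forward_intermediates() / prune_intermediate_layers() API defined above.
# It assumes the `timm` package is importable and that the 'resnet51q' variant registered
# above resolves via timm.create_model(); the printed shapes depend on the chosen input size.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('resnet51q', pretrained=False)
    model.eval()
    x = torch.randn(1, 3, 256, 256)

    with torch.no_grad():
        # Final features plus the last two stage outputs (indices=2 -> take the last 2 feature stages).
        final, feats = model.forward_intermediates(x, indices=2)
        print(final.shape, [f.shape for f in feats])

        # Drop layers not needed for those intermediates and remove the classifier head.
        kept = model.prune_intermediate_layers(indices=2, prune_head=True)
        feats_only = model.forward_intermediates(x, indices=2, intermediates_only=True)
        print(kept, [f.shape for f in feats_only])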
pytorch-image-models/timm/models/byobnet.py/0
{ "file_path": "pytorch-image-models/timm/models/byobnet.py", "repo_id": "pytorch-image-models", "token_count": 52579 }
225
""" The EfficientNet Family in PyTorch An implementation of EfficienNet that covers variety of related models with efficient architectures: * EfficientNet-V2 - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 * EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 * MixNet (Small, Medium, and Large) - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 * MNasNet B1, A1 (SE), Small - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 * FBNet-C - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 * Single-Path NAS Pixel1 - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 * TinyNet - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 - Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch * And likely more... The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing the models and weights open source! Hacked together by / Copyright 2019, Ross Wightman """ from functools import partial from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, GroupNormAct, LayerType from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks, feature_take_indices from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['EfficientNet', 'EfficientNetFeatures'] class EfficientNet(nn.Module): """ EfficientNet A flexible and performant PyTorch implementation of efficient network architectures, including: * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 * EfficientNet B0-B8, L2 * EfficientNet-EdgeTPU * EfficientNet-CondConv * MixNet S, M, L, XL * MnasNet A1, B1, and small * MobileNet-V2 * FBNet C * Single-Path NAS Pixel1 * TinyNet """ def __init__( self, block_args: BlockArgs, num_classes: int = 1000, num_features: int = 1280, in_chans: int = 3, stem_size: int = 32, stem_kernel_size: int = 3, fix_stem: bool = False, output_stride: int = 32, pad_type: str = '', act_layer: Optional[LayerType] = None, norm_layer: Optional[LayerType] = None, aa_layer: Optional[LayerType] = None, se_layer: Optional[LayerType] = None, round_chs_fn: Callable = round_channels, 
drop_rate: float = 0., drop_path_rate: float = 0., global_pool: str = 'avg' ): super(EfficientNet, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features self.stage_ends = [f['stage'] for f in self.feature_info] head_chs = builder.in_chs # Head + Pooling if num_features > 0: self.conv_head = create_conv2d(head_chs, num_features, 1, padding=pad_type) self.bn2 = norm_act_layer(num_features, inplace=True) self.num_features = self.head_hidden_size = num_features else: self.conv_head = nn.Identity() self.bn2 = nn.Identity() self.num_features = self.head_hidden_size = head_chs self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) efficientnet_init_weights(self) def as_sequential(self): layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) layers.extend([self.conv_head, self.bn2, self.global_pool]) layers.extend([nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head|bn2', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, extra_blocks: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
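        # `feature_take_indices` resolves `indices` into absolute positions: an int keeps the last
        # n entries, None keeps all of them, and a sequence selects the matching entries. The stem
        # counts as index 0; stage indices are mapped through self.stage_ends unless extra_blocks
        # is set, in which case indices refer to individual blocks rather than feature stages.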
intermediates = [] if extra_blocks: take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) else: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] # forward pass feat_idx = 0 # stem is index 0 x = self.conv_stem(x) x = self.bn1(x) if feat_idx in take_indices: intermediates.append(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index] for blk in blocks: feat_idx += 1 x = blk(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == self.stage_ends[-1]: x = self.conv_head(x) x = self.bn2(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, extra_blocks: bool = False, ): """ Prune layers not required for specified intermediates. """ if extra_blocks: take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) else: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.blocks = self.blocks[:max_index] # truncate blocks w/ stem as idx 0 if prune_norm or max_index < len(self.blocks): self.conv_head = nn.Identity() self.bn2 = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) x = self.conv_head(x) x = self.bn2(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class EfficientNetFeatures(nn.Module): """ EfficientNet Feature Extractor A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation and object detection models. """ def __init__( self, block_args: BlockArgs, out_indices: Tuple[int, ...] 
= (0, 1, 2, 3, 4), feature_location: str = 'bottleneck', in_chans: int = 3, stem_size: int = 32, stem_kernel_size: int = 3, fix_stem: bool = False, output_stride: int = 32, pad_type: str = '', act_layer: Optional[LayerType] = None, norm_layer: Optional[LayerType] = None, aa_layer: Optional[LayerType] = None, se_layer: Optional[LayerType] = None, round_chs_fn: Callable = round_channels, drop_rate: float = 0., drop_path_rate: float = 0., ): super(EfficientNetFeatures, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, feature_location=feature_location, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = FeatureInfo(builder.features, out_indices) self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} efficientnet_init_weights(self) # Register feature extraction hooks with FeatureHooks helper self.feature_hooks = None if feature_location != 'bottleneck': hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) self.feature_hooks = FeatureHooks(hooks, self.named_modules()) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x) -> List[torch.Tensor]: x = self.conv_stem(x) x = self.bn1(x) if self.feature_hooks is None: features = [] if 0 in self._stage_out_idx: features.append(x) # add stem out for i, b in enumerate(self.blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(b, x) else: x = b(x) if i + 1 in self._stage_out_idx: features.append(x) return features else: self.blocks(x) out = self.feature_hooks.get_output(x.device) return list(out.values()) def _create_effnet(variant, pretrained=False, **kwargs): features_mode = '' model_cls = EfficientNet kwargs_filter = None if kwargs.pop('features_only', False): if 'feature_cfg' in kwargs or 'feature_cls' in kwargs: features_mode = 'cfg' else: kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') model_cls = EfficientNetFeatures features_mode = 'cls' model = build_model_with_cfg( model_cls, variant, pretrained, features_only=features_mode == 'cfg', pretrained_strict=features_mode != 'cls', kwargs_filter=kwargs_filter, **kwargs, ) if features_mode == 'cls': model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg) return model def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-a1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_noskip'], # stage 1, 112x112 in ['ir_r2_k3_s2_e6_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25'], # stage 3, 28x28 in ['ir_r4_k3_s2_e6_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112_se0.25'], # stage 5, 14x14in ['ir_r3_k5_s2_e6_c160_se0.25'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-b1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_c16_noskip'], # stage 1, 112x112 in ['ir_r3_k3_s2_e3_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40'], # stage 3, 28x28 in ['ir_r3_k5_s2_e6_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c96'], # stage 5, 14x14in ['ir_r4_k5_s2_e6_c192'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320_noskip'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-b1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ ['ds_r1_k3_s1_c8'], ['ir_r1_k3_s2_e3_c16'], ['ir_r2_k3_s2_e6_c16'], ['ir_r4_k5_s2_e6_c32_se0.25'], ['ir_r3_k3_s1_e6_c32_se0.25'], ['ir_r3_k5_s2_e6_c88_se0.25'], ['ir_r1_k3_s1_e6_c144'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=8, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v1( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, fix_stem_head=False, head_conv=False, pretrained=False, **kwargs ): """ Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py Paper: https://arxiv.org/abs/1801.04381 """ arch_def = [ ['dsa_r1_k3_s1_c64'], ['dsa_r2_k3_s2_c128'], ['dsa_r2_k3_s2_c256'], ['dsa_r6_k3_s2_c512'], ['dsa_r2_k3_s2_c1024'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) head_features = (1024 if fix_stem_head else max(1024, round_chs_fn(1024))) if head_conv else 0 model_kwargs = dict( block_args=decode_arch_def( arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head, group_size=group_size, ), num_features=head_features, stem_size=32, fix_stem=fix_stem_head, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu6'), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v2( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, fix_stem_head=False, pretrained=False, **kwargs ): """ Generate MobileNet-V2 network Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py Paper: https://arxiv.org/abs/1801.04381 """ arch_def = [ ['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k3_s2_e6_c32'], ['ir_r4_k3_s2_e6_c64'], ['ir_r3_k3_s1_e6_c96'], ['ir_r3_k3_s2_e6_c160'], ['ir_r1_k3_s1_e6_c320'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def( arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head, group_size=group_size, ), num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)), stem_size=32, fix_stem=fix_stem_head, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu6'), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """ FBNet-C Paper: https://arxiv.org/abs/1812.03443 Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, it was used to confirm some building block details """ arch_def = [ ['ir_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], ['ir_r4_k5_s2_e6_c184'], ['ir_r1_k3_s1_e6_c352'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=16, num_features=1984, # paper 
suggests this, but is not 100% clear round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates the Single-Path NAS model from search targeted for Pixel1 phone. Paper: https://arxiv.org/abs/1904.02877 Args: channel_multiplier: multiplier to number of channels per layer. """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_c16_noskip'], # stage 1, 112x112 in ['ir_r3_k3_s2_e3_c24'], # stage 2, 56x56 in ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], # stage 3, 28x28 in ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], # stage 4, 14x14in ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], # stage 5, 14x14in ['ir_r4_k5_s2_e6_c192'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320_noskip'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet( variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, group_size=None, pretrained=False, **kwargs ): """Creates an EfficientNet model. Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py Paper: https://arxiv.org/abs/1905.11946 EfficientNet params name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) 'efficientnet-b0': (1.0, 1.0, 224, 0.2), 'efficientnet-b1': (1.0, 1.1, 240, 0.2), 'efficientnet-b2': (1.1, 1.2, 260, 0.3), 'efficientnet-b3': (1.2, 1.4, 300, 0.3), 'efficientnet-b4': (1.4, 1.8, 380, 0.4), 'efficientnet-b5': (1.6, 2.2, 456, 0.4), 'efficientnet-b6': (1.8, 2.6, 528, 0.5), 'efficientnet-b7': (2.0, 3.1, 600, 0.5), 'efficientnet-b8': (2.2, 3.6, 672, 0.5), 'efficientnet-l2': (4.3, 5.3, 800, 0.5), Args: channel_multiplier: multiplier to number of channels per layer depth_multiplier: multiplier to number of repeats per stage """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_edge( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs ): """ Creates an EfficientNet-EdgeTPU model Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu """ arch_def = [ # NOTE `fc` is present to override a mismatch between stem channels and in chs not # present in other models ['er_r1_k3_s1_e4_c24_fc24_noskip'], ['er_r2_k3_s2_e8_c32'], ['er_r4_k3_s2_e8_c48'], ['ir_r5_k5_s2_e8_c96'], ['ir_r4_k5_s1_e8_c144'], ['ir_r2_k5_s2_e8_c192'], ] round_chs_fn = 
partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_condconv( variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs ): """Creates an EfficientNet-CondConv model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], ] # NOTE unlike official impl, this one uses `cc<x>` option where x is the base number of experts for each stage and # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'swish'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """Creates an EfficientNet-Lite model. 
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite Paper: https://arxiv.org/abs/1905.11946 EfficientNet params name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), Args: channel_multiplier: multiplier to number of channels per layer depth_multiplier: multiplier to number of repeats per stage """ arch_def = [ ['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r2_k5_s2_e6_c40'], ['ir_r3_k3_s2_e6_c80'], ['ir_r3_k5_s1_e6_c112'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), num_features=1280, stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), act_layer=resolve_act_layer(kwargs, 'relu6'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_base( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs ): """ Creates an EfficientNet-V2 base model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r1_k3_s1_e1_c16_skip'], ['er_r2_k3_s2_e4_c32'], ['er_r2_k3_s2_e4_c48'], ['ir_r3_k3_s2_e4_c96_se0.25'], ['ir_r5_k3_s1_e6_c112_se0.25'], ['ir_r8_k3_s2_e6_c192_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_s( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs ): """ Creates an EfficientNet-V2 Small model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, before ref the impl was released. 
""" arch_def = [ ['cn_r2_k3_s1_e1_c24_skip'], ['er_r4_k3_s2_e4_c48'], ['er_r4_k3_s2_e4_c64'], ['ir_r6_k3_s2_e4_c128_se0.25'], ['ir_r9_k3_s1_e6_c160_se0.25'], ['ir_r15_k3_s2_e6_c256_se0.25'], ] num_features = 1280 if rw: # my original variant, based on paper figure differs from the official release arch_def[0] = ['er_r2_k3_s1_e1_c24'] arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] num_features = 1792 round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(num_features), stem_size=24, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_m( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs ): """ Creates an EfficientNet-V2 Medium model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r3_k3_s1_e1_c24_skip'], ['er_r5_k3_s2_e4_c48'], ['er_r5_k3_s2_e4_c80'], ['ir_r7_k3_s2_e4_c160_se0.25'], ['ir_r14_k3_s1_e6_c176_se0.25'], ['ir_r18_k3_s2_e6_c304_se0.25'], ['ir_r5_k3_s1_e6_c512_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=1280, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_l( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs ): """ Creates an EfficientNet-V2 Large model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r4_k3_s1_e1_c32_skip'], ['er_r7_k3_s2_e4_c64'], ['er_r7_k3_s2_e4_c96'], ['ir_r10_k3_s2_e4_c192_se0.25'], ['ir_r19_k3_s1_e6_c224_se0.25'], ['ir_r25_k3_s2_e6_c384_se0.25'], ['ir_r7_k3_s1_e6_c640_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_xl( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs ): """ Creates an EfficientNet-V2 Xtra-Large model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r4_k3_s1_e1_c32_skip'], ['er_r8_k3_s2_e4_c64'], ['er_r8_k3_s2_e4_c96'], ['ir_r16_k3_s2_e4_c192_se0.25'], ['ir_r24_k3_s1_e6_c256_se0.25'], ['ir_r32_k3_s2_e6_c512_se0.25'], ['ir_r8_k3_s1_e6_c640_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), 
num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_x( variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, group_size=None, version=1, pretrained=False, **kwargs ): """Creates an EfficientNet model. Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py Paper: https://arxiv.org/abs/1905.11946 EfficientNet params name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) 'efficientnet-x-b0': (1.0, 1.0, 224, 0.2), 'efficientnet-x-b1': (1.0, 1.1, 240, 0.2), 'efficientnet-x-b2': (1.1, 1.2, 260, 0.3), 'efficientnet-x-b3': (1.2, 1.4, 300, 0.3), 'efficientnet-x-b4': (1.4, 1.8, 380, 0.4), 'efficientnet-x-b5': (1.6, 2.2, 456, 0.4), 'efficientnet-x-b6': (1.8, 2.6, 528, 0.5), 'efficientnet-x-b7': (2.0, 3.1, 600, 0.5), 'efficientnet-x-b8': (2.2, 3.6, 672, 0.5), 'efficientnet-l2': (4.3, 5.3, 800, 0.5), Args: channel_multiplier: multiplier to number of channels per layer depth_multiplier: multiplier to number of repeats per stage """ """ if version == 1: blocks_args = [ 'r1_k3_s11_e1_i32_o16_se0.25_d1_a0', 'r2_k3_s22_e6_i16_o24_se0.25_f1_d2_a1', 'r2_k5_s22_e6_i24_o40_se0.25_f1_a1', 'r3_k3_s22_e6_i40_o80_se0.25_a0', 'r3_k5_s11_e6_i80_o112_se0.25_a0', 'r4_k5_s22_e6_i112_o192_se0.25_a0', 'r1_k3_s11_e6_i192_o320_se0.25_a0', ] elif version == 2: blocks_args = [ 'r1_k3_s11_e1_i32_o16_se0.25_d1_a0', 'r2_k3_s22_e4_i16_o24_se0.25_f1_d2_a1', 'r2_k5_s22_e4_i24_o40_se0.25_f1_a1', 'r3_k3_s22_e4_i40_o80_se0.25_a0', 'r3_k5_s11_e6_i80_o112_se0.25_a0', 'r4_k5_s22_e6_i112_o192_se0.25_a0', 'r1_k3_s11_e6_i192_o320_se0.25_a0', ] """ if version == 1: arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25_d1'], ['er_r2_k3_s2_e6_c24_se0.25_nre'], ['er_r2_k5_s2_e6_c40_se0.25_nre'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] else: arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25_d1'], ['er_r2_k3_s2_e4_c24_se0.25_nre'], ['er_r2_k5_s2_e4_c40_se0.25_nre'], ['ir_r3_k3_s2_e4_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, act_layer=resolve_act_layer(kwargs, 'silu'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a MixNet Small model. 
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet Paper: https://arxiv.org/abs/1907.09595 """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16'], # relu # stage 1, 112x112 in ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu # stage 2, 56x56 in ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish # stage 3, 28x28 in ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish # stage 4, 14x14in ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish # stage 5, 14x14in ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=1536, stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """Creates a MixNet Medium-Large model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet Paper: https://arxiv.org/abs/1907.09595 """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c24'], # relu # stage 1, 112x112 in ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu # stage 2, 56x56 in ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish # stage 3, 28x28 in ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish # stage 4, 14x14in ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish # stage 5, 14x14in ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=1536, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_tinynet(variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """Creates a TinyNet model. 
""" arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=max(1280, round_channels(1280, model_width, 8, None)), stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=model_width), act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_edgetpu(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Based on definitions in: https://github.com/tensorflow/models/tree/d2427a562f401c9af118e47af2f030a0a5599f55/official/projects/edgetpu/vision """ if 'edgetpu_v2' in variant: stem_size = 64 stem_kernel_size = 5 group_size = 64 num_features = 1280 act_layer = resolve_act_layer(kwargs, 'relu') def _arch_def(chs: List[int], group_size: int): return [ # stage 0, 112x112 in [f'cn_r1_k1_s1_c{chs[0]}'], # NOTE with expansion==1, official impl block ends just 1x1 pwl # stage 1, 112x112 in [f'er_r1_k3_s2_e8_c{chs[1]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[1]}'], # stage 2, 56x56 in [ f'er_r1_k3_s2_e8_c{chs[2]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}', f'er_r1_k3_s1_e4_c{chs[2]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}', ], # stage 3, 28x28 in [f'er_r1_k3_s2_e8_c{chs[3]}', f'ir_r3_k3_s1_e4_c{chs[3]}'], # stage 4, 14x14in [f'ir_r1_k3_s1_e8_c{chs[4]}', f'ir_r3_k3_s1_e4_c{chs[4]}'], # stage 5, 14x14in [f'ir_r1_k3_s2_e8_c{chs[5]}', f'ir_r3_k3_s1_e4_c{chs[5]}'], # stage 6, 7x7 in [f'ir_r1_k3_s1_e8_c{chs[6]}'], ] if 'edgetpu_v2_xs' in variant: stem_size = 32 stem_kernel_size = 3 channels = [16, 32, 48, 96, 144, 160, 192] elif 'edgetpu_v2_s' in variant: channels = [24, 48, 64, 128, 160, 192, 256] elif 'edgetpu_v2_m' in variant: channels = [32, 64, 80, 160, 192, 240, 320] num_features = 1344 elif 'edgetpu_v2_l' in variant: stem_kernel_size = 7 group_size = 128 channels = [32, 64, 96, 192, 240, 256, 384] num_features = 1408 else: assert False arch_def = _arch_def(channels, group_size) else: # v1 stem_size = 32 stem_kernel_size = 3 num_features = 1280 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in ['cn_r1_k1_s1_c16'], # stage 1, 112x112 in ['er_r1_k3_s2_e8_c32', 'er_r3_k3_s1_e4_c32'], # stage 2, 56x56 in ['er_r1_k3_s2_e8_c48', 'er_r3_k3_s1_e4_c48'], # stage 3, 28x28 in ['ir_r1_k3_s2_e8_c96', 'ir_r3_k3_s1_e4_c96'], # stage 4, 14x14in ['ir_r1_k3_s1_e8_c96_noskip', 'ir_r3_k3_s1_e4_c96'], # stage 5, 14x14in ['ir_r1_k5_s2_e8_c160', 'ir_r3_k5_s1_e4_c160'], # stage 6, 7x7 in ['ir_r1_k3_s1_e8_c192'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=num_features, stem_size=stem_size, stem_kernel_size=stem_kernel_size, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_test_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Minimal test EfficientNet generator. 
""" arch_def = [ ['cn_r1_k3_s1_e1_c16_skip'], ['er_r1_k3_s2_e4_c24'], ['er_r1_k3_s2_e4_c32'], ['ir_r1_k3_s2_e4_c48_se0.25'], ['ir_r1_k3_s2_e4_c64_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_chs_fn(256), stem_size=24, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'mnasnet_050.untrained': _cfg(), 'mnasnet_075.untrained': _cfg(), 'mnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', hf_hub_id='timm/'), 'mnasnet_140.untrained': _cfg(), 'semnasnet_050.untrained': _cfg(), 'semnasnet_075.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth', hf_hub_id='timm/'), 'semnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', hf_hub_id='timm/'), 'semnasnet_140.untrained': _cfg(), 'mnasnet_small.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth', hf_hub_id='timm/'), 'mobilenetv1_100.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95, ), 'mobilenetv1_100h.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95, ), 'mobilenetv1_125.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0, ), 'mobilenetv2_035.untrained': _cfg(), 'mobilenetv2_050.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth', hf_hub_id='timm/', interpolation='bicubic', ), 'mobilenetv2_075.untrained': _cfg(), 'mobilenetv2_100.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth', hf_hub_id='timm/'), 'mobilenetv2_110d.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth', hf_hub_id='timm/'), 'mobilenetv2_120d.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth', hf_hub_id='timm/'), 'mobilenetv2_140.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth', hf_hub_id='timm/'), 'fbnetc_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', hf_hub_id='timm/', interpolation='bilinear'), 'spnasnet_100.rmsp_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', hf_hub_id='timm/', interpolation='bilinear'), # NOTE experimenting with alternate attention 'efficientnet_b0.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth', hf_hub_id='timm/'), 'efficientnet_b0.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0), 'efficientnet_b1.ra4_e3600_r240_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), crop_pct=0.9, pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'efficientnet_b1.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), test_crop_pct=1.0), 'efficientnet_b2.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'efficientnet_b3.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'efficientnet_b4.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'efficientnet_b5.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'), 'efficientnet_b5.sw_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821), 'efficientnet_b6.untrained': _cfg( url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'efficientnet_b7.untrained': _cfg( url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'efficientnet_b8.untrained': _cfg( url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'efficientnet_l2.untrained': _cfg( url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), # FIXME experimental 'efficientnet_b0_gn.untrained': _cfg(), 'efficientnet_b0_g8_gn.untrained': _cfg(), 'efficientnet_b0_g16_evos.untrained': _cfg(), 'efficientnet_b3_gn.untrained': _cfg( input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_b3_g8_gn.untrained': _cfg( input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_blur_b0.untrained': _cfg(), 'efficientnet_es.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth', hf_hub_id='timm/'), 'efficientnet_em.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_el.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth', 
hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_es_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth', hf_hub_id='timm/'), 'efficientnet_el_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_cc_b0_4e.untrained': _cfg(), 'efficientnet_cc_b0_8e.untrained': _cfg(), 'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite0.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth', hf_hub_id='timm/'), 'efficientnet_lite1.untrained': _cfg( input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite2.untrained': _cfg( input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'efficientnet_lite3.untrained': _cfg( input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_lite4.untrained': _cfg( input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'efficientnet_b1_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b2_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b3_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnetv2_rw_t.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'gc_efficientnetv2_rw_t.agc_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'efficientnetv2_rw_s.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', hf_hub_id='timm/', input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_rw_m.agc_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', hf_hub_id='timm/', input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_s.untrained': _cfg( input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_m.untrained': _cfg( input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_l.untrained': _cfg( input_size=(3, 
384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), 'efficientnetv2_xl.untrained': _cfg( input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), 'tf_efficientnet_b0.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', hf_hub_id='timm/', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_l2.ns_jft_in1k_475': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', hf_hub_id='timm/', input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), 'tf_efficientnet_l2.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', hf_hub_id='timm/', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), 'tf_efficientnet_b0.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), 'tf_efficientnet_b1.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.ap_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b5.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b7.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', hf_hub_id='timm/', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b0.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.aa_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.aa_in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', hf_hub_id='timm/', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.aa_in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b0.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_es.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), ), 'tf_efficientnet_em.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_el.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_cc_b0_4e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b0_8e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', hf_hub_id='timm/', 
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b1_8e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_lite0.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite1.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite2.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite3.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), 'tf_efficientnet_lite4.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), 'tf_efficientnetv2_s.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_b0.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), 'tf_efficientnetv2_b1.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), 'tf_efficientnetv2_b2.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', hf_hub_id='timm/', input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), 'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'), 'tf_efficientnetv2_b3.in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', hf_hub_id='timm/', input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'tf_efficientnetv2_b3.in21k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=21843, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'mixnet_s.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth', hf_hub_id='timm/'), 'mixnet_m.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth', hf_hub_id='timm/'), 'mixnet_l.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth', hf_hub_id='timm/'), 'mixnet_xl.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth', hf_hub_id='timm/'), 'mixnet_xxl.untrained': _cfg(), 'tf_mixnet_s.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth', hf_hub_id='timm/'), 'tf_mixnet_m.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth', hf_hub_id='timm/'), 'tf_mixnet_l.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth', hf_hub_id='timm/'), "tinynet_a.in1k": _cfg( input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth', hf_hub_id='timm/'), "tinynet_b.in1k": _cfg( input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth', hf_hub_id='timm/'), "tinynet_c.in1k": _cfg( input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth', hf_hub_id='timm/'), "tinynet_d.in1k": _cfg( input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth', hf_hub_id='timm/'), "tinynet_e.in1k": _cfg( input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth', hf_hub_id='timm/'), 'mobilenet_edgetpu_100.untrained': _cfg( # hf_hub_id='timm/', input_size=(3, 224, 224), crop_pct=0.9), 'mobilenet_edgetpu_v2_xs.untrained': _cfg( # hf_hub_id='timm/', input_size=(3, 224, 224), crop_pct=0.9), 'mobilenet_edgetpu_v2_s.untrained': _cfg( #hf_hub_id='timm/', input_size=(3, 224, 224), crop_pct=0.9), 'mobilenet_edgetpu_v2_m.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=0.95, ), 'mobilenet_edgetpu_v2_l.untrained': _cfg( #hf_hub_id='timm/', input_size=(3, 224, 224), crop_pct=0.9), "test_efficientnet.r160_in1k": _cfg( hf_hub_id='timm/', input_size=(3, 160, 160), pool_size=(5, 5)), }) @register_model def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 0.5. 
""" model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 0.75. """ model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 1.0. """ model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 1.4 """ model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet Small, depth multiplier of 1.0. """ model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv1_100(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V1 """ model = _gen_mobilenet_v1('mobilenetv1_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv1_100h(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V1 """ model = _gen_mobilenet_v1('mobilenetv1_100h', 1.0, head_conv=True, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv1_125(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V1 """ model = _gen_mobilenet_v1('mobilenetv1_125', 1.25, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.35 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.5 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.75 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.0 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.4 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, 
pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" model = _gen_mobilenet_v2( 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ model = _gen_mobilenet_v2( 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet: """ FBNet-C """ if pretrained: # pretrained model trained with non-default BN epsilon kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ Single-Path NAS Pixel1""" model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b7', channel_multiplier=2.0, 
depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B8 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2.""" # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model # FIXME experimental group cong / GroupNorm / EvoNorm experiments @register_model def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 + GroupNorm""" model = _gen_efficientnet( 'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ group conv + GroupNorm""" model = _gen_efficientnet( 'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ group 16 conv + EvoNorm""" model = _gen_efficientnet( 'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16, pretrained=pretrained, **kwargs) #norm_layer=partial(EvoNorm2dS0, group_size=16), return model @register_model def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 w/ GroupNorm """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 w/ grouped conv + BN""" # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_blur_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ BlurPool """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_blur_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs ) return model @register_model def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small. """ model = _gen_efficientnet_edge( 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small Pruned. 
For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" model = _gen_efficientnet_edge( 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Medium. """ model = _gen_efficientnet_edge( 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large. """ model = _gen_efficientnet_edge( 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" model = _gen_efficientnet_edge( 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B1 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model 
@register_model def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') variant = 'efficientnet_b1_pruned' model = _gen_efficientnet( variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) return model @register_model def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, se_layer='gc', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small (RW variant). NOTE: This is my initial (pre official code release) w/ some differences. See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding """ model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium (RW variant). """ model = _gen_efficientnetv2_s( 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. """ model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. 
""" model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. """ model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. """ model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B8. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B0. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B1. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B2. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B3. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_x_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_x( 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_x_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ model = _gen_efficientnet_x( 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_h_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ model = _gen_efficientnet_x( 'efficientnet_b5', channel_multiplier=1.92, depth_multiplier=2.2, version=2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. """ model = _gen_mixnet_s( 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. """ model = _gen_mixnet_m( 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. 
""" model = _gen_mixnet_m( 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Extra-Large model. Not a paper spec, experimental def by RW w/ depth scaling. """ model = _gen_mixnet_m( 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Double Extra Large model. Not a paper spec, experimental def by RW w/ depth scaling. """ model = _gen_mixnet_m( 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_s( 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m( 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m( 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tinynet_a(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) return model @register_model def tinynet_b(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) return model @register_model def tinynet_c(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) return model @register_model def tinynet_d(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) return model @register_model def tinynet_e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_100(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v1 100. """ model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_100', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_xs(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v2 Extra Small. """ model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_xs', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_s(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v2 Small. """ model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_s', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_m(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v2 Medium. 
""" model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_m', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_l(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v2 Large. """ model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_l', pretrained=pretrained, **kwargs) return model @register_model def test_efficientnet(pretrained=False, **kwargs) -> EfficientNet: model = _gen_test_efficientnet('test_efficientnet', pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, { 'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k', 'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k', 'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k', 'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k', 'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k', 'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k', 'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k', 'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k', 'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k', 'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k', 'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k', 'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k', 'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k', 'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k', 'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k', 'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k', 'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k', 'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475', 'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k', 'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k', 'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k', 'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k', 'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k', 'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k', 'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k', 'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k', 'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k', 'efficientnet_b2a': 'efficientnet_b2', 'efficientnet_b3a': 'efficientnet_b3', 'mnasnet_a1': 'semnasnet_100', 'mnasnet_b1': 'mnasnet_100', })
pytorch-image-models/timm/models/efficientnet.py/0
{ "file_path": "pytorch-image-models/timm/models/efficientnet.py", "repo_id": "pytorch-image-models", "token_count": 57214 }
226
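The `@register_model` entrypoints in the efficientnet.py file above are not meant to be called directly; they are consumed through timm's model factory. A minimal usage sketch follows, assuming a standard `timm` install and its `create_model` API; the variant name, input size, and `num_classes` below are illustrative choices, not tied to any particular checkpoint.

import torch
import timm

# Hedged sketch: any variant registered in the file above should work here,
# e.g. 'efficientnet_b0', 'mixnet_l', or 'tf_efficientnetv2_s'.
# pretrained=False avoids depending on a specific hosted checkpoint.
model = timm.create_model('efficientnet_b0', pretrained=False, num_classes=1000)
model.eval()

with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))  # logits of shape (1, num_classes)
print(out.shape)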
""" HRNet Copied from https://github.com/HRNet/HRNet-Image-Classification Original header: Copyright (c) Microsoft Licensed under the MIT License. Written by Bin Xiao (Bin.Xiao@microsoft.com) Modified by Ke Sun (sunk@mail.ustc.edu.cn) """ import logging from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._features import FeatureInfo from ._registry import register_model, generate_default_cfgs from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE __all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this _BN_MOMENTUM = 0.1 _logger = logging.getLogger(__name__) cfg_cls = dict( hrnet_w18_small=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(1,), num_channels=(32,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(16, 32), fuse_method='SUM' ), stage3=dict( num_modules=1, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(16, 32, 64), fuse_method='SUM' ), stage4=dict( num_modules=1, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(16, 32, 64, 128), fuse_method='SUM', ), ), hrnet_w18_small_v2=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(2,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=3, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=2, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w18=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w30=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(30, 60), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(30, 60, 120), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(30, 60, 120, 240), fuse_method='SUM', ), ), hrnet_w32=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(32, 64), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', 
num_blocks=(4, 4, 4), num_channels=(32, 64, 128), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256), fuse_method='SUM', ), ), hrnet_w40=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(40, 80), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(40, 80, 160), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(40, 80, 160, 320), fuse_method='SUM', ), ), hrnet_w44=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(44, 88), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(44, 88, 176), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(44, 88, 176, 352), fuse_method='SUM', ), ), hrnet_w48=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(48, 96), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(48, 96, 192), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(48, 96, 192, 384), fuse_method='SUM', ), ), hrnet_w64=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(64, 128), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(64, 128, 256), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(64, 128, 256, 512), fuse_method='SUM', ), ) ) class HighResolutionModule(nn.Module): def __init__( self, num_branches, block_types, num_blocks, num_in_chs, num_channels, fuse_method, multi_scale_output=True, ): super(HighResolutionModule, self).__init__() self._check_branches( num_branches, block_types, num_blocks, num_in_chs, num_channels, ) self.num_in_chs = num_in_chs self.fuse_method = fuse_method self.num_branches = num_branches self.multi_scale_output = multi_scale_output self.branches = self._make_branches( num_branches, block_types, num_blocks, num_channels, ) self.fuse_layers = self._make_fuse_layers() self.fuse_act = nn.ReLU(False) def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels): error_msg = '' if num_branches != len(num_blocks): error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks)) elif num_branches != len(num_channels): error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels)) elif num_branches != len(num_in_chs): error_msg = 'num_branches({}) <> 
num_in_chs({})'.format(num_branches, len(num_in_chs)) if error_msg: _logger.error(error_msg) raise ValueError(error_msg) def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion: downsample = nn.Sequential( nn.Conv2d( self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)] self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion for i in range(1, num_blocks[branch_index]): layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index])) return nn.Sequential(*layers) def _make_branches(self, num_branches, block_type, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels)) return nn.ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return nn.Identity() num_branches = self.num_branches num_in_chs = self.num_in_chs fuse_layers = [] for i in range(num_branches if self.multi_scale_output else 1): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False), nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM), nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) elif j == i: fuse_layer.append(nn.Identity()) else: conv3x3s = [] for k in range(i - j): if k == i - j - 1: num_out_chs_conv3x3 = num_in_chs[i] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM) )) else: num_out_chs_conv3x3 = num_in_chs[j] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM), nn.ReLU(False) )) fuse_layer.append(nn.Sequential(*conv3x3s)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def get_num_in_chs(self): return self.num_in_chs def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if self.num_branches == 1: return [self.branches[0](x[0])] for i, branch in enumerate(self.branches): x[i] = branch(x[i]) x_fuse = [] for i, fuse_outer in enumerate(self.fuse_layers): y = None for j, f in enumerate(fuse_outer): if y is None: y = f(x[j]) else: y = y + f(x[j]) x_fuse.append(self.fuse_act(y)) return x_fuse class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x @torch.jit.interface class ModuleInterface(torch.nn.Module): def forward(self, input: torch.Tensor) -> torch.Tensor: # `input` has a same name in Sequential forward pass block_types_dict = { 'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck } class HighResolutionNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, 
head='classification', **kwargs, ): super(HighResolutionNet, self).__init__() self.num_classes = num_classes assert output_stride == 32 # FIXME support dilation cfg.update(**kwargs) stem_width = cfg['stem_width'] self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) self.act2 = nn.ReLU(inplace=True) self.stage1_cfg = cfg['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = block_types_dict[self.stage1_cfg['block_type']] num_blocks = self.stage1_cfg['num_blocks'][0] self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks) stage1_out_channel = block_type.expansion * num_channels self.stage2_cfg = cfg['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = block_types_dict[self.stage2_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) self.stage3_cfg = cfg['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = block_types_dict[self.stage3_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) self.stage4_cfg = cfg['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = block_types_dict[self.stage4_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) self.head = head self.head_channels = None # set if _make_head called head_conv_bias = cfg.pop('head_conv_bias', True) if head == 'classification': # Classification Head self.num_features = self.head_hidden_size = 2048 self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head( pre_stage_channels, conv_bias=head_conv_bias, ) self.global_pool, self.head_drop, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) else: if head == 'incre': self.num_features = self.head_hidden_size = 2048 self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True) else: self.num_features = self.head_hidden_size = 256 self.incre_modules = None self.global_pool = nn.Identity() self.head_drop = nn.Identity() self.classifier = nn.Identity() curr_stride = 2 # module names aren't actually valid here, hook or FeatureNet based extraction would not work self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] for i, c in enumerate(self.head_channels if self.head_channels else num_channels): curr_stride *= 2 c = c * 4 if self.head_channels else c # head block_type expansion factor of 4 self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] self.init_weights() def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True): head_block_type = Bottleneck self.head_channels = [32, 64, 128, 256] # 
Increasing the #channels on each resolution # from C, 2C, 4C, 8C to 128, 256, 512, 1024 incre_modules = [] for i, channels in enumerate(pre_stage_channels): incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1)) incre_modules = nn.ModuleList(incre_modules) if incre_only: return incre_modules, None, None # downsampling modules downsamp_modules = [] for i in range(len(pre_stage_channels) - 1): in_channels = self.head_channels[i] * head_block_type.expansion out_channels = self.head_channels[i + 1] * head_block_type.expansion downsamp_module = nn.Sequential( nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) downsamp_modules.append(downsamp_module) downsamp_modules = nn.ModuleList(downsamp_modules) final_layer = nn.Sequential( nn.Conv2d( in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features, kernel_size=1, stride=1, padding=0, bias=conv_bias), nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) return incre_modules, downsamp_modules, final_layer def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append(nn.Sequential( nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) else: transition_layers.append(nn.Identity()) else: conv3x3s = [] for j in range(i + 1 - num_branches_pre): _in_chs = num_channels_pre_layer[-1] _out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs conv3x3s.append(nn.Sequential( nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False), nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv3x3s)) return nn.ModuleList(transition_layers) def _make_layer(self, block_type, inplanes, planes, block_types, stride=1): downsample = None if stride != 1 or inplanes != planes * block_type.expansion: downsample = nn.Sequential( nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(inplanes, planes, stride, downsample)] inplanes = planes * block_type.expansion for i in range(1, block_types): layers.append(block_type(inplanes, planes)) return nn.Sequential(*layers) def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block_type = block_types_dict[layer_config['block_type']] fuse_method = layer_config['fuse_method'] modules = [] for i in range(num_modules): # multi_scale_output is only used last module reset_multi_scale_output = multi_scale_output or i < num_modules - 1 modules.append(HighResolutionModule( num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output) ) num_in_chs = modules[-1].get_num_in_chs() return SequentialList(*modules), num_in_chs @torch.jit.ignore def init_weights(self): for m in 
self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv[12]|bn[12]', block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [ (r'^layer(\d+)\.(\d+)', None), (r'^stage(\d+)\.(\d+)', None), (r'^transition(\d+)', (99999,)), ], ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "gradient checkpointing not supported" @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classifier def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def stages(self, x) -> List[torch.Tensor]: x = self.layer1(x) xl = [t(x) for i, t in enumerate(self.transition1)] yl = self.stage2(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] yl = self.stage3(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] yl = self.stage4(xl) return yl def forward_features(self, x): # Stem x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) # Stages yl = self.stages(x) if self.incre_modules is None or self.downsamp_modules is None: return yl y = None for i, incre in enumerate(self.incre_modules): if y is None: y = incre(yl[i]) else: down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing y = incre(yl[i]) + down.forward(y) y = self.final_layer(y) return y def forward_head(self, x, pre_logits: bool = False): # Classification Head x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classifier(x) def forward(self, x): y = self.forward_features(x) x = self.forward_head(y) return x class HighResolutionNetFeatures(HighResolutionNet): """HighResolutionNet feature extraction The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. It would be more complicated to use the FeatureNet helpers. The `feature_location=incre` allows grabbing increased channel count features using part of the classification head. If `feature_location=''` the default HRNet features are returned. First stem conv is used for stride 2 features. 
""" def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, feature_location='incre', out_indices=(0, 1, 2, 3, 4), **kwargs, ): assert feature_location in ('incre', '') super(HighResolutionNetFeatures, self).__init__( cfg, in_chans=in_chans, num_classes=num_classes, output_stride=output_stride, global_pool=global_pool, drop_rate=drop_rate, head=feature_location, **kwargs, ) self.feature_info = FeatureInfo(self.feature_info, out_indices) self._out_idx = {f['index'] for f in self.feature_info.get_dicts()} def forward_features(self, x): assert False, 'Not supported' def forward(self, x) -> List[torch.Tensor]: out = [] x = self.conv1(x) x = self.bn1(x) x = self.act1(x) if 0 in self._out_idx: out.append(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) x = self.stages(x) if self.incre_modules is not None: x = [incre(f) for f, incre in zip(x, self.incre_modules)] for i, f in enumerate(x): if i + 1 in self._out_idx: out.append(f) return out def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs): model_cls = HighResolutionNet features_only = False kwargs_filter = None if model_kwargs.pop('features_only', False): model_cls = HighResolutionNetFeatures kwargs_filter = ('num_classes', 'global_pool') features_only = True cfg_variant = cfg_variant or variant pretrained_strict = model_kwargs.pop( 'pretrained_strict', not features_only and model_kwargs.get('head', 'classification') == 'classification' ) model = build_model_with_cfg( model_cls, variant, pretrained, model_cfg=cfg_cls[cfg_variant], pretrained_strict=pretrained_strict, kwargs_filter=kwargs_filter, **model_kwargs, ) if features_only: model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg) model.default_cfg = model.pretrained_cfg # backwards compat return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'hrnet_w18_small.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18.ms_aug_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, ), 'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), 'hrnet_w48_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), }) @register_model def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) @register_model def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) @register_model def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18', pretrained, **kwargs) @register_model def hrnet_w30(pretrained=False, 
**kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w30', pretrained, **kwargs) @register_model def hrnet_w32(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w32', pretrained, **kwargs) @register_model def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w40', pretrained, **kwargs) @register_model def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w44', pretrained, **kwargs) @register_model def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w48', pretrained, **kwargs) @register_model def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w64', pretrained, **kwargs) @register_model def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs) @register_model def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)
pytorch-image-models/timm/models/hrnet.py/0
{ "file_path": "pytorch-image-models/timm/models/hrnet.py", "repo_id": "pytorch-image-models", "token_count": 17688 }
227
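The `HighResolutionNetFeatures` wrapper in the hrnet.py file above is what `_create_hrnet` builds when `features_only=True` is passed through the factory, while the default path returns the classification model. A minimal sketch of both paths, assuming the standard timm factory; the variant name and input size are illustrative.

import torch
import timm

# Classification path (HighResolutionNet): returns logits.
clf = timm.create_model('hrnet_w18_small', pretrained=False)

# Feature-extraction path (HighResolutionNetFeatures): returns a list of feature maps.
# The default feature_location='incre' and out_indices=(0, 1, 2, 3, 4) follow the class
# definition above; exact channel counts depend on the chosen width variant (assumption:
# defaults are fine for a quick shape check).
feat = timm.create_model('hrnet_w18_small', pretrained=False, features_only=True)

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = clf(x)   # (1, 1000)
    fmaps = feat(x)   # feature maps at reductions 2, 4, 8, 16, 32 per feature_info
print(logits.shape, [f.shape for f in fmaps])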
""" Next-ViT As described in https://arxiv.org/abs/2207.05501 Next-ViT model defs and weights adapted from https://github.com/bytedance/Next-ViT, original copyright below """ # Copyright (c) ByteDance Inc. All rights reserved. from functools import partial from typing import Optional import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, ConvMlp, get_norm_layer, get_act_layer, use_fused_attn from timm.layers import ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['NextViT'] def merge_pre_bn(module, pre_bn_1, pre_bn_2=None): """ Merge pre BN to reduce inference runtime. """ weight = module.weight.data if module.bias is None: zeros = torch.zeros(module.out_chs, device=weight.device).type(weight.type()) module.bias = nn.Parameter(zeros) bias = module.bias.data if pre_bn_2 is None: assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False" scale_invstd = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) extra_weight = scale_invstd * pre_bn_1.weight extra_bias = pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd else: assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False" assert pre_bn_2.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" assert pre_bn_2.affine is True, "Unsupported bn_module.affine is False" scale_invstd_1 = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) scale_invstd_2 = pre_bn_2.running_var.add(pre_bn_2.eps).pow(-0.5) extra_weight = scale_invstd_1 * pre_bn_1.weight * scale_invstd_2 * pre_bn_2.weight extra_bias = ( scale_invstd_2 * pre_bn_2.weight * (pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd_1 - pre_bn_2.running_mean) + pre_bn_2.bias ) if isinstance(module, nn.Linear): extra_bias = weight @ extra_bias weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) elif isinstance(module, nn.Conv2d): assert weight.shape[2] == 1 and weight.shape[3] == 1 weight = weight.reshape(weight.shape[0], weight.shape[1]) extra_bias = weight @ extra_bias weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) weight = weight.reshape(weight.shape[0], weight.shape[1], 1, 1) bias.add_(extra_bias) module.weight.data = weight module.bias.data = bias class ConvNormAct(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=3, stride=1, groups=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, ): super(ConvNormAct, self).__init__() self.conv = nn.Conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=1, groups=groups, bias=False) self.norm = norm_layer(out_chs) self.act = act_layer() def forward(self, x): x = self.conv(x) x = self.norm(x) x = self.act(x) return x def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. 
if new_v < 0.9 * v: new_v += divisor return new_v class PatchEmbed(nn.Module): def __init__(self, in_chs, out_chs, stride=1, norm_layer = nn.BatchNorm2d, ): super(PatchEmbed, self).__init__() if stride == 2: self.pool = nn.AvgPool2d((2, 2), stride=2, ceil_mode=True, count_include_pad=False) self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False) self.norm = norm_layer(out_chs) elif in_chs != out_chs: self.pool = nn.Identity() self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False) self.norm = norm_layer(out_chs) else: self.pool = nn.Identity() self.conv = nn.Identity() self.norm = nn.Identity() def forward(self, x): return self.norm(self.conv(self.pool(x))) class ConvAttention(nn.Module): """ Multi-Head Convolutional Attention """ def __init__(self, out_chs, head_dim, norm_layer = nn.BatchNorm2d, act_layer = nn.ReLU): super(ConvAttention, self).__init__() self.group_conv3x3 = nn.Conv2d( out_chs, out_chs, kernel_size=3, stride=1, padding=1, groups=out_chs // head_dim, bias=False ) self.norm = norm_layer(out_chs) self.act = act_layer() self.projection = nn.Conv2d(out_chs, out_chs, kernel_size=1, bias=False) def forward(self, x): out = self.group_conv3x3(x) out = self.norm(out) out = self.act(out) out = self.projection(out) return out class NextConvBlock(nn.Module): """ Next Convolution Block """ def __init__( self, in_chs, out_chs, stride=1, drop_path=0., drop=0., head_dim=32, mlp_ratio=3., norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU ): super(NextConvBlock, self).__init__() self.in_chs = in_chs self.out_chs = out_chs assert out_chs % head_dim == 0 self.patch_embed = PatchEmbed(in_chs, out_chs, stride, norm_layer=norm_layer) self.mhca = ConvAttention( out_chs, head_dim, norm_layer=norm_layer, act_layer=act_layer, ) self.attn_drop_path = DropPath(drop_path) self.norm = norm_layer(out_chs) self.mlp = ConvMlp( out_chs, hidden_features=int(out_chs * mlp_ratio), drop=drop, bias=True, act_layer=act_layer, ) self.mlp_drop_path = DropPath(drop_path) self.is_fused = False @torch.no_grad() def reparameterize(self): if not self.is_fused: merge_pre_bn(self.mlp.fc1, self.norm) self.norm = nn.Identity() self.is_fused = True def forward(self, x): x = self.patch_embed(x) x = x + self.attn_drop_path(self.mhca(x)) out = self.norm(x) x = x + self.mlp_drop_path(self.mlp(out)) return x class EfficientAttention(nn.Module): """ Efficient Multi-Head Self Attention """ fused_attn: torch.jit.Final[bool] def __init__( self, dim, out_dim=None, head_dim=32, qkv_bias=True, attn_drop=0., proj_drop=0., sr_ratio=1, norm_layer=nn.BatchNorm1d, ): super().__init__() self.dim = dim self.out_dim = out_dim if out_dim is not None else dim self.num_heads = self.dim // head_dim self.head_dim = head_dim self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, self.dim, bias=qkv_bias) self.k = nn.Linear(dim, self.dim, bias=qkv_bias) self.v = nn.Linear(dim, self.dim, bias=qkv_bias) self.proj = nn.Linear(self.dim, self.out_dim) self.attn_drop = nn.Dropout(attn_drop) self.proj_drop = nn.Dropout(proj_drop) self.sr_ratio = sr_ratio self.N_ratio = sr_ratio ** 2 if sr_ratio > 1: self.sr = nn.AvgPool1d(kernel_size=self.N_ratio, stride=self.N_ratio) self.norm = norm_layer(dim) else: self.sr = None self.norm = None def forward(self, x): B, N, C = x.shape q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) if self.sr is not None: x = self.sr(x.transpose(1, 2)) x = self.norm(x).transpose(1, 2) k = self.k(x).reshape(B, -1, self.num_heads, 
self.head_dim).transpose(1, 2) v = self.v(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-1, -2) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class NextTransformerBlock(nn.Module): """ Next Transformer Block """ def __init__( self, in_chs, out_chs, drop_path, stride=1, sr_ratio=1, mlp_ratio=2, head_dim=32, mix_block_ratio=0.75, attn_drop=0., drop=0., norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, ): super(NextTransformerBlock, self).__init__() self.in_chs = in_chs self.out_chs = out_chs self.mix_block_ratio = mix_block_ratio self.mhsa_out_chs = _make_divisible(int(out_chs * mix_block_ratio), 32) self.mhca_out_chs = out_chs - self.mhsa_out_chs self.patch_embed = PatchEmbed(in_chs, self.mhsa_out_chs, stride) self.norm1 = norm_layer(self.mhsa_out_chs) self.e_mhsa = EfficientAttention( self.mhsa_out_chs, head_dim=head_dim, sr_ratio=sr_ratio, attn_drop=attn_drop, proj_drop=drop, ) self.mhsa_drop_path = DropPath(drop_path * mix_block_ratio) self.projection = PatchEmbed(self.mhsa_out_chs, self.mhca_out_chs, stride=1, norm_layer=norm_layer) self.mhca = ConvAttention( self.mhca_out_chs, head_dim=head_dim, norm_layer=norm_layer, act_layer=act_layer, ) self.mhca_drop_path = DropPath(drop_path * (1 - mix_block_ratio)) self.norm2 = norm_layer(out_chs) self.mlp = ConvMlp( out_chs, hidden_features=int(out_chs * mlp_ratio), act_layer=act_layer, drop=drop, ) self.mlp_drop_path = DropPath(drop_path) self.is_fused = False @torch.no_grad() def reparameterize(self): if not self.is_fused: merge_pre_bn(self.e_mhsa.q, self.norm1) if self.e_mhsa.norm is not None: merge_pre_bn(self.e_mhsa.k, self.norm1, self.e_mhsa.norm) merge_pre_bn(self.e_mhsa.v, self.norm1, self.e_mhsa.norm) self.e_mhsa.norm = nn.Identity() else: merge_pre_bn(self.e_mhsa.k, self.norm1) merge_pre_bn(self.e_mhsa.v, self.norm1) self.norm1 = nn.Identity() merge_pre_bn(self.mlp.fc1, self.norm2) self.norm2 = nn.Identity() self.is_fused = True def forward(self, x): x = self.patch_embed(x) B, C, H, W = x.shape out = self.norm1(x) out = out.reshape(B, C, -1).transpose(-1, -2) out = self.mhsa_drop_path(self.e_mhsa(out)) x = x + out.transpose(-1, -2).reshape(B, C, H, W) out = self.projection(x) out = out + self.mhca_drop_path(self.mhca(out)) x = torch.cat([x, out], dim=1) out = self.norm2(x) x = x + self.mlp_drop_path(self.mlp(out)) return x class NextStage(nn.Module): def __init__( self, in_chs, block_chs, block_types, stride=2, sr_ratio=1, mix_block_ratio=1.0, drop=0., attn_drop=0., drop_path=0., head_dim=32, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, ): super().__init__() self.grad_checkpointing = False blocks = [] for block_idx, block_type in enumerate(block_types): stride = stride if block_idx == 0 else 1 out_chs = block_chs[block_idx] block_type = block_types[block_idx] dpr = drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path if block_type is NextConvBlock: layer = NextConvBlock( in_chs, out_chs, stride=stride, drop_path=dpr, drop=drop, head_dim=head_dim, norm_layer=norm_layer, act_layer=act_layer, ) blocks.append(layer) elif block_type is NextTransformerBlock: layer = NextTransformerBlock( in_chs, out_chs, drop_path=dpr, stride=stride, sr_ratio=sr_ratio, head_dim=head_dim, mix_block_ratio=mix_block_ratio, attn_drop=attn_drop, 
drop=drop, norm_layer=norm_layer, act_layer=act_layer, ) blocks.append(layer) in_chs = out_chs self.blocks = nn.Sequential(*blocks) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class NextViT(nn.Module): def __init__( self, in_chans, num_classes=1000, global_pool='avg', stem_chs=(64, 32, 64), depths=(3, 4, 10, 3), strides=(1, 2, 2, 2), sr_ratios=(8, 4, 2, 1), drop_path_rate=0.1, attn_drop_rate=0., drop_rate=0., head_dim=32, mix_block_ratio=0.75, norm_layer=nn.BatchNorm2d, act_layer=None, ): super(NextViT, self).__init__() self.grad_checkpointing = False self.num_classes = num_classes norm_layer = get_norm_layer(norm_layer) if act_layer is None: act_layer = partial(nn.ReLU, inplace=True) else: act_layer = get_act_layer(act_layer) self.stage_out_chs = [ [96] * (depths[0]), [192] * (depths[1] - 1) + [256], [384, 384, 384, 384, 512] * (depths[2] // 5), [768] * (depths[3] - 1) + [1024] ] self.feature_info = [dict( num_chs=sc[-1], reduction=2**(i + 2), module=f'stages.{i}' ) for i, sc in enumerate(self.stage_out_chs)] # Next Hybrid Strategy self.stage_block_types = [ [NextConvBlock] * depths[0], [NextConvBlock] * (depths[1] - 1) + [NextTransformerBlock], [NextConvBlock, NextConvBlock, NextConvBlock, NextConvBlock, NextTransformerBlock] * (depths[2] // 5), [NextConvBlock] * (depths[3] - 1) + [NextTransformerBlock]] self.stem = nn.Sequential( ConvNormAct(in_chans, stem_chs[0], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[0], stem_chs[1], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[1], stem_chs[2], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[2], stem_chs[2], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer), ) in_chs = out_chs = stem_chs[-1] stages = [] idx = 0 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] for stage_idx in range(len(depths)): stage = NextStage( in_chs=in_chs, block_chs=self.stage_out_chs[stage_idx], block_types=self.stage_block_types[stage_idx], stride=strides[stage_idx], sr_ratio=sr_ratios[stage_idx], mix_block_ratio=mix_block_ratio, head_dim=head_dim, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[stage_idx], norm_layer=norm_layer, act_layer=act_layer, ) in_chs = out_chs = self.stage_out_chs[stage_idx][-1] stages += [stage] idx += depths[stage_idx] self.num_features = self.head_hidden_size = out_chs self.stages = nn.Sequential(*stages) self.norm = norm_layer(out_chs) self.head = ClassifierHead(pool_type=global_pool, in_features=out_chs, num_classes=num_classes) self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))] self._initialize_weights() def _initialize_weights(self): for n, m in self.named_modules(): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=.02) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm', (99999,)), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): 
self.grad_checkpointing = enable for stage in self.stages: stage.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'head.fc.weight' in state_dict: return state_dict # non-original D = model.state_dict() out_dict = {} # remap originals based on order for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()): out_dict[ka] = vb return out_dict def _create_nextvit(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( NextViT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'nextvit_small.bd_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_base.bd_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_large.bd_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_small.bd_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_base.bd_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_large.bd_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_small.bd_ssld_6m_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_base.bd_ssld_6m_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_large.bd_ssld_6m_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_small.bd_ssld_6m_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_base.bd_ssld_6m_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_large.bd_ssld_6m_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), }) @register_model def nextvit_small(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 10, 3), drop_path_rate=0.1) model = _create_nextvit( 'nextvit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def nextvit_base(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 20, 3), drop_path_rate=0.2) model = _create_nextvit( 'nextvit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def nextvit_large(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 30, 3), drop_path_rate=0.2) model = _create_nextvit( 'nextvit_large', pretrained=pretrained, **dict(model_args, **kwargs)) return 
model
pytorch-image-models/timm/models/nextvit.py/0
{ "file_path": "pytorch-image-models/timm/models/nextvit.py", "repo_id": "pytorch-image-models", "token_count": 12210 }
228
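A minimal usage sketch for the Next-ViT file above: the @register_model functions make the variants available through timm's create_model factory. This assumes a timm installation recent enough to ship the nextvit module; weights are random here (the pretrained tags live in default_cfgs), and in_chans is passed explicitly because the constructor above declares it without a default.

import torch
import timm

# Classification model from the registry defined above (random init, 10-class head).
model = timm.create_model('nextvit_small', pretrained=False, in_chans=3, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 10])

# feature_info in NextViT also supports a features-only backbone:
# one feature map per stage at strides 4, 8, 16 and 32.
backbone = timm.create_model('nextvit_small', pretrained=False, in_chans=3, features_only=True)
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
print([f.shape for f in feats])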
""" SEResNet implementation from Cadene's pretrained models https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py Additional credit to https://github.com/creafz Original model: https://github.com/hujie-frank/SENet ResNet code gently borrowed from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. """ import math from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SENet'] def _weight_init(m): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1.) nn.init.constant_(m.bias, 0.) class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = x.mean((2, 3), keepdim=True) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): """ Base class for bottlenecks that implements `forward()` method. """ def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SEBottleneck(Bottleneck): """ Bottleneck for SENet154. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d( planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): """ ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe implementation and uses `stride=stride` in `conv1` and not in `conv2` (the latter is used in the torchvision implementation of ResNet). 
""" expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): """ ResNeXt bottleneck type C with a Squeeze-and-Excitation module. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): super(SEResNeXtBottleneck, self).__init__() width = math.floor(planes * (base_width / 64)) * groups self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes, reduction=reduction) self.downsample = downsample self.stride = stride def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SENet(nn.Module): def __init__( self, block, layers, groups, reduction, drop_rate=0.2, in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=1000, global_pool='avg'): """ Parameters ---------- block (nn.Module): Bottleneck class. - For SENet154: SEBottleneck - For SE-ResNet models: SEResNetBottleneck - For SE-ResNeXt models: SEResNeXtBottleneck layers (list of ints): Number of residual blocks for 4 layers of the network (layer1...layer4). groups (int): Number of groups for the 3x3 convolution in each bottleneck block. - For SENet154: 64 - For SE-ResNet models: 1 - For SE-ResNeXt models: 32 reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - For all models: 16 dropout_p (float or None): Drop probability for the Dropout layer. If `None` the Dropout layer is not used. - For SENet154: 0.2 - For SE-ResNet models: None - For SE-ResNeXt models: None inplanes (int): Number of input channels for layer1. - For SENet154: 128 - For SE-ResNet models: 64 - For SE-ResNeXt models: 64 input_3x3 (bool): If `True`, use three 3x3 convolutions instead of a single 7x7 convolution in layer0. 
- For SENet154: True - For SE-ResNet models: False - For SE-ResNeXt models: False downsample_kernel_size (int): Kernel size for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 3 - For SE-ResNet models: 1 - For SE-ResNeXt models: 1 downsample_padding (int): Padding for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 1 - For SE-ResNet models: 0 - For SE-ResNeXt models: 0 num_classes (int): Number of outputs in `last_linear` layer. - For all models: 1000 """ super(SENet, self).__init__() self.inplanes = inplanes self.num_classes = num_classes self.drop_rate = drop_rate if input_3x3: layer0_modules = [ ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True)), ] else: layer0_modules = [ ('conv1', nn.Conv2d( in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True)), ] self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] self.layer1 = self._make_layer( block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0 ) self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] self.layer2 = self._make_layer( block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] self.layer3 = self._make_layer( block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] self.layer4 = self._make_layer( block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] self.num_features = self.head_hidden_size = 512 * block.expansion self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) for m in self.modules(): _weight_init(m) def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) @torch.jit.ignore def 
group_matcher(self, coarse=False): matcher = dict(stem=r'^layer0', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.last_linear def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.layer0(x) x = self.pool0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_senet(variant, pretrained=False, **kwargs): return build_model_with_cfg(SENet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', **kwargs } default_cfgs = generate_default_cfgs({ 'legacy_senet154.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'), 'legacy_seresnet18.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', interpolation='bicubic'), 'legacy_seresnet34.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), 'legacy_seresnet50.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), 'legacy_seresnet101.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), 'legacy_seresnet152.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), 'legacy_seresnext26_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', interpolation='bicubic'), 'legacy_seresnext50_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'), 'legacy_seresnext101_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth'), }) @register_model def legacy_seresnet18(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet18', pretrained, **model_args) @register_model def legacy_seresnet34(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet34', pretrained, **model_args) @register_model def legacy_seresnet50(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) 
return _create_senet('legacy_seresnet50', pretrained, **model_args) @register_model def legacy_seresnet101(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet101', pretrained, **model_args) @register_model def legacy_seresnet152(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet152', pretrained, **model_args) @register_model def legacy_senet154(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) return _create_senet('legacy_senet154', pretrained, **model_args) @register_model def legacy_seresnext26_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) @register_model def legacy_seresnext50_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) @register_model def legacy_seresnext101_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
pytorch-image-models/timm/models/senet.py/0
{ "file_path": "pytorch-image-models/timm/models/senet.py", "repo_id": "pytorch-image-models", "token_count": 8363 }
229
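A brief usage sketch for the legacy SENet file above (the header FIXME notes these models are deprecated in favour of the ResNet implementation). Assuming a timm install that still ships timm/models/senet.py, the registered variants go through create_model, or the SENet class can be built directly with the constructor arguments documented above; weights are random in both cases.

import torch
import timm
from timm.models.senet import SENet, SEResNetBottleneck

# Registry route: SE-ResNet-50 as registered above.
model = timm.create_model('legacy_seresnet50', pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])

# Direct construction with the same arguments legacy_seresnet50 uses:
# SEResNetBottleneck blocks, 3-4-6-3 layers, groups=1, reduction=16.
direct = SENet(SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16)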
""" ViTamin Paper: Designing Scalable Vison Models in the Vision-Language Era A family of model weights on Huggingface: https://huggingface.co/collections/jienengchen/vitamin-family-661048126b72debdaca060bf @inproceedings{chen2024vitamin, title={ViTamin: Designing Scalable Vision Models in the Vision-language Era}, author={Chen, Jieneng and Yu, Qihang and Shen, Xiaohui and Yuille, Alan and Chen, Liang-Chieh}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, year={2024} } Based on Apache 2.0 licensed code at https://github.com/ViTamin/ViTamin Modifications and timm support by Jieneng Chen 2024 Reference: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer_hybrid.py """ import math from dataclasses import dataclass, field from functools import partial from typing import Optional, Union, Tuple import torch import torch.nn as nn from timm.data import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import create_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, \ make_divisible, DropPath, HybridEmbed from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq from ._registry import register_model, generate_default_cfgs from .vision_transformer import VisionTransformer, checkpoint_filter_fn @dataclass class VitConvCfg: expand_ratio: float = 4.0 expand_output: bool = True # calculate expansion channels from output (vs input chs) kernel_size: int = 3 group_size: int = 1 # 1 == depthwise pre_norm_act: bool = False # activation after pre-norm stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw' pool_type: str = 'avg2' downsample_pool_type: str = 'avg2' act_layer: str = 'gelu' # stem & stage 1234 norm_layer: str = '' norm_eps: float = 1e-5 down_shortcut: Optional[bool] = True mlp: str = 'mlp' @dataclass class VitCfg: embed_dim: Tuple[Union[int, Tuple[int, ...]], ...] = (96, 192, 384, 768) depths: Tuple[Union[int, Tuple[int, ...]], ...] 
= (2, 3, 5, 2) stem_width: int = 64 conv_cfg: VitConvCfg = field(default_factory=VitConvCfg) head_type: str = "" def _init_conv(module, name, scheme=''): if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) if module.bias is not None: nn.init.zeros_(module.bias) class Stem(nn.Module): def __init__( self, in_chs: int, out_chs: int, act_layer: str = 'gelu', norm_layer: str = 'layernorm2d', norm_eps: float = 1e-6, bias: bool = True, ): super().__init__() norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) self.out_chs = out_chs self.conv1 = create_conv2d(in_chs, out_chs, 3, stride=2, bias=bias) self.norm1 = norm_act_layer(out_chs) self.conv2 = create_conv2d(out_chs, out_chs, 3, stride=1, bias=bias) named_apply(_init_conv, self) def forward(self, x): x = self.conv1(x) x = self.norm1(x) x = self.conv2(x) return x class Downsample2d(nn.Module): def __init__( self, dim: int, dim_out: int, pool_type: str = 'avg2', bias: bool = True, ): super().__init__() self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False) if dim != dim_out: self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) # 1x1 conv else: self.expand = nn.Identity() def forward(self, x): x = self.pool(x) # spatial downsample x = self.expand(x) # expand chs return x class StridedConv(nn.Module): """ downsample 2d as well """ def __init__( self, kernel_size=3, stride=2, padding=1, in_chans=3, embed_dim=768 ): super().__init__() norm_layer = partial(get_norm_layer('layernorm2d'), eps=1e-6) self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) self.norm = norm_layer(in_chans) # affine over C def forward(self, x): x = self.norm(x) x = self.proj(x) return x class MbConvLNBlock(nn.Module): """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand) """ def __init__( self, in_chs: int, out_chs: int, stride: int = 1, drop_path: float = 0., kernel_size: int = 3, norm_layer: str = 'layernorm2d', norm_eps: float = 1e-6, act_layer: str = 'gelu', expand_ratio: float = 4.0, ): super(MbConvLNBlock, self).__init__() self.stride, self.in_chs, self.out_chs = stride, in_chs, out_chs mid_chs = make_divisible(out_chs * expand_ratio) prenorm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs, pool_type='avg', bias=True) elif in_chs != out_chs: self.shortcut = nn.Conv2d(in_chs, out_chs, 1, bias=True) else: self.shortcut = nn.Identity() self.pre_norm = prenorm_act_layer(in_chs, apply_act=False) self.down = nn.Identity() self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=1, bias=True) self.act1 = create_act_layer(act_layer, inplace=True) self.conv2_kxk = create_conv2d( mid_chs, mid_chs, kernel_size, stride=stride, dilation=1, groups=mid_chs, bias=True) self.act2 = create_act_layer(act_layer, inplace=True) self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=True) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): shortcut = self.shortcut(x) x = self.pre_norm(x) x = self.down(x) # nn.Identity() # 1x1 expansion conv & act x = self.conv1_1x1(x) x = self.act1(x) # (strided) depthwise 3x3 conv & act x = self.conv2_kxk(x) x = self.act2(x) # 1x1 linear projection to output width x = self.conv3_1x1(x) x = self.drop_path(x) + shortcut return x class MbConvStages(nn.Module): """ MobileConv for stage 1 and stage 2 of ViTamin """ def __init__( self, cfg: VitCfg, img_size: Union[int, Tuple[int, int]] = 224, # place holder in_chans: int = 3, ): super().__init__() self.grad_checkpointing = False self.stem = Stem( in_chs=in_chans, out_chs=cfg.stem_width, ) stages = [] self.num_stages = len(cfg.embed_dim) for s, dim in enumerate(cfg.embed_dim[:2]): # stage stage_in_chs = cfg.embed_dim[s-1] if s>0 else cfg.stem_width blocks = [ MbConvLNBlock( in_chs = stage_in_chs if d==0 else dim, out_chs = dim, stride = 2 if d == 0 else 1, ) for d in range(cfg.depths[s]) ] stages += [nn.Sequential(*blocks)] self.stages = nn.Sequential(*stages) self.pool = StridedConv( stride=2, in_chans=cfg.embed_dim[1], embed_dim=cfg.embed_dim[2] ) def forward(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.pool(x) return x class GeGluMlp(nn.Module): def __init__( self, in_features, hidden_features, act_layer = 'gelu', drop = 0.0, ): super().__init__() norm_layer = partial(get_norm_layer('layernorm'), eps=1e-6) self.norm = norm_layer(in_features) self.w0 = nn.Linear(in_features, hidden_features) self.act = create_act_layer(act_layer) self.w1 = nn.Linear(in_features, hidden_features) self.w2 = nn.Linear(hidden_features, in_features) def forward(self, x): x = self.norm(x) x = self.act(self.w0(x)) * self.w1(x) x = self.w2(x) return x def _create_vitamin(variant, pretrained=False, embed_cfg=None, **kwargs): out_indices = kwargs.pop('out_indices', 3) assert embed_cfg is not None backbone = MbConvStages(cfg=embed_cfg, in_chans=kwargs.get('in_chans', 3)) kwargs['embed_layer'] = partial(HybridEmbed, backbone=backbone, proj=False) kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set return build_model_with_cfg( VisionTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, 'first_conv': 'patch_embed.backbone.stem.conv1', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'vitamin_small_224.datacomp1b_clip_ltt': _cfg( hf_hub_id='jienengchen/ViTamin-S-LTT', num_classes=384), 'vitamin_small_224.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-S', num_classes=384), 'vitamin_base_224.datacomp1b_clip_ltt': _cfg( hf_hub_id='jienengchen/ViTamin-B-LTT', num_classes=768), 'vitamin_base_224.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-B', num_classes=768), 'vitamin_large_224.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-L-224px', num_classes=768), 'vitamin_large_256.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-L-256px', num_classes=768, input_size=(3, 256, 256), crop_pct=1.0), 'vitamin_large_336.datacomp1b_clip': 
_cfg( hf_hub_id='jienengchen/ViTamin-L-336px', num_classes=768, input_size=(3, 336, 336), crop_pct=1.0), 'vitamin_large_384.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-L-384px', num_classes=768, input_size=(3, 384, 384), crop_pct=1.0), 'vitamin_large2_224.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-L2-224px', num_classes=1024), 'vitamin_large2_256.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-L2-256px', num_classes=1024, input_size=(3, 256, 256), crop_pct=1.0), 'vitamin_large2_336.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-L2-336px', num_classes=1024, input_size=(3, 336, 336), crop_pct=1.0), 'vitamin_large2_384.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-L2-384px', num_classes=1024, input_size=(3, 384, 384), crop_pct=1.0), 'vitamin_xlarge_256.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-XL-256px', num_classes=1152, input_size=(3, 256, 256), crop_pct=1.0), 'vitamin_xlarge_336.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-XL-336px', num_classes=1152, input_size=(3, 336, 336), crop_pct=1.0), 'vitamin_xlarge_384.datacomp1b_clip': _cfg( hf_hub_id='jienengchen/ViTamin-XL-384px', num_classes=1152, input_size=(3, 384, 384), crop_pct=1.0), }) @register_model def vitamin_small_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(64, 128, 384), depths=(2, 4, 1), stem_width=64, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( embed_dim=384, depth=14, num_heads=6, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg ) model = _create_vitamin('vitamin_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_base_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(128, 256, 768), depths=(2, 4, 1), stem_width=128, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( embed_dim=768, depth=14, num_heads=12, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg, ) model = _create_vitamin('vitamin_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_256(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=256, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_336(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) 
model_args = dict( img_size=336, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg ) model = _create_vitamin('vitamin_large_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large_384(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=384, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_224(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg, ) model = _create_vitamin('vitamin_large2_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_256(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=256, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large2_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_336(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=336, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg ) model = _create_vitamin('vitamin_large2_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_large2_384(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(160, 320, 1024), depths=(2, 4, 1), stem_width=160, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=384, embed_dim=1024, depth=31, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_large2_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_xlarge_256(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg=VitCfg( embed_dim=(192, 384, 1152), depths=(2, 4, 1), stem_width=192, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=256, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) model = _create_vitamin( 'vitamin_xlarge_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_xlarge_336(pretrained=False, **kwargs) -> 
VisionTransformer: embed_cfg = VitCfg( embed_dim=(192, 384, 1152), depths=(2, 4, 1), stem_width=192, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=336, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_xlarge_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vitamin_xlarge_384(pretrained=False, **kwargs) -> VisionTransformer: embed_cfg = VitCfg( embed_dim=(192, 384, 1152), depths=(2, 4, 1), stem_width=192, conv_cfg=VitConvCfg( norm_layer='layernorm2d', norm_eps=1e-6, ), head_type='1d', ) model_args = dict( img_size=384, embed_dim=1152, depth=32, num_heads=16, mlp_layer=GeGluMlp, mlp_ratio=2., class_token=False, global_pool='avg', pos_embed='none', embed_cfg=embed_cfg) model = _create_vitamin('vitamin_xlarge_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/vitamin.py/0
{ "file_path": "pytorch-image-models/timm/models/vitamin.py", "repo_id": "pytorch-image-models", "token_count": 10041 }
230
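A short usage sketch for the ViTamin file above. The registered models are hybrid VisionTransformers (MbConv stages feeding a ViT), and the pretrained tags point at CLIP image towers whose num_classes is really an embedding width (384 for the small model). This assumes a timm version that includes vitamin.py; pretrained=False avoids pulling the Hugging Face checkpoints listed in default_cfgs.

import torch
import timm

# Use the model as a feature extractor: num_classes=0 drops the head and the
# average-pooled 384-dim embedding of vitamin_small is returned directly.
model = timm.create_model('vitamin_small_224', pretrained=False, num_classes=0)
model.eval()
with torch.no_grad():
    embedding = model(torch.randn(1, 3, 224, 224))
print(embedding.shape)  # torch.Size([1, 384])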
""" Lookahead Optimizer Wrapper. Implementation modified from: https://github.com/alphadl/lookahead.pytorch Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from typing import Callable, Dict import torch from torch.optim.optimizer import Optimizer from collections import defaultdict class Lookahead(Optimizer): def __init__(self, base_optimizer, alpha=0.5, k=6): # NOTE super().__init__() not called on purpose self._optimizer_step_pre_hooks: Dict[int, Callable] = OrderedDict() self._optimizer_step_post_hooks: Dict[int, Callable] = OrderedDict() if not 0.0 <= alpha <= 1.0: raise ValueError(f'Invalid slow update rate: {alpha}') if not 1 <= k: raise ValueError(f'Invalid lookahead steps: {k}') defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) self._base_optimizer = base_optimizer self.param_groups = base_optimizer.param_groups self.defaults = base_optimizer.defaults self.defaults.update(defaults) self.state = defaultdict(dict) # manually add our defaults to the param groups for name, default in defaults.items(): for group in self._base_optimizer.param_groups: group.setdefault(name, default) @torch.no_grad() def update_slow(self, group): for fast_p in group["params"]: if fast_p.grad is None: continue param_state = self._base_optimizer.state[fast_p] if 'lookahead_slow_buff' not in param_state: param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) param_state['lookahead_slow_buff'].copy_(fast_p) slow = param_state['lookahead_slow_buff'] slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) fast_p.copy_(slow) def sync_lookahead(self): for group in self._base_optimizer.param_groups: self.update_slow(group) @torch.no_grad() def step(self, closure=None): loss = self._base_optimizer.step(closure) for group in self._base_optimizer.param_groups: group['lookahead_step'] += 1 if group['lookahead_step'] % group['lookahead_k'] == 0: self.update_slow(group) return loss def state_dict(self): return self._base_optimizer.state_dict() def load_state_dict(self, state_dict): self._base_optimizer.load_state_dict(state_dict) self.param_groups = self._base_optimizer.param_groups
pytorch-image-models/timm/optim/lookahead.py/0
{ "file_path": "pytorch-image-models/timm/optim/lookahead.py", "repo_id": "pytorch-image-models", "token_count": 1134 }
231
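A small sketch of how the Lookahead wrapper above is meant to be used: it wraps any base optimizer, performs the fast update on every step, interpolates the slow weights every k steps, and sync_lookahead() copies the slow weights back before evaluation. The toy model and data are placeholders, shown only to illustrate the call sequence.

import torch
import torch.nn as nn
from timm.optim.lookahead import Lookahead

model = nn.Linear(10, 2)
base = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
opt = Lookahead(base, alpha=0.5, k=6)  # slow weights move every 6 fast steps

for _ in range(12):
    opt.zero_grad()
    loss = model(torch.randn(4, 10)).pow(2).mean()
    loss.backward()
    opt.step()  # fast step; every k-th step also updates the slow copies

opt.sync_lookahead()  # push slow weights into the params before eval/checkpoint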
""" Scheduler Factory Hacked together by / Copyright 2021 Ross Wightman """ from typing import List, Optional, Union from torch.optim import Optimizer from .cosine_lr import CosineLRScheduler from .multistep_lr import MultiStepLRScheduler from .plateau_lr import PlateauLRScheduler from .poly_lr import PolyLRScheduler from .step_lr import StepLRScheduler from .tanh_lr import TanhLRScheduler def scheduler_kwargs(cfg, decreasing_metric: Optional[bool] = None): """ cfg/argparse to kwargs helper Convert scheduler args in argparse args or cfg (.dot) like object to keyword args. """ eval_metric = getattr(cfg, 'eval_metric', 'top1') if decreasing_metric is not None: plateau_mode = 'min' if decreasing_metric else 'max' else: plateau_mode = 'min' if 'loss' in eval_metric else 'max' kwargs = dict( sched=cfg.sched, num_epochs=getattr(cfg, 'epochs', 100), decay_epochs=getattr(cfg, 'decay_epochs', 30), decay_milestones=getattr(cfg, 'decay_milestones', [30, 60]), warmup_epochs=getattr(cfg, 'warmup_epochs', 5), cooldown_epochs=getattr(cfg, 'cooldown_epochs', 0), patience_epochs=getattr(cfg, 'patience_epochs', 10), decay_rate=getattr(cfg, 'decay_rate', 0.1), min_lr=getattr(cfg, 'min_lr', 0.), warmup_lr=getattr(cfg, 'warmup_lr', 1e-5), warmup_prefix=getattr(cfg, 'warmup_prefix', False), noise=getattr(cfg, 'lr_noise', None), noise_pct=getattr(cfg, 'lr_noise_pct', 0.67), noise_std=getattr(cfg, 'lr_noise_std', 1.), noise_seed=getattr(cfg, 'seed', 42), cycle_mul=getattr(cfg, 'lr_cycle_mul', 1.), cycle_decay=getattr(cfg, 'lr_cycle_decay', 0.1), cycle_limit=getattr(cfg, 'lr_cycle_limit', 1), k_decay=getattr(cfg, 'lr_k_decay', 1.0), plateau_mode=plateau_mode, step_on_epochs=not getattr(cfg, 'sched_on_updates', False), ) return kwargs def create_scheduler( args, optimizer: Optimizer, updates_per_epoch: int = 0, ): return create_scheduler_v2( optimizer=optimizer, **scheduler_kwargs(args), updates_per_epoch=updates_per_epoch, ) def create_scheduler_v2( optimizer: Optimizer, sched: str = 'cosine', num_epochs: int = 300, decay_epochs: int = 90, decay_milestones: List[int] = (90, 180, 270), cooldown_epochs: int = 0, patience_epochs: int = 10, decay_rate: float = 0.1, min_lr: float = 0, warmup_lr: float = 1e-5, warmup_epochs: int = 0, warmup_prefix: bool = False, noise: Union[float, List[float]] = None, noise_pct: float = 0.67, noise_std: float = 1., noise_seed: int = 42, cycle_mul: float = 1., cycle_decay: float = 0.1, cycle_limit: int = 1, k_decay: float = 1.0, plateau_mode: str = 'max', step_on_epochs: bool = True, updates_per_epoch: int = 0, ): t_initial = num_epochs warmup_t = warmup_epochs decay_t = decay_epochs cooldown_t = cooldown_epochs if not step_on_epochs: assert updates_per_epoch > 0, 'updates_per_epoch must be set to number of dataloader batches' t_initial = t_initial * updates_per_epoch warmup_t = warmup_t * updates_per_epoch decay_t = decay_t * updates_per_epoch decay_milestones = [d * updates_per_epoch for d in decay_milestones] cooldown_t = cooldown_t * updates_per_epoch # warmup args warmup_args = dict( warmup_lr_init=warmup_lr, warmup_t=warmup_t, warmup_prefix=warmup_prefix, ) # setup noise args for supporting schedulers if noise is not None: if isinstance(noise, (list, tuple)): noise_range = [n * t_initial for n in noise] if len(noise_range) == 1: noise_range = noise_range[0] else: noise_range = noise * t_initial else: noise_range = None noise_args = dict( noise_range_t=noise_range, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, ) # setup cycle args for supporting schedulers 
cycle_args = dict( cycle_mul=cycle_mul, cycle_decay=cycle_decay, cycle_limit=cycle_limit, ) lr_scheduler = None if sched == 'cosine': lr_scheduler = CosineLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, k_decay=k_decay, ) elif sched == 'tanh': lr_scheduler = TanhLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, ) elif sched == 'step': lr_scheduler = StepLRScheduler( optimizer, decay_t=decay_t, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'multistep': lr_scheduler = MultiStepLRScheduler( optimizer, decay_t=decay_milestones, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'plateau': assert step_on_epochs, 'Plateau LR only supports step per epoch.' warmup_args.pop('warmup_prefix', False) lr_scheduler = PlateauLRScheduler( optimizer, decay_rate=decay_rate, patience_t=patience_epochs, cooldown_t=0, **warmup_args, lr_min=min_lr, mode=plateau_mode, **noise_args, ) elif sched == 'poly': lr_scheduler = PolyLRScheduler( optimizer, power=decay_rate, # overloading 'decay_rate' as polynomial power t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, k_decay=k_decay, **cycle_args, **warmup_args, **noise_args, ) if hasattr(lr_scheduler, 'get_cycle_length'): # for cycle based schedulers (cosine, tanh, poly) recalculate total epochs w/ cycles & cooldown t_with_cycles_and_cooldown = lr_scheduler.get_cycle_length() + cooldown_t if step_on_epochs: num_epochs = t_with_cycles_and_cooldown else: num_epochs = t_with_cycles_and_cooldown // updates_per_epoch return lr_scheduler, num_epochs
pytorch-image-models/timm/scheduler/scheduler_factory.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/scheduler_factory.py", "repo_id": "pytorch-image-models", "token_count": 3467 }
232
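A compact sketch of create_scheduler_v2 from the file above, stepping per epoch (the default). The numbers are illustrative; note that the returned num_epochs already includes any cycle and cooldown extension.

import torch
from timm.scheduler.scheduler_factory import create_scheduler_v2

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)

# Cosine decay over 100 epochs with a 5-epoch warmup from 1e-4 down toward min_lr.
scheduler, num_epochs = create_scheduler_v2(
    optimizer,
    sched='cosine',
    num_epochs=100,
    warmup_epochs=5,
    warmup_lr=1e-4,
    min_lr=1e-5,
)

for epoch in range(num_epochs):
    # ... run one training epoch here ...
    scheduler.step(epoch + 1)  # timm schedulers take the upcoming epoch index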
""" Exponential Moving Average (EMA) of model updates Hacked together by / Copyright 2020 Ross Wightman """ import logging from collections import OrderedDict from copy import deepcopy from typing import Optional import torch import torch.nn as nn _logger = logging.getLogger(__name__) class ModelEma: """ Model Exponential Moving Average (DEPRECATED) Keep a moving average of everything in the model state_dict (parameters and buffers). This version is deprecated, it does not work with scripted models. Will be removed eventually. This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA smoothing of weights to match results. Pay attention to the decay constant you are using relative to your update count per epoch. To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__(self, model, decay=0.9999, device='', resume=''): # make a copy of the model for accumulating moving average of weights self.ema = deepcopy(model) self.ema.eval() self.decay = decay self.device = device # perform ema on different device from model if set if device: self.ema.to(device=device) self.ema_has_module = hasattr(self.ema, 'module') if resume: self._load_checkpoint(resume) for p in self.ema.parameters(): p.requires_grad_(False) def _load_checkpoint(self, checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location='cpu') assert isinstance(checkpoint, dict) if 'state_dict_ema' in checkpoint: new_state_dict = OrderedDict() for k, v in checkpoint['state_dict_ema'].items(): # ema model may have been wrapped by DataParallel, and need module prefix if self.ema_has_module: name = 'module.' + k if not k.startswith('module') else k else: name = k new_state_dict[name] = v self.ema.load_state_dict(new_state_dict) _logger.info("Loaded state_dict_ema") else: _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") def update(self, model): # correct a mismatch in state dict keys needs_module = hasattr(model, 'module') and not self.ema_has_module with torch.no_grad(): msd = model.state_dict() for k, ema_v in self.ema.state_dict().items(): if needs_module: k = 'module.' + k model_v = msd[k].detach() if self.device: model_v = model_v.to(device=self.device) ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) class ModelEmaV2(nn.Module): """ Model Exponential Moving Average V2 Keep a moving average of everything in the model state_dict (parameters and buffers). V2 of this module is simpler, it does not match params/buffers based on name but simply iterates in order. It works with torchscript (JIT of full model). This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. E.g. 
Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA smoothing of weights to match results. Pay attention to the decay constant you are using relative to your update count per epoch. To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__(self, model, decay=0.9999, device=None): super().__init__() # make a copy of the model for accumulating moving average of weights self.module = deepcopy(model) self.module.eval() self.decay = decay self.device = device # perform ema on different device from model if set if self.device is not None: self.module.to(device=device) def _update(self, model, update_fn): with torch.no_grad(): for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): if self.device is not None: model_v = model_v.to(device=self.device) ema_v.copy_(update_fn(ema_v, model_v)) def update(self, model): self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) def set(self, model): self._update(model, update_fn=lambda e, m: m) def forward(self, *args, **kwargs): return self.module(*args, **kwargs) class ModelEmaV3(nn.Module): """ Model Exponential Moving Average V3 Keep a moving average of everything in the model state_dict (parameters and buffers). V3 of this module leverages for_each and in-place operations for faster performance. Decay warmup based on code by @crowsonkb, her comments: If inv_gamma=1 and power=1, implements a simple average. inv_gamma=1, power=2/3 are good values for models you plan to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), inv_gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at 215.4k steps). This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. 
""" def __init__( self, model, decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_warmup: bool = False, warmup_gamma: float = 1.0, warmup_power: float = 2/3, device: Optional[torch.device] = None, foreach: bool = True, exclude_buffers: bool = False, ): super().__init__() # make a copy of the model for accumulating moving average of weights self.module = deepcopy(model) self.module.eval() self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_warmup = use_warmup self.warmup_gamma = warmup_gamma self.warmup_power = warmup_power self.foreach = foreach self.device = device # perform ema on different device from model if set self.exclude_buffers = exclude_buffers if self.device is not None and device != next(model.parameters()).device: self.foreach = False # cannot use foreach methods with different devices self.module.to(device=device) def get_decay(self, step: Optional[int] = None) -> float: """ Compute the decay factor for the exponential moving average. """ if step is None: return self.decay step = max(0, step - self.update_after_step - 1) if step <= 0: return 0.0 if self.use_warmup: decay = 1 - (1 + step / self.warmup_gamma) ** -self.warmup_power decay = max(min(decay, self.decay), self.min_decay) else: decay = self.decay return decay @torch.no_grad() def update(self, model, step: Optional[int] = None): decay = self.get_decay(step) if self.exclude_buffers: self.apply_update_no_buffers_(model, decay) else: self.apply_update_(model, decay) def apply_update_(self, model, decay: float): # interpolate parameters and buffers if self.foreach: ema_lerp_values = [] model_lerp_values = [] for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): if ema_v.is_floating_point(): ema_lerp_values.append(ema_v) model_lerp_values.append(model_v) else: ema_v.copy_(model_v) if hasattr(torch, '_foreach_lerp_'): torch._foreach_lerp_(ema_lerp_values, model_lerp_values, weight=1. - decay) else: torch._foreach_mul_(ema_lerp_values, scalar=decay) torch._foreach_add_(ema_lerp_values, model_lerp_values, alpha=1. - decay) else: for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): if ema_v.is_floating_point(): ema_v.lerp_(model_v.to(device=self.device), weight=1. - decay) else: ema_v.copy_(model_v.to(device=self.device)) def apply_update_no_buffers_(self, model, decay: float): # interpolate parameters, copy buffers ema_params = tuple(self.module.parameters()) model_params = tuple(model.parameters()) if self.foreach: if hasattr(torch, '_foreach_lerp_'): torch._foreach_lerp_(ema_params, model_params, weight=1. - decay) else: torch._foreach_mul_(ema_params, scalar=decay) torch._foreach_add_(ema_params, model_params, alpha=1 - decay) else: for ema_p, model_p in zip(ema_params, model_params): ema_p.lerp_(model_p.to(device=self.device), weight=1. - decay) for ema_b, model_b in zip(self.module.buffers(), model.buffers()): ema_b.copy_(model_b.to(device=self.device)) @torch.no_grad() def set(self, model): for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): ema_v.copy_(model_v.to(device=self.device)) def forward(self, *args, **kwargs): return self.module(*args, **kwargs)
pytorch-image-models/timm/utils/model_ema.py/0
{ "file_path": "pytorch-image-models/timm/utils/model_ema.py", "repo_id": "pytorch-image-models", "token_count": 4614 }
233
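The `ModelEmaV3` docstring in the file above explains the warmup decay schedule (with `inv_gamma=1`, `power=2/3` the decay reaches roughly 0.999 around 31.6K steps). To make the intended usage concrete, here is a minimal, hypothetical training-loop sketch; the import path assumes a `timm` release that ships `ModelEmaV3`, and the model, data, and hyper-parameters are placeholders rather than recommended values.

```python
# Hypothetical usage sketch for ModelEmaV3; not part of the timm sources above.
import torch
import torch.nn as nn
from timm.utils.model_ema import ModelEmaV3  # assumes a timm version that includes V3

model = nn.Linear(16, 4)                      # placeholder model
ema = ModelEmaV3(model, decay=0.9999, use_warmup=True, update_after_step=100)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for step in range(1_000):
    x = torch.randn(32, 16)                   # placeholder batch
    loss = model(x).pow(2).mean()             # placeholder loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model, step=step)              # EMA weights lag behind the live weights

# Evaluate or checkpoint with the smoothed copy rather than the raw model.
ema_model = ema.module
ema_model.eval()
```

With `use_warmup=True`, early updates use a small decay (so the EMA can track the fast-moving weights), and the decay ramps toward `0.9999` as training progresses.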
[package]
name = "grpc-metadata"
version = "0.1.0"
edition = "2021"

[dependencies]
opentelemetry = "^0.20"
tonic = "^0.10"
tracing = "^0.1"
tracing-opentelemetry = "^0.21"
text-generation-inference/backends/grpc-metadata/Cargo.toml/0
{ "file_path": "text-generation-inference/backends/grpc-metadata/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 83 }
234
#!/bin/bash set -ex TRT_VER="10.2.0.19" CUDA_VER="12.5" CUDNN_VER="9.2.1.18-1" NCCL_VER="2.22.3-1+cuda12.5" CUBLAS_VER="12.5.3.2-1" NVRTC_VER="12.5.82-1" for i in "$@"; do case $i in --TRT_VER=?*) TRT_VER="${i#*=}";; --CUDA_VER=?*) CUDA_VER="${i#*=}";; --CUDNN_VER=?*) CUDNN_VER="${i#*=}";; --NCCL_VER=?*) NCCL_VER="${i#*=}";; --CUBLAS_VER=?*) CUBLAS_VER="${i#*=}";; *) ;; esac shift done NVCC_VERSION_OUTPUT=$(nvcc --version) if [[ $(echo $NVCC_VERSION_OUTPUT | grep -oP "\d+\.\d+" | head -n 1) != ${CUDA_VER} ]]; then echo "The version of pre-installed CUDA is not equal to ${CUDA_VER}." exit 1 fi install_ubuntu_requirements() { apt-get update && apt-get install -y --no-install-recommends gnupg2 curl ca-certificates ARCH=$(uname -m) if [ "$ARCH" = "amd64" ];then ARCH="x86_64";fi if [ "$ARCH" = "aarch64" ];then ARCH="sbsa";fi curl -fsSLO https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/${ARCH}/cuda-keyring_1.0-1_all.deb dpkg -i cuda-keyring_1.0-1_all.deb apt-get update if [[ $(apt list --installed | grep libcudnn9) ]]; then apt-get remove --purge -y --allow-change-held-packages libcudnn9* fi if [[ $(apt list --installed | grep libnccl) ]]; then apt-get remove --purge -y --allow-change-held-packages libnccl* fi if [[ $(apt list --installed | grep libcublas) ]]; then apt-get remove --purge -y --allow-change-held-packages libcublas* fi if [[ $(apt list --installed | grep cuda-nvrtc-dev) ]]; then apt-get remove --purge -y --allow-change-held-packages cuda-nvrtc-dev* fi CUBLAS_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g') apt-get install -y --no-install-recommends libcudnn9-cuda-12=${CUDNN_VER} libcudnn9-dev-cuda-12=${CUDNN_VER} apt-get install -y --no-install-recommends libnccl2=${NCCL_VER} libnccl-dev=${NCCL_VER} apt-get install -y --no-install-recommends libcublas-${CUBLAS_CUDA_VERSION}=${CUBLAS_VER} libcublas-dev-${CUBLAS_CUDA_VERSION}=${CUBLAS_VER} # NVRTC static library doesn't exist in NGC PyTorch container. 
NVRTC_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g') apt-get install -y --no-install-recommends cuda-nvrtc-dev-${NVRTC_CUDA_VERSION}=${NVRTC_VER} apt-get clean rm -rf /var/lib/apt/lists/* } install_centos_requirements() { CUBLAS_CUDA_VERSION=$(echo $CUDA_VER | sed 's/\./-/g') yum -y update yum -y install epel-release yum remove -y libnccl* && yum -y install libnccl-${NCCL_VER} libnccl-devel-${NCCL_VER} yum remove -y libcublas* && yum -y install libcublas-${CUBLAS_CUDA_VERSION}-${CUBLAS_VER} libcublas-devel-${CUBLAS_CUDA_VERSION}-${CUBLAS_VER} yum clean all } install_tensorrt() { #PY_VERSION=$(python3 -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))') #PARSED_PY_VERSION=$(echo "${PY_VERSION//./}") TRT_CUDA_VERSION="12.5" if [ -z "$RELEASE_URL_TRT" ];then ARCH=${TRT_TARGETARCH} if [ -z "$ARCH" ];then ARCH=$(uname -m);fi if [ "$ARCH" = "arm64" ];then ARCH="aarch64";fi if [ "$ARCH" = "amd64" ];then ARCH="x86_64";fi if [ "$ARCH" = "x86_64" ];then DIR_NAME="x64-agnostic"; else DIR_NAME=${ARCH};fi if [ "$ARCH" = "aarch64" ];then OS1="Ubuntu22_04" && OS2="Ubuntu-22.04" && OS="ubuntu-22.04"; else OS1="Linux" && OS2="Linux" && OS="linux";fi RELEASE_URL_TRT=https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.2.0/tars/TensorRT-${TRT_VER}.${OS2}.${ARCH}-gnu.cuda-${TRT_CUDA_VERSION}.tar.gz fi wget --no-verbose ${RELEASE_URL_TRT} -O /tmp/TensorRT.tar tar -xf /tmp/TensorRT.tar -C /usr/local/ mv /usr/local/TensorRT-${TRT_VER} /usr/local/tensorrt # pip3 install /usr/local/tensorrt/python/tensorrt-*-cp${PARSED_PY_VERSION}-*.whl rm -rf /tmp/TensorRT.tar } # Install base packages depending on the base OS ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') case "$ID" in debian) install_ubuntu_requirements install_tensorrt ;; ubuntu) install_ubuntu_requirements install_tensorrt ;; centos) install_centos_requirements install_tensorrt ;; *) echo "Unable to determine OS..." exit 1 ;; esac
text-generation-inference/backends/trtllm/scripts/install_tensorrt.sh/0
{ "file_path": "text-generation-inference/backends/trtllm/scripts/install_tensorrt.sh", "repo_id": "text-generation-inference", "token_count": 2016 }
235
use clap::{Parser, Subcommand}; use text_generation_router::{server, usage_stats}; use text_generation_router_v3::{connect_backend, V3Error}; use thiserror::Error; /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { #[command(subcommand)] command: Option<Commands>, #[clap(default_value = "128", long, env)] max_concurrent_requests: usize, #[clap(default_value = "2", long, env)] max_best_of: usize, #[clap(default_value = "4", long, env)] max_stop_sequences: usize, #[clap(default_value = "5", long, env)] max_top_n_tokens: u32, #[clap(default_value = "1024", long, env)] max_input_tokens: usize, #[clap(default_value = "2048", long, env)] max_total_tokens: usize, #[clap(default_value = "1.2", long, env)] waiting_served_ratio: f32, #[clap(default_value = "4096", long, env)] max_batch_prefill_tokens: u32, #[clap(long, env)] max_batch_total_tokens: Option<u32>, #[clap(default_value = "20", long, env)] max_waiting_tokens: usize, #[clap(long, env)] max_batch_size: Option<usize>, #[clap(default_value = "0.0.0.0", long, env)] hostname: String, #[clap(default_value = "3000", long, short, env)] port: u16, #[clap(default_value = "/tmp/text-generation-server-0", long, env)] master_shard_uds_path: String, #[clap(default_value = "bigscience/bloom", long, env)] tokenizer_name: String, #[clap(long, env)] tokenizer_config_path: Option<String>, #[clap(long, env)] revision: Option<String>, #[clap(default_value = "2", long, env)] validation_workers: usize, #[clap(long, env)] api_key: Option<String>, #[clap(long, env)] json_output: bool, #[clap(long, env)] otlp_endpoint: Option<String>, #[clap(default_value = "text-generation-inference.router", long, env)] otlp_service_name: String, #[clap(long, env)] cors_allow_origin: Option<Vec<String>>, #[clap(long, env)] ngrok: bool, #[clap(long, env)] ngrok_authtoken: Option<String>, #[clap(long, env)] ngrok_edge: Option<String>, #[clap(long, env, default_value_t = false)] messages_api_enabled: bool, #[clap(long, env, default_value_t = false)] disable_grammar_support: bool, #[clap(default_value = "4", long, env)] max_client_batch_size: usize, #[clap(default_value = "on", long, env)] usage_stats: usage_stats::UsageStatsLevel, } #[derive(Debug, Subcommand)] enum Commands { PrintSchema, } #[tokio::main] async fn main() -> Result<(), RouterError> { // Get args let args = Args::parse(); // Pattern match configuration let Args { command, max_concurrent_requests, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_tokens, max_total_tokens, waiting_served_ratio, max_batch_prefill_tokens, max_batch_total_tokens, max_waiting_tokens, max_batch_size, hostname, port, master_shard_uds_path, tokenizer_name, tokenizer_config_path, revision, validation_workers, api_key, json_output, otlp_endpoint, otlp_service_name, cors_allow_origin, ngrok, ngrok_authtoken, ngrok_edge, messages_api_enabled, disable_grammar_support, max_client_batch_size, usage_stats, } = args; if let Some(Commands::PrintSchema) = command { use utoipa::OpenApi; let api_doc = text_generation_router::server::ApiDoc::openapi(); let api_doc = serde_json::to_string_pretty(&api_doc).unwrap(); println!("{}", api_doc); std::process::exit(0); }; text_generation_router::logging::init_logging(otlp_endpoint, otlp_service_name, json_output); // Validate args if max_input_tokens >= max_total_tokens { return Err(RouterError::ArgumentValidation( "`max_input_tokens` must be < `max_total_tokens`".to_string(), )); } if max_input_tokens as u32 > max_batch_prefill_tokens { 
return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_tokens`. Given: {max_batch_prefill_tokens} and {max_input_tokens}"))); } if validation_workers == 0 { return Err(RouterError::ArgumentValidation( "`validation_workers` must be > 0".to_string(), )); } if let Some(ref max_batch_total_tokens) = max_batch_total_tokens { if max_batch_prefill_tokens > *max_batch_total_tokens { return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}"))); } if max_total_tokens as u32 > *max_batch_total_tokens { return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}"))); } } if let Some(max_batch_size) = max_batch_size { if max_batch_size == 0 { return Err(RouterError::ArgumentValidation( "`max_batch_size` must be > 0".to_string(), )); } } let (backend, _backend_info) = connect_backend( max_input_tokens, max_total_tokens, master_shard_uds_path, waiting_served_ratio, max_batch_prefill_tokens, max_batch_total_tokens, max_waiting_tokens, max_batch_size, ) .await?; // Run server server::run( backend, max_concurrent_requests, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_tokens, max_total_tokens, validation_workers, api_key, tokenizer_name, tokenizer_config_path, revision, hostname, port, cors_allow_origin, ngrok, ngrok_authtoken, ngrok_edge, messages_api_enabled, disable_grammar_support, max_client_batch_size, usage_stats, ) .await?; Ok(()) } #[derive(Debug, Error)] enum RouterError { #[error("Argument validation error: {0}")] ArgumentValidation(String), #[error("Backend failed: {0}")] Backend(#[from] V3Error), #[error("WebServer error: {0}")] WebServer(#[from] server::WebServerError), #[error("Tokio runtime failed to start: {0}")] Tokio(#[from] std::io::Error), }
text-generation-inference/backends/v3/src/main.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/main.rs", "repo_id": "text-generation-inference", "token_count": 3129 }
236
[tool.poetry]
name = "text-generation"
version = "0.7.0"
description = "Hugging Face Text Generation Python Client"
license = "Apache-2.0"
authors = ["Olivier Dehaene <olivier@huggingface.co>"]
maintainers = ["Olivier Dehaene <olivier@huggingface.co>"]
readme = "README.md"
homepage = "https://github.com/huggingface/text-generation-inference"
repository = "https://github.com/huggingface/text-generation-inference"

[tool.poetry.dependencies]
python = "^3.7"
pydantic = "> 2, < 3"
aiohttp = "^3.8"
huggingface-hub = ">= 0.12, < 1.0"

[tool.poetry.dev-dependencies]
pytest = "^6.2.5"
pytest-asyncio = "^0.17.2"
pytest-cov = "^3.0.0"

[tool.pytest.ini_options]
asyncio_mode = "auto"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
text-generation-inference/clients/python/pyproject.toml/0
{ "file_path": "text-generation-inference/clients/python/pyproject.toml", "repo_id": "text-generation-inference", "token_count": 334 }
237
# Consuming Text Generation Inference

There are many ways to consume the Text Generation Inference (TGI) server in your applications. After launching the server, you can use the [Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) `/v1/chat/completions` route and make a `POST` request to get results from the server. You can also pass `"stream": true` to the call if you want TGI to return a stream of tokens.

For more information on the API, consult the OpenAPI documentation of `text-generation-inference`, available [here](https://huggingface.github.io/text-generation-inference).

You can make the requests using any tool of your preference, such as curl, Python, or TypeScript. For an end-to-end experience, we've open-sourced [ChatUI](https://github.com/huggingface/chat-ui), a chat interface for open-access models.

## curl

After a successful server launch, you can query the model using the `v1/chat/completions` route to get responses that are compliant with the OpenAI Chat Completion spec:

```bash
curl localhost:8080/v1/chat/completions \
    -X POST \
    -d '{
  "model": "tgi",
  "messages": [
    {
      "role": "system",
      "content": "You are a helpful assistant."
    },
    {
      "role": "user",
      "content": "What is deep learning?"
    }
  ],
  "stream": true,
  "max_tokens": 20
}' \
    -H 'Content-Type: application/json'
```

For non-chat use-cases, you can also use the `/generate` and `/generate_stream` routes.

```bash
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{
  "inputs": "What is Deep Learning?",
  "parameters": {
    "max_new_tokens": 20
  }
}' \
    -H 'Content-Type: application/json'
```

## Python

### Inference Client

[`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/main/en/index) is a Python library for interacting with the Hugging Face Hub, including its endpoints. It provides a high-level class, [`huggingface_hub.InferenceClient`](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient), which makes it easy to make calls to TGI's Messages API. `InferenceClient` also takes care of parameter validation and provides a simple-to-use interface.

Install the `huggingface_hub` package via pip.

```bash
pip install huggingface_hub
```

You can now use `InferenceClient` in exactly the same way you would use the `OpenAI` client in Python:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(
    base_url="http://localhost:8080/v1/",
)

output = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Count to 10"},
    ],
    stream=True,
    max_tokens=1024,
)

for chunk in output:
    print(chunk.choices[0].delta.content)
```

You can check out more details about OpenAI compatibility [here](https://huggingface.co/docs/huggingface_hub/en/guides/inference#openai-compatibility).

There is also an async version of the client, `AsyncInferenceClient`, based on `asyncio` and `aiohttp`. You can find docs for it [here](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.AsyncInferenceClient).

### OpenAI Client

You can directly use the OpenAI [Python](https://github.com/openai/openai-python) or [JS](https://github.com/openai/openai-node) clients to interact with TGI.

Install the OpenAI Python package via pip.
```bash
pip install openai
```

```python
from openai import OpenAI

# init the client but point it to TGI
client = OpenAI(
    base_url="http://localhost:8080/v1/",
    api_key="-"
)

chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is deep learning?"}
    ],
    stream=True
)

# iterate and print the stream
for message in chat_completion:
    print(message)
```

## UI

### Gradio

Gradio is a Python library that helps you build web applications for your machine learning models with a few lines of code. It has a `ChatInterface` wrapper that helps create neat UIs for chatbots. Let's take a look at how to create a chatbot in streaming mode using TGI and Gradio. Let's install Gradio and the Hub Python library first.

```bash
pip install huggingface-hub gradio
```

Assuming you are serving your model on port 8080, we will query it through [InferenceClient](consuming_tgi#inference-client).

```python
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient(base_url="http://127.0.0.1:8080")

def inference(message, history):
    partial_message = ""
    output = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": message},
        ],
        stream=True,
        max_tokens=1024,
    )

    for chunk in output:
        partial_message += chunk.choices[0].delta.content
        yield partial_message

gr.ChatInterface(
    inference,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Chat with me!", container=False, scale=7),
    description="This is the demo for Gradio UI consuming TGI endpoint.",
    title="Gradio 🤝 TGI",
    examples=["Are tomatoes vegetables?"],
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
).queue().launch()
```

You can check out the UI and try the demo directly here 👇

<div class="block dark:hidden">
    <iframe
        src="https://merve-gradio-tgi-2.hf.space?__theme=light"
        width="850"
        height="750"
    ></iframe>
</div>

<div class="hidden dark:block">
    <iframe
        src="https://merve-gradio-tgi-2.hf.space?__theme=dark"
        width="850"
        height="750"
    ></iframe>
</div>

You can read more about how to customize a `ChatInterface` [here](https://www.gradio.app/guides/creating-a-chatbot-fast).

### ChatUI

[ChatUI](https://github.com/huggingface/chat-ui) is an open-source interface built for consuming LLMs. It offers many customization options, such as web search with SERP API and more. ChatUI can automatically consume the TGI server and even provides an option to switch between different TGI endpoints. You can try it out at [Hugging Chat](https://huggingface.co/chat/), or use the [ChatUI Docker Space](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) to deploy your own Hugging Chat to Spaces.

To serve both ChatUI and TGI in the same environment, simply add your own endpoints to the `MODELS` variable in the `.env.local` file inside the `chat-ui` repository. Provide the endpoints pointing to where TGI is served.

```
{
// rest of the model config here
"endpoints": [{"url": "https://HOST:PORT/generate_stream"}]
}
```

![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chatui_screen.png)
text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/consuming_tgi.md", "repo_id": "text-generation-inference", "token_count": 2375 }
238
## Speculation

Speculative decoding, assisted generation, Medusa, and others are a few different names for the same idea.
The idea is to generate tokens *before* the large model actually runs, and only *check* if those tokens were valid.

So you are doing *more* computation on your LLM, but if your guesses are correct you produce 1, 2, 3, etc. tokens in a single LLM pass. Since LLMs are usually memory bound (and not compute bound), provided your guesses are correct enough, this gives 2-3x faster inference (it can be much more for code-oriented tasks, for instance).

You can check out a more [detailed explanation](https://huggingface.co/blog/assisted-generation).

Text Generation Inference supports two main speculative methods:

- Medusa
- N-gram

### Medusa

Medusa is a [simple method](https://arxiv.org/abs/2401.10774) to create many tokens in a single pass using fine-tuned LM heads in addition to your existing model.

You can check out a few existing fine-tunes for popular models:

- [text-generation-inference/gemma-7b-it-medusa](https://huggingface.co/text-generation-inference/gemma-7b-it-medusa)
- [text-generation-inference/Mixtral-8x7B-Instruct-v0.1-medusa](https://huggingface.co/text-generation-inference/Mixtral-8x7B-Instruct-v0.1-medusa)
- [text-generation-inference/Mistral-7B-Instruct-v0.2-medusa](https://huggingface.co/text-generation-inference/Mistral-7B-Instruct-v0.2-medusa)

In order to create your own Medusa heads for your own fine-tune, you should check out the original Medusa repo and the guide in [../basic_tutorials/train_medusa.md](../basic_tutorials/train_medusa.md).

In order to use Medusa models in TGI, simply point to a Medusa-enabled model, and everything will load automatically.

### N-gram

If you don't have a Medusa model, or don't have the resources to fine-tune one, you can try to use `n-gram`.
N-gram works by trying to find matching tokens in the previous sequence and using those as the speculation for generating new tokens. For example, if the tokens "np.mean" appear multiple times in the sequence, the model can speculate that the next continuation of the tokens "np." is probably also "mean".

This is an extremely simple method, which works best for code or highly repetitive text. It might not be beneficial if the speculation misses too often.

In order to enable n-gram speculation, simply use `--speculate 2` in your flags.

[Details about the flag](https://huggingface.co/docs/text-generation-inference/basic_tutorials/launcher#speculate)
text-generation-inference/docs/source/conceptual/speculation.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/speculation.md", "repo_id": "text-generation-inference", "token_count": 712 }
239
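To make the n-gram idea from `speculation.md` above concrete, here is a small, self-contained Python toy sketch; it is not TGI's actual implementation, and the token IDs and function name are purely illustrative. It looks for the most recent earlier occurrence of the current suffix and proposes the tokens that followed it as the speculation.

```python
# Toy illustration of n-gram speculation; this is NOT TGI's real implementation.
from typing import List


def ngram_speculate(tokens: List[int], prefix_len: int = 2, n_speculate: int = 2) -> List[int]:
    """Propose up to `n_speculate` tokens by matching the current suffix
    against earlier positions in the sequence."""
    if len(tokens) <= prefix_len:
        return []
    suffix = tokens[-prefix_len:]
    # Walk backwards so the most recent earlier match wins.
    for start in range(len(tokens) - prefix_len - 1, -1, -1):
        if tokens[start:start + prefix_len] == suffix:
            return tokens[start + prefix_len:start + prefix_len + n_speculate]
    return []


# e.g. a sequence ending in the same two tokens it started with
sequence = [7, 3, 11, 4, 9, 5, 7, 3]
print(ngram_speculate(sequence))  # -> [11, 4]
```

The speculated tokens are then checked in a single forward pass of the large model; wrong guesses are simply discarded, so correctness is preserved while correct guesses yield several tokens per pass.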