text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from
https://github.com/huggingface/transformers/blob/c409cd81777fb27aadc043ed3d8339dbc020fb3b/src/transformers/quantizers/auto.py
"""

import warnings
from typing import Dict, Optional, Union

from .bitsandbytes import BnB4BitDiffusersQuantizer, BnB8BitDiffusersQuantizer
from .gguf import GGUFQuantizer
from .quantization_config import (
    BitsAndBytesConfig,
    GGUFQuantizationConfig,
    QuantizationConfigMixin,
    QuantizationMethod,
    QuantoConfig,
    TorchAoConfig,
)
from .quanto import QuantoQuantizer
from .torchao import TorchAoHfQuantizer


# Maps a `quant_method` key to the quantizer class that implements it.
AUTO_QUANTIZER_MAPPING = {
    "bitsandbytes_4bit": BnB4BitDiffusersQuantizer,
    "bitsandbytes_8bit": BnB8BitDiffusersQuantizer,
    "gguf": GGUFQuantizer,
    "quanto": QuantoQuantizer,
    "torchao": TorchAoHfQuantizer,
}

# Maps a `quant_method` key to the config class that deserializes its dict form.
# bitsandbytes uses one config class for both 4-bit and 8-bit.
AUTO_QUANTIZATION_CONFIG_MAPPING = {
    "bitsandbytes_4bit": BitsAndBytesConfig,
    "bitsandbytes_8bit": BitsAndBytesConfig,
    "gguf": GGUFQuantizationConfig,
    "quanto": QuantoConfig,
    "torchao": TorchAoConfig,
}


class DiffusersAutoQuantizer:
    """
    The auto diffusers quantizer class that takes care of automatically instantiating to the correct
    `DiffusersQuantizer` given the `QuantizationConfig`.
    """

    @classmethod
    def from_dict(cls, quantization_config_dict: Dict):
        """
        Instantiate the appropriate `QuantizationConfigMixin` subclass from its serialized dict form.

        Args:
            quantization_config_dict (`Dict`):
                The serialized quantization config, typically read from a model's `config.json`. Must either carry a
                `quant_method` key or the legacy bitsandbytes flags `load_in_8bit`/`load_in_4bit`.

        Raises:
            ValueError: if no `quant_method` can be determined or it is not a supported method.
        """
        quant_method = quantization_config_dict.get("quant_method", None)
        # We need a special care for bnb models to make sure everything is BC ..
        # Legacy configs carry only load_in_8bit/load_in_4bit; derive the suffixed method from them.
        if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
            suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
            quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
        elif quant_method is None:
            raise ValueError(
                "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
            )

        if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
            )

        target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
        return target_cls.from_dict(quantization_config_dict)

    @classmethod
    def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs):
        """
        Instantiate the matching `DiffusersQuantizer` for a quantization config.

        Args:
            quantization_config (`QuantizationConfigMixin` or `Dict`):
                The quantization config (deserialized via `from_dict` when a dict is passed).
            kwargs:
                Forwarded to the quantizer class constructor.

        Raises:
            ValueError: if the resolved `quant_method` is not supported.
        """
        # Convert it to a QuantizationConfig if the q_config is a dict
        if isinstance(quantization_config, dict):
            quantization_config = cls.from_dict(quantization_config)

        quant_method = quantization_config.quant_method

        # Again, we need a special care for bnb as we have a single quantization config
        # class for both 4-bit and 8-bit quantization
        if quant_method == QuantizationMethod.BITS_AND_BYTES:
            if quantization_config.load_in_8bit:
                quant_method += "_8bit"
            else:
                quant_method += "_4bit"

        if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
            raise ValueError(
                f"Unknown quantization type, got {quant_method} - supported types are:"
                f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
            )

        target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
        return target_cls(quantization_config, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """
        Build a quantizer from the `quantization_config` stored in a pretrained model's config.

        Args:
            pretrained_model_name_or_path:
                Model identifier or local path whose config carries a `quantization_config` entry.
            kwargs:
                Forwarded to `load_config` and merged into the quantization config.

        Raises:
            ValueError: if the model config has no `quantization_config`.
        """
        # NOTE(review): `load_config` is not defined on this class in this file — presumably
        # provided by a mixin or patched elsewhere; confirm before relying on this entry point.
        model_config = cls.load_config(pretrained_model_name_or_path, **kwargs)
        if getattr(model_config, "quantization_config", None) is None:
            raise ValueError(
                # Fixed grammar: "Did not found" -> "Did not find".
                f"Did not find a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
            )
        quantization_config_dict = model_config.quantization_config
        quantization_config = cls.from_dict(quantization_config_dict)
        # Update with potential kwargs that are passed through from_pretrained.
        quantization_config.update(kwargs)

        return cls.from_config(quantization_config)

    @classmethod
    def merge_quantization_configs(
        cls,
        quantization_config: Union[dict, QuantizationConfigMixin],
        quantization_config_from_args: Optional[QuantizationConfigMixin],
    ):
        """
        handles situations where both quantization_config from args and quantization_config from model config are
        present.

        The model's own config always wins; a warning is emitted when the caller also supplied one.
        """
        if quantization_config_from_args is not None:
            warning_msg = (
                "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
                " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
            )
        else:
            warning_msg = ""

        if isinstance(quantization_config, dict):
            quantization_config = cls.from_dict(quantization_config)

        if warning_msg != "":
            # stacklevel=2 attributes the warning to the caller rather than this helper.
            warnings.warn(warning_msg, stacklevel=2)

        return quantization_config
diffusers/src/diffusers/quantizers/auto.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/auto.py", "repo_id": "diffusers", "token_count": 2249 }
187
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ..utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


# Dummy placeholder objects collected for schedulers whose optional backend
# (torch / flax / scipy / torchsde) is not installed; attached to the module below.
_dummy_modules = {}
# Maps submodule name -> list of public names; consumed by _LazyModule so that
# submodules are only imported on first attribute access.
_import_structure = {}

# torch-backed schedulers: register real entries when torch is present,
# otherwise fall back to the dummy objects.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils import dummy_pt_objects  # noqa F403

    _dummy_modules.update(get_objects_from_module(dummy_pt_objects))
else:
    _import_structure["deprecated"] = ["KarrasVeScheduler", "ScoreSdeVpScheduler"]
    _import_structure["scheduling_amused"] = ["AmusedScheduler"]
    _import_structure["scheduling_consistency_decoder"] = ["ConsistencyDecoderScheduler"]
    _import_structure["scheduling_consistency_models"] = ["CMStochasticIterativeScheduler"]
    _import_structure["scheduling_ddim"] = ["DDIMScheduler"]
    _import_structure["scheduling_ddim_cogvideox"] = ["CogVideoXDDIMScheduler"]
    _import_structure["scheduling_ddim_inverse"] = ["DDIMInverseScheduler"]
    _import_structure["scheduling_ddim_parallel"] = ["DDIMParallelScheduler"]
    _import_structure["scheduling_ddpm"] = ["DDPMScheduler"]
    _import_structure["scheduling_ddpm_parallel"] = ["DDPMParallelScheduler"]
    _import_structure["scheduling_ddpm_wuerstchen"] = ["DDPMWuerstchenScheduler"]
    _import_structure["scheduling_deis_multistep"] = ["DEISMultistepScheduler"]
    _import_structure["scheduling_dpm_cogvideox"] = ["CogVideoXDPMScheduler"]
    _import_structure["scheduling_dpmsolver_multistep"] = ["DPMSolverMultistepScheduler"]
    _import_structure["scheduling_dpmsolver_multistep_inverse"] = ["DPMSolverMultistepInverseScheduler"]
    _import_structure["scheduling_dpmsolver_singlestep"] = ["DPMSolverSinglestepScheduler"]
    _import_structure["scheduling_edm_dpmsolver_multistep"] = ["EDMDPMSolverMultistepScheduler"]
    _import_structure["scheduling_edm_euler"] = ["EDMEulerScheduler"]
    _import_structure["scheduling_euler_ancestral_discrete"] = ["EulerAncestralDiscreteScheduler"]
    _import_structure["scheduling_euler_discrete"] = ["EulerDiscreteScheduler"]
    _import_structure["scheduling_flow_match_euler_discrete"] = ["FlowMatchEulerDiscreteScheduler"]
    _import_structure["scheduling_flow_match_heun_discrete"] = ["FlowMatchHeunDiscreteScheduler"]
    _import_structure["scheduling_flow_match_lcm"] = ["FlowMatchLCMScheduler"]
    _import_structure["scheduling_heun_discrete"] = ["HeunDiscreteScheduler"]
    _import_structure["scheduling_ipndm"] = ["IPNDMScheduler"]
    _import_structure["scheduling_k_dpm_2_ancestral_discrete"] = ["KDPM2AncestralDiscreteScheduler"]
    _import_structure["scheduling_k_dpm_2_discrete"] = ["KDPM2DiscreteScheduler"]
    _import_structure["scheduling_lcm"] = ["LCMScheduler"]
    _import_structure["scheduling_pndm"] = ["PNDMScheduler"]
    _import_structure["scheduling_repaint"] = ["RePaintScheduler"]
    _import_structure["scheduling_sasolver"] = ["SASolverScheduler"]
    _import_structure["scheduling_scm"] = ["SCMScheduler"]
    _import_structure["scheduling_sde_ve"] = ["ScoreSdeVeScheduler"]
    _import_structure["scheduling_tcd"] = ["TCDScheduler"]
    _import_structure["scheduling_unclip"] = ["UnCLIPScheduler"]
    _import_structure["scheduling_unipc_multistep"] = ["UniPCMultistepScheduler"]
    _import_structure["scheduling_utils"] = ["AysSchedules", "KarrasDiffusionSchedulers", "SchedulerMixin"]
    _import_structure["scheduling_vq_diffusion"] = ["VQDiffusionScheduler"]

# flax-backed schedulers.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils import dummy_flax_objects  # noqa F403

    _dummy_modules.update(get_objects_from_module(dummy_flax_objects))
else:
    _import_structure["scheduling_ddim_flax"] = ["FlaxDDIMScheduler"]
    _import_structure["scheduling_ddpm_flax"] = ["FlaxDDPMScheduler"]
    _import_structure["scheduling_dpmsolver_multistep_flax"] = ["FlaxDPMSolverMultistepScheduler"]
    _import_structure["scheduling_euler_discrete_flax"] = ["FlaxEulerDiscreteScheduler"]
    _import_structure["scheduling_karras_ve_flax"] = ["FlaxKarrasVeScheduler"]
    _import_structure["scheduling_lms_discrete_flax"] = ["FlaxLMSDiscreteScheduler"]
    _import_structure["scheduling_pndm_flax"] = ["FlaxPNDMScheduler"]
    _import_structure["scheduling_sde_ve_flax"] = ["FlaxScoreSdeVeScheduler"]
    _import_structure["scheduling_utils_flax"] = [
        "FlaxKarrasDiffusionSchedulers",
        "FlaxSchedulerMixin",
        "FlaxSchedulerOutput",
        "broadcast_to_shape_from_left",
    ]

# Schedulers that need both torch and scipy.
try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils import dummy_torch_and_scipy_objects  # noqa F403

    _dummy_modules.update(get_objects_from_module(dummy_torch_and_scipy_objects))
else:
    _import_structure["scheduling_lms_discrete"] = ["LMSDiscreteScheduler"]

# Schedulers that need both torch and torchsde.
try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils import dummy_torch_and_torchsde_objects  # noqa F403

    _dummy_modules.update(get_objects_from_module(dummy_torch_and_torchsde_objects))
else:
    _import_structure["scheduling_cosine_dpmsolver_multistep"] = ["CosineDPMSolverMultistepScheduler"]
    _import_structure["scheduling_dpmsolver_sde"] = ["DPMSolverSDEScheduler"]

# Eager branch: during type checking (or when slow imports are requested) every
# submodule is imported for real so static tools can see the actual names.
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    from ..utils import (
        OptionalDependencyNotAvailable,
        is_flax_available,
        is_scipy_available,
        is_torch_available,
        is_torchsde_available,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ..utils.dummy_pt_objects import *  # noqa F403
    else:
        from .deprecated import KarrasVeScheduler, ScoreSdeVpScheduler
        from .scheduling_amused import AmusedScheduler
        from .scheduling_consistency_decoder import ConsistencyDecoderScheduler
        from .scheduling_consistency_models import CMStochasticIterativeScheduler
        from .scheduling_ddim import DDIMScheduler
        from .scheduling_ddim_cogvideox import CogVideoXDDIMScheduler
        from .scheduling_ddim_inverse import DDIMInverseScheduler
        from .scheduling_ddim_parallel import DDIMParallelScheduler
        from .scheduling_ddpm import DDPMScheduler
        from .scheduling_ddpm_parallel import DDPMParallelScheduler
        from .scheduling_ddpm_wuerstchen import DDPMWuerstchenScheduler
        from .scheduling_deis_multistep import DEISMultistepScheduler
        from .scheduling_dpm_cogvideox import CogVideoXDPMScheduler
        from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
        from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
        from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
        from .scheduling_edm_dpmsolver_multistep import EDMDPMSolverMultistepScheduler
        from .scheduling_edm_euler import EDMEulerScheduler
        from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
        from .scheduling_euler_discrete import EulerDiscreteScheduler
        from .scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
        from .scheduling_flow_match_heun_discrete import FlowMatchHeunDiscreteScheduler
        from .scheduling_flow_match_lcm import FlowMatchLCMScheduler
        from .scheduling_heun_discrete import HeunDiscreteScheduler
        from .scheduling_ipndm import IPNDMScheduler
        from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
        from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
        from .scheduling_lcm import LCMScheduler
        from .scheduling_pndm import PNDMScheduler
        from .scheduling_repaint import RePaintScheduler
        from .scheduling_sasolver import SASolverScheduler
        from .scheduling_scm import SCMScheduler
        from .scheduling_sde_ve import ScoreSdeVeScheduler
        from .scheduling_tcd import TCDScheduler
        from .scheduling_unclip import UnCLIPScheduler
        from .scheduling_unipc_multistep import UniPCMultistepScheduler
        from .scheduling_utils import AysSchedules, KarrasDiffusionSchedulers, SchedulerMixin
        from .scheduling_vq_diffusion import VQDiffusionScheduler

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ..utils.dummy_flax_objects import *  # noqa F403
    else:
        from .scheduling_ddim_flax import FlaxDDIMScheduler
        from .scheduling_ddpm_flax import FlaxDDPMScheduler
        from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
        from .scheduling_euler_discrete_flax import FlaxEulerDiscreteScheduler
        from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
        from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
        from .scheduling_pndm_flax import FlaxPNDMScheduler
        from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
        from .scheduling_utils_flax import (
            FlaxKarrasDiffusionSchedulers,
            FlaxSchedulerMixin,
            FlaxSchedulerOutput,
            broadcast_to_shape_from_left,
        )

    try:
        if not (is_torch_available() and is_scipy_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
    else:
        from .scheduling_lms_discrete import LMSDiscreteScheduler

    try:
        if not (is_torch_available() and is_torchsde_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
    else:
        from .scheduling_cosine_dpmsolver_multistep import CosineDPMSolverMultistepScheduler
        from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler

else:
    import sys

    # Replace this module with a lazy proxy; submodules in _import_structure are
    # imported only when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

    # Expose the dummy placeholders for missing-backend schedulers so attribute
    # access fails with an informative error instead of an ImportError.
    for name, value in _dummy_modules.items():
        setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/schedulers/__init__.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/__init__.py", "repo_id": "diffusers", "token_count": 4471 }
188
# Copyright (c) 2022 Pablo Pernías MIT License
# Copyright 2025 UC Berkeley Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class DDPMWuerstchenSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
    """

    prev_sample: torch.Tensor


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.

    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.
        alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
                     Choose from `cosine` or `exp`

    Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            # Cosine alpha-bar schedule (offset 0.008), as in "Improved DDPM".
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_t = 1 - alpha_bar(t2)/alpha_bar(t1), capped at max_beta.
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDPMWuerstchenScheduler(SchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and
    Langevin dynamics sampling.

    This variant operates on *continuous* timesteps in [0, 1] (see `set_timesteps`) and computes alpha-bar on the fly
    from a cosine schedule instead of precomputing per-step betas.

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    For more details, see the original paper: https://huggingface.co/papers/2006.11239

    Args:
        scaler (`float`):
            Exponent used to warp the continuous time variable in `_alpha_cumprod` (`t -> 1 - (1 - t)**scaler` for
            values > 1, `t -> t**scaler` for values < 1); `1.0` leaves time unwarped.
        s (`float`):
            Offset of the cosine alpha-bar schedule (the `0.008`-style shift used to keep alpha_bar(0) < 1).
    """

    @register_to_config
    def __init__(
        self,
        scaler: float = 1.0,
        s: float = 0.008,
    ):
        self.scaler = scaler
        self.s = torch.tensor([s])
        # alpha_bar(0); used to normalize _alpha_cumprod so the schedule starts at 1.
        self._init_alpha_cumprod = torch.cos(self.s / (1 + self.s) * torch.pi * 0.5) ** 2

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

    def _alpha_cumprod(self, t, device):
        # Compute normalized alpha-bar(t) for continuous t in [0, 1], optionally
        # time-warped by `scaler`, clamped away from 0 and 1 for numerical safety.
        if self.scaler > 1:
            t = 1 - (1 - t) ** self.scaler
        elif self.scaler < 1:
            t = t**self.scaler
        alpha_cumprod = torch.cos(
            (t + self.s.to(device)) / (1 + self.s.to(device)) * torch.pi * 0.5
        ) ** 2 / self._init_alpha_cumprod.to(device)
        return alpha_cumprod.clamp(0.0001, 0.9999)

    def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep. This scheduler applies no scaling and returns the sample unchanged.

        Args:
            sample (`torch.Tensor`): input sample
            timestep (`int`, optional): current timestep

        Returns:
            `torch.Tensor`: scaled input sample
        """
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int = None,
        timesteps: Optional[List[int]] = None,
        device: Union[str, torch.device] = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            num_inference_steps (`int`, *optional*):
                the number of diffusion steps used when generating samples with a pre-trained model. If passed, then
                `timesteps` must be `None`; produces `num_inference_steps + 1` values evenly spaced from 1.0 down to
                0.0.
            timesteps (`List[int]`, *optional*):
                custom timesteps to use instead of the evenly spaced default.
            device (`str` or `torch.device`, *optional*):
                the device to which the timesteps are moved to.
        """
        if timesteps is None:
            timesteps = torch.linspace(1.0, 0.0, num_inference_steps + 1, device=device)
        if not isinstance(timesteps, torch.Tensor):
            timesteps = torch.Tensor(timesteps).to(device)
        self.timesteps = timesteps

    def step(
        self,
        model_output: torch.Tensor,
        timestep: int,
        sample: torch.Tensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[DDPMWuerstchenSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.Tensor`): direct output from learned diffusion model.
            timestep (`torch.Tensor`): current continuous timestep(s) in the diffusion chain, one per batch element
                (the code indexes `timestep.size(0)`, so a tensor — not a plain `int` — is expected despite the
                annotation).
            sample (`torch.Tensor`): current instance of sample being created by diffusion process.
            generator: random number generator.
            return_dict (`bool`): option for returning tuple rather than DDPMWuerstchenSchedulerOutput class

        Returns:
            [`DDPMWuerstchenSchedulerOutput`] or `tuple`: [`DDPMWuerstchenSchedulerOutput`] if `return_dict` is True,
            otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
        """
        dtype = model_output.dtype
        device = model_output.device
        t = timestep

        prev_t = self.previous_timestep(t)

        # alpha-bar at current and previous timesteps, broadcast over sample dims.
        alpha_cumprod = self._alpha_cumprod(t, device).view(t.size(0), *[1 for _ in sample.shape[1:]])
        alpha_cumprod_prev = self._alpha_cumprod(prev_t, device).view(prev_t.size(0), *[1 for _ in sample.shape[1:]])
        alpha = alpha_cumprod / alpha_cumprod_prev

        # Posterior mean (DDPM eq. 7), treating model_output as predicted noise.
        mu = (1.0 / alpha).sqrt() * (sample - (1 - alpha) * model_output / (1 - alpha_cumprod).sqrt())

        std_noise = randn_tensor(mu.shape, generator=generator, device=model_output.device, dtype=model_output.dtype)
        std = ((1 - alpha) * (1.0 - alpha_cumprod_prev) / (1.0 - alpha_cumprod)).sqrt() * std_noise
        # No noise is added on the final step (prev_t == 0).
        pred = mu + std * (prev_t != 0).float().view(prev_t.size(0), *[1 for _ in sample.shape[1:]])

        if not return_dict:
            return (pred.to(dtype),)

        return DDPMWuerstchenSchedulerOutput(prev_sample=pred.to(dtype))

    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        timesteps: torch.Tensor,
    ) -> torch.Tensor:
        """
        Forward-diffuse `original_samples` to the given continuous `timesteps`:
        `sqrt(alpha_bar) * x_0 + sqrt(1 - alpha_bar) * noise`.
        """
        device = original_samples.device
        dtype = original_samples.dtype
        alpha_cumprod = self._alpha_cumprod(timesteps, device=device).view(
            timesteps.size(0), *[1 for _ in original_samples.shape[1:]]
        )
        noisy_samples = alpha_cumprod.sqrt() * original_samples + (1 - alpha_cumprod).sqrt() * noise
        return noisy_samples.to(dtype=dtype)

    def __len__(self):
        # NOTE(review): `num_train_timesteps` is not among the config values registered in
        # __init__ (only `scaler` and `s` are) — presumably this would raise if called; confirm.
        return self.config.num_train_timesteps

    def previous_timestep(self, timestep):
        # Locate the entry of self.timesteps nearest to the current (batch-shared) timestep,
        # then return the next (smaller) timestep broadcast to the batch size.
        index = (self.timesteps - timestep[0]).abs().argmin().item()
        prev_t = self.timesteps[index + 1][None].expand(timestep.shape[0])
        return prev_t
diffusers/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ddpm_wuerstchen.py", "repo_id": "diffusers", "token_count": 3646 }
189
# Copyright 2025 Kakao Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class UnCLIPSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. 
""" prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class UnCLIPScheduler(SchedulerMixin, ConfigMixin): """ NOTE: do not use this scheduler. The DDPM scheduler has been updated to support the changes made here. This scheduler will be removed and replaced with DDPM. This is a modified DDPM Scheduler specifically for the karlo unCLIP model. This scheduler has some minor variations in how it calculates the learned range variance and dynamically re-calculates betas based off the timesteps it is skipping. 
The scheduler also uses a slightly different step ratio when computing timesteps to use for inference. See [`~DDPMScheduler`] for more information on DDPM scheduling Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. variance_type (`str`): options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small_log` or `learned_range`. clip_sample (`bool`, default `True`): option to clip predicted sample between `-clip_sample_range` and `clip_sample_range` for numerical stability. clip_sample_range (`float`, default `1.0`): The range to clip the sample between. See `clip_sample`. prediction_type (`str`, default `epsilon`, optional): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process) or `sample` (directly predicting the noisy sample`) """ @register_to_config def __init__( self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2", ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'") self.betas = betas_for_alpha_bar(num_train_timesteps) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. 
Args: sample (`torch.Tensor`): input sample timestep (`int`, optional): current timestep Returns: `torch.Tensor`: scaled input sample """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Note that this scheduler uses a slightly different step ratio than the other diffusers schedulers. The different step ratio is to mimic the original karlo implementation and does not affect the quality or accuracy of the results. Args: num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ self.num_inference_steps = num_inference_steps step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) self.timesteps = torch.from_numpy(timesteps).to(device) def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None): if prev_timestep is None: prev_timestep = t - 1 alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if prev_timestep == t - 1: beta = self.betas[t] else: beta = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://huggingface.co/papers/2006.11239) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: variance_type = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": variance = torch.log(torch.clamp(variance, min=1e-20)) variance = torch.exp(0.5 * variance) elif variance_type == 
"learned_range": # NOTE difference with DDPM scheduler min_log = variance.log() max_log = beta.log() frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True, ) -> Union[UnCLIPSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.Tensor`): current instance of sample being created by diffusion process. prev_timestep (`int`, *optional*): The previous timestep to predict the previous sample at. Used to dynamically compute beta. If not given, `t-1` is used and the pre-computed beta is used. generator: random number generator. return_dict (`bool`): option for returning tuple rather than UnCLIPSchedulerOutput class Returns: [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ t = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None # 1. 
compute alphas, betas if prev_timestep is None: prev_timestep = t - 1 alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if prev_timestep == t - 1: beta = self.betas[t] alpha = self.alphas[t] else: beta = 1 - alpha_prod_t / alpha_prod_t_prev alpha = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://huggingface.co/papers/2006.11239 if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.config.prediction_type == "sample": pred_original_sample = model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`" " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: pred_original_sample = torch.clamp( pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://huggingface.co/papers/2006.11239 pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * beta) / beta_prod_t current_sample_coeff = alpha ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://huggingface.co/papers/2006.11239 pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise variance = 0 if t > 0: variance_noise = randn_tensor( model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device ) variance = self._get_variance( t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, ) if self.variance_type == "fixed_small_log": variance = variance elif self.variance_type == "learned_range": variance = (0.5 * variance).exp() else: raise ValueError( f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`" " for the UnCLIPScheduler." ) variance = variance * variance_noise pred_prev_sample = pred_prev_sample + variance if not return_dict: return ( pred_prev_sample, pred_original_sample, ) return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor, ) -> torch.Tensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement # for the subsequent add_noise calls self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return 
noisy_samples
diffusers/src/diffusers/schedulers/scheduling_unclip.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_unclip.py", "repo_id": "diffusers", "token_count": 6284 }
190
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class OnnxRuntimeModel(metaclass=DummyObject): _backends = ["onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["onnx"])
diffusers/src/diffusers/utils/dummy_onnx_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_onnx_objects.py", "repo_id": "diffusers", "token_count": 202 }
191
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Import utilities: Utilities related to imports and our lazy inits. """ import importlib.util import inspect import operator as op import os import sys from collections import OrderedDict, defaultdict from itertools import chain from types import ModuleType from typing import Any, Tuple, Union from huggingface_hub.utils import is_jinja_available # noqa: F401 from packaging.version import Version, parse from . import logging # The package importlib_metadata is in a different place, depending on the python version. 
if sys.version_info < (3, 8): import importlib_metadata else: import importlib.metadata as importlib_metadata try: _package_map = importlib_metadata.packages_distributions() # load-once to avoid expensive calls except Exception: _package_map = None logger = logging.get_logger(__name__) # pylint: disable=invalid-name ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper() DIFFUSERS_SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "FALSE").upper() DIFFUSERS_SLOW_IMPORT = DIFFUSERS_SLOW_IMPORT in ENV_VARS_TRUE_VALUES STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} _is_google_colab = "google.colab" in sys.modules or any(k.startswith("COLAB_") for k in os.environ) def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[bool, str]: global _package_map pkg_exists = importlib.util.find_spec(pkg_name) is not None pkg_version = "N/A" if pkg_exists: if _package_map is None: _package_map = defaultdict(list) try: # Fallback for Python < 3.10 for dist in importlib_metadata.distributions(): _top_level_declared = (dist.read_text("top_level.txt") or "").split() _infered_opt_names = { f.parts[0] if len(f.parts) > 1 else inspect.getmodulename(f) for f in (dist.files or []) } - {None} _top_level_inferred = filter(lambda name: "." not in name, _infered_opt_names) for pkg in _top_level_declared or _top_level_inferred: _package_map[pkg].append(dist.metadata["Name"]) except Exception as _: pass try: if get_dist_name and pkg_name in _package_map and _package_map[pkg_name]: if len(_package_map[pkg_name]) > 1: logger.warning( f"Multiple distributions found for package {pkg_name}. 
Picked distribution: {_package_map[pkg_name][0]}" ) pkg_name = _package_map[pkg_name][0] pkg_version = importlib_metadata.version(pkg_name) logger.debug(f"Successfully imported {pkg_name} version {pkg_version}") except (ImportError, importlib_metadata.PackageNotFoundError): pkg_exists = False return pkg_exists, pkg_version if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: _torch_available, _torch_version = _is_package_available("torch") else: logger.info("Disabling PyTorch because USE_TORCH is set") _torch_available = False _torch_version = "N/A" _jax_version = "N/A" _flax_version = "N/A" if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None if _flax_available: try: _jax_version = importlib_metadata.version("jax") _flax_version = importlib_metadata.version("flax") logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") except importlib_metadata.PackageNotFoundError: _flax_available = False else: _flax_available = False if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES: _safetensors_available, _safetensors_version = _is_package_available("safetensors") else: logger.info("Disabling Safetensors because USE_TF is set") _safetensors_available = False _onnxruntime_version = "N/A" _onnx_available = importlib.util.find_spec("onnxruntime") is not None if _onnx_available: candidates = ( "onnxruntime", "onnxruntime-cann", "onnxruntime-directml", "ort_nightly_directml", "onnxruntime-gpu", "ort_nightly_gpu", "onnxruntime-migraphx", "onnxruntime-openvino", "onnxruntime-qnn", "onnxruntime-rocm", "onnxruntime-training", "onnxruntime-vitisai", ) _onnxruntime_version = None # For the metadata, we have to look for both onnxruntime and onnxruntime-x for pkg in candidates: try: _onnxruntime_version = importlib_metadata.version(pkg) break except importlib_metadata.PackageNotFoundError: pass _onnx_available = 
_onnxruntime_version is not None if _onnx_available: logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}") # (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed. # _opencv_available = importlib.util.find_spec("opencv-python") is not None try: candidates = ( "opencv-python", "opencv-contrib-python", "opencv-python-headless", "opencv-contrib-python-headless", ) _opencv_version = None for pkg in candidates: try: _opencv_version = importlib_metadata.version(pkg) break except importlib_metadata.PackageNotFoundError: pass _opencv_available = _opencv_version is not None if _opencv_available: logger.debug(f"Successfully imported cv2 version {_opencv_version}") except importlib_metadata.PackageNotFoundError: _opencv_available = False _bs4_available = importlib.util.find_spec("bs4") is not None try: # importlib metadata under different name _bs4_version = importlib_metadata.version("beautifulsoup4") logger.debug(f"Successfully imported ftfy version {_bs4_version}") except importlib_metadata.PackageNotFoundError: _bs4_available = False _invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None try: _invisible_watermark_version = importlib_metadata.version("invisible-watermark") logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}") except importlib_metadata.PackageNotFoundError: _invisible_watermark_available = False _torch_xla_available, _torch_xla_version = _is_package_available("torch_xla") _torch_npu_available, _torch_npu_version = _is_package_available("torch_npu") _transformers_available, _transformers_version = _is_package_available("transformers") _hf_hub_available, _hf_hub_version = _is_package_available("huggingface_hub") _kernels_available, _kernels_version = _is_package_available("kernels") _inflect_available, _inflect_version = _is_package_available("inflect") _unidecode_available, _unidecode_version = 
_is_package_available("unidecode") _k_diffusion_available, _k_diffusion_version = _is_package_available("k_diffusion") _note_seq_available, _note_seq_version = _is_package_available("note_seq") _wandb_available, _wandb_version = _is_package_available("wandb") _tensorboard_available, _tensorboard_version = _is_package_available("tensorboard") _compel_available, _compel_version = _is_package_available("compel") _sentencepiece_available, _sentencepiece_version = _is_package_available("sentencepiece") _torchsde_available, _torchsde_version = _is_package_available("torchsde") _peft_available, _peft_version = _is_package_available("peft") _torchvision_available, _torchvision_version = _is_package_available("torchvision") _matplotlib_available, _matplotlib_version = _is_package_available("matplotlib") _timm_available, _timm_version = _is_package_available("timm") _bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes") _imageio_available, _imageio_version = _is_package_available("imageio") _ftfy_available, _ftfy_version = _is_package_available("ftfy") _scipy_available, _scipy_version = _is_package_available("scipy") _librosa_available, _librosa_version = _is_package_available("librosa") _accelerate_available, _accelerate_version = _is_package_available("accelerate") _xformers_available, _xformers_version = _is_package_available("xformers") _gguf_available, _gguf_version = _is_package_available("gguf") _torchao_available, _torchao_version = _is_package_available("torchao") _bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes") _optimum_quanto_available, _optimum_quanto_version = _is_package_available("optimum", get_dist_name=True) _pytorch_retinaface_available, _pytorch_retinaface_version = _is_package_available("pytorch_retinaface") _better_profanity_available, _better_profanity_version = _is_package_available("better_profanity") _nltk_available, _nltk_version = _is_package_available("nltk") 
_cosmos_guardrail_available, _cosmos_guardrail_version = _is_package_available("cosmos_guardrail") _sageattention_available, _sageattention_version = _is_package_available("sageattention") _flash_attn_available, _flash_attn_version = _is_package_available("flash_attn") _flash_attn_3_available, _flash_attn_3_version = _is_package_available("flash_attn_3") _kornia_available, _kornia_version = _is_package_available("kornia") def is_torch_available(): return _torch_available def is_torch_xla_available(): return _torch_xla_available def is_torch_npu_available(): return _torch_npu_available def is_flax_available(): return _flax_available def is_transformers_available(): return _transformers_available def is_inflect_available(): return _inflect_available def is_unidecode_available(): return _unidecode_available def is_onnx_available(): return _onnx_available def is_opencv_available(): return _opencv_available def is_scipy_available(): return _scipy_available def is_librosa_available(): return _librosa_available def is_xformers_available(): return _xformers_available def is_accelerate_available(): return _accelerate_available def is_kernels_available(): return _kernels_available def is_k_diffusion_available(): return _k_diffusion_available def is_note_seq_available(): return _note_seq_available def is_wandb_available(): return _wandb_available def is_tensorboard_available(): return _tensorboard_available def is_compel_available(): return _compel_available def is_ftfy_available(): return _ftfy_available def is_bs4_available(): return _bs4_available def is_torchsde_available(): return _torchsde_available def is_invisible_watermark_available(): return _invisible_watermark_available def is_peft_available(): return _peft_available def is_torchvision_available(): return _torchvision_available def is_matplotlib_available(): return _matplotlib_available def is_safetensors_available(): return _safetensors_available def is_bitsandbytes_available(): return _bitsandbytes_available def 
is_google_colab(): return _is_google_colab def is_sentencepiece_available(): return _sentencepiece_available def is_imageio_available(): return _imageio_available def is_gguf_available(): return _gguf_available def is_torchao_available(): return _torchao_available def is_optimum_quanto_available(): return _optimum_quanto_available def is_timm_available(): return _timm_available def is_pytorch_retinaface_available(): return _pytorch_retinaface_available def is_better_profanity_available(): return _better_profanity_available def is_nltk_available(): return _nltk_available def is_cosmos_guardrail_available(): return _cosmos_guardrail_available def is_hpu_available(): return all(importlib.util.find_spec(lib) for lib in ("habana_frameworks", "habana_frameworks.torch")) def is_sageattention_available(): return _sageattention_available def is_flash_attn_available(): return _flash_attn_available def is_flash_attn_3_available(): return _flash_attn_3_available def is_kornia_available(): return _kornia_available # docstyle-ignore FLAX_IMPORT_ERROR = """ {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. """ # docstyle-ignore INFLECT_IMPORT_ERROR = """ {0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install inflect` """ # docstyle-ignore PYTORCH_IMPORT_ERROR = """ {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. """ # docstyle-ignore ONNX_IMPORT_ERROR = """ {0} requires the onnxruntime library but it was not found in your environment. 
You can install it with pip: `pip install onnxruntime` """ # docstyle-ignore OPENCV_IMPORT_ERROR = """ {0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip install opencv-python` """ # docstyle-ignore SCIPY_IMPORT_ERROR = """ {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy` """ # docstyle-ignore LIBROSA_IMPORT_ERROR = """ {0} requires the librosa library but it was not found in your environment. Checkout the instructions on the installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment. """ # docstyle-ignore TRANSFORMERS_IMPORT_ERROR = """ {0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip install transformers` """ # docstyle-ignore UNIDECODE_IMPORT_ERROR = """ {0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install Unidecode` """ # docstyle-ignore K_DIFFUSION_IMPORT_ERROR = """ {0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip install k-diffusion` """ # docstyle-ignore NOTE_SEQ_IMPORT_ERROR = """ {0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip install note-seq` """ # docstyle-ignore WANDB_IMPORT_ERROR = """ {0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip install wandb` """ # docstyle-ignore TENSORBOARD_IMPORT_ERROR = """ {0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip install tensorboard` """ # docstyle-ignore COMPEL_IMPORT_ERROR = """ {0} requires the compel library but it was not found in your environment. 
You can install it with pip: `pip install compel` """ # docstyle-ignore BS4_IMPORT_ERROR = """ {0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: `pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore FTFY_IMPORT_ERROR = """ {0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. """ # docstyle-ignore TORCHSDE_IMPORT_ERROR = """ {0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde` """ # docstyle-ignore INVISIBLE_WATERMARK_IMPORT_ERROR = """ {0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0` """ # docstyle-ignore PEFT_IMPORT_ERROR = """ {0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft` """ # docstyle-ignore SAFETENSORS_IMPORT_ERROR = """ {0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors` """ # docstyle-ignore SENTENCEPIECE_IMPORT_ERROR = """ {0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece` """ # docstyle-ignore BITSANDBYTES_IMPORT_ERROR = """ {0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes` """ # docstyle-ignore IMAGEIO_IMPORT_ERROR = """ {0} requires the imageio library and ffmpeg but it was not found in your environment. 
You can install it with pip: `pip install imageio imageio-ffmpeg` """ # docstyle-ignore GGUF_IMPORT_ERROR = """ {0} requires the gguf library but it was not found in your environment. You can install it with pip: `pip install gguf` """ TORCHAO_IMPORT_ERROR = """ {0} requires the torchao library but it was not found in your environment. You can install it with pip: `pip install torchao` """ QUANTO_IMPORT_ERROR = """ {0} requires the optimum-quanto library but it was not found in your environment. You can install it with pip: `pip install optimum-quanto` """ # docstyle-ignore PYTORCH_RETINAFACE_IMPORT_ERROR = """ {0} requires the pytorch_retinaface library but it was not found in your environment. You can install it with pip: `pip install pytorch_retinaface` """ # docstyle-ignore BETTER_PROFANITY_IMPORT_ERROR = """ {0} requires the better_profanity library but it was not found in your environment. You can install it with pip: `pip install better_profanity` """ # docstyle-ignore NLTK_IMPORT_ERROR = """ {0} requires the nltk library but it was not found in your environment. 
You can install it with pip: `pip install nltk` """ BACKENDS_MAPPING = OrderedDict( [ ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)), ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)), ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)), ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)), ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)), ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)), ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)), ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)), ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)), ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ("imageio", (is_imageio_available, IMAGEIO_IMPORT_ERROR)), ("gguf", (is_gguf_available, GGUF_IMPORT_ERROR)), ("torchao", (is_torchao_available, TORCHAO_IMPORT_ERROR)), ("quanto", (is_optimum_quanto_available, QUANTO_IMPORT_ERROR)), ("pytorch_retinaface", (is_pytorch_retinaface_available, PYTORCH_RETINAFACE_IMPORT_ERROR)), ("better_profanity", (is_better_profanity_available, BETTER_PROFANITY_IMPORT_ERROR)), ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)), ] ) def requires_backends(obj, backends): if not isinstance(backends, (list, tuple)): 
backends = [backends] name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ checks = (BACKENDS_MAPPING[backend] for backend in backends) failed = [msg.format(name) for available, msg in checks if not available()] if failed: raise ImportError("".join(failed)) if name in [ "VersatileDiffusionTextToImagePipeline", "VersatileDiffusionPipeline", "VersatileDiffusionDualGuidedPipeline", "StableDiffusionImageVariationPipeline", "UnCLIPPipeline", ] and is_transformers_version("<", "4.25.0"): raise ImportError( f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install" " --upgrade transformers \n```" ) if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version( "<", "4.26.0" ): raise ImportError( f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install" " --upgrade transformers \n```" ) class DummyObject(type): """ Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by `requires_backend` each time a user tries to access any method of that class. """ def __getattr__(cls, key): if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]: return super().__getattr__(cls, key) requires_backends(cls, cls._backends) # This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319 def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): """ Compares a library version to some requirement using a given operation. Args: library_or_version (`str` or `packaging.version.Version`): A library name or a version to check. operation (`str`): A string representation of an operator, such as `">"` or `"<="`. 
requirement_version (`str`): The version to compare the library version against """ if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") operation = STR_OPERATION_TO_FUNC[operation] if isinstance(library_or_version, str): library_or_version = parse(importlib_metadata.version(library_or_version)) return operation(library_or_version, parse(requirement_version)) # This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338 def is_torch_version(operation: str, version: str): """ Compares the current PyTorch version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A string version of PyTorch """ return compare_versions(parse(_torch_version), operation, version) def is_torch_xla_version(operation: str, version: str): """ Compares the current torch_xla version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A string version of torch_xla """ if not is_torch_xla_available: return False return compare_versions(parse(_torch_xla_version), operation, version) def is_transformers_version(operation: str, version: str): """ Compares the current Transformers version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A version string """ if not _transformers_available: return False return compare_versions(parse(_transformers_version), operation, version) def is_hf_hub_version(operation: str, version: str): """ Compares the current Hugging Face Hub version to a given reference with an operation. 
Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A version string """ if not _hf_hub_available: return False return compare_versions(parse(_hf_hub_version), operation, version) def is_accelerate_version(operation: str, version: str): """ Compares the current Accelerate version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A version string """ if not _accelerate_available: return False return compare_versions(parse(_accelerate_version), operation, version) def is_peft_version(operation: str, version: str): """ Compares the current PEFT version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A version string """ if not _peft_available: return False return compare_versions(parse(_peft_version), operation, version) def is_bitsandbytes_version(operation: str, version: str): """ Args: Compares the current bitsandbytes version to a given reference with an operation. operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A version string """ if not _bitsandbytes_available: return False return compare_versions(parse(_bitsandbytes_version), operation, version) def is_gguf_version(operation: str, version: str): """ Compares the current Accelerate version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A version string """ if not _gguf_available: return False return compare_versions(parse(_gguf_version), operation, version) def is_torchao_version(operation: str, version: str): """ Compares the current torchao version to a given reference with an operation. 
    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _torchao_available:
        return False
    return compare_versions(parse(_torchao_version), operation, version)


def is_k_diffusion_version(operation: str, version: str):
    """
    Compares the current k-diffusion version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    # When the package is not installed at all, no comparison can succeed.
    if not _k_diffusion_available:
        return False
    return compare_versions(parse(_k_diffusion_version), operation, version)


def is_optimum_quanto_version(operation: str, version: str):
    """
    Compares the current optimum-quanto version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _optimum_quanto_available:
        return False
    return compare_versions(parse(_optimum_quanto_version), operation, version)


def is_xformers_version(operation: str, version: str):
    """
    Compares the current xformers version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _xformers_available:
        return False
    return compare_versions(parse(_xformers_version), operation, version)


def is_sageattention_version(operation: str, version: str):
    """
    Compares the current sageattention version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _sageattention_available:
        return False
    return compare_versions(parse(_sageattention_version), operation, version)


def is_flash_attn_version(operation: str, version: str):
    """
    Compares the current flash-attention version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _flash_attn_available:
        return False
    return compare_versions(parse(_flash_attn_version), operation, version)


def get_objects_from_module(module):
    """
    Returns a dict of object names and values in a module, while skipping private/internal objects

    Args:
        module (ModuleType):
            Module to extract the objects from.

    Returns:
        dict: Dictionary of object names and corresponding values
    """

    objects = {}
    for name in dir(module):
        # Leading underscore marks private/internal names; they are skipped.
        if name.startswith("_"):
            continue
        objects[name] = getattr(module, name)

    return objects


class OptionalDependencyNotAvailable(BaseException):
    """
    An error indicating that an optional dependency of Diffusers was not found in the environment.
    """


class _LazyModule(ModuleType):
    """
    Module class that surfaces all objects but only performs associated imports when the objects are requested.
    """

    # Very heavily inspired by optuna.integration._IntegrationModule
    # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
    def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
        super().__init__(name)
        # `import_structure` maps submodule name -> list of public attribute names it provides.
        self._modules = set(import_structure.keys())
        # Reverse index: attribute name -> owning submodule, for lazy attribute resolution.
        self._class_to_module = {}
        for key, values in import_structure.items():
            for value in values:
                self._class_to_module[value] = key
        # Needed for autocompletion in an IDE
        self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
        self.__file__ = module_file
        self.__spec__ = module_spec
        self.__path__ = [os.path.dirname(module_file)]
        # Eagerly-provided objects (e.g. version strings) that bypass lazy import.
        self._objects = {} if extra_objects is None else extra_objects
        self._name = name
        self._import_structure = import_structure

    # Needed for autocompletion in an IDE
    def __dir__(self):
        result = super().__dir__()
        # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
        # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
        for attr in self.__all__:
            if attr not in result:
                result.append(attr)
        return result

    def __getattr__(self, name: str) -> Any:
        # Resolution order: pre-supplied objects, then submodules, then attributes owned by a submodule.
        if name in self._objects:
            return self._objects[name]
        if name in self._modules:
            value = self._get_module(name)
        elif name in self._class_to_module.keys():
            module = self._get_module(self._class_to_module[name])
            value = getattr(module, name)
        else:
            raise AttributeError(f"module {self.__name__} has no attribute {name}")

        # Cache on the module so subsequent accesses skip __getattr__ entirely.
        setattr(self, name, value)
        return value

    def _get_module(self, module_name: str):
        # Relative import under this package; any failure is re-raised with context so the
        # user sees which lazy submodule broke.
        try:
            return importlib.import_module("." + module_name, self.__name__)
        except Exception as e:
            raise RuntimeError(
                f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
                f" traceback):\n{e}"
            ) from e

    def __reduce__(self):
        # Support pickling by reconstructing from the constructor arguments.
        return (self.__class__, (self._name, self.__file__, self._import_structure))
diffusers/src/diffusers/utils/import_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/import_utils.py", "repo_id": "diffusers", "token_count": 12599 }
192
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import sys
import unittest

import numpy as np
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoPipelineForImage2Image,
    AutoPipelineForText2Image,
    DDIMScheduler,
    DiffusionPipeline,
    LCMScheduler,
    StableDiffusionPipeline,
)
from diffusers.utils.import_utils import is_accelerate_available
from diffusers.utils.testing_utils import (
    Expectations,
    backend_empty_cache,
    load_image,
    nightly,
    numpy_cosine_similarity_distance,
    require_peft_backend,
    require_torch_accelerator,
    slow,
    torch_device,
)


sys.path.append(".")

from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


if is_accelerate_available():
    from accelerate.utils import release_memory


class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
    """Runs the shared PEFT-LoRA loader test suite against a tiny Stable Diffusion setup."""

    pipeline_class = StableDiffusionPipeline
    scheduler_cls = DDIMScheduler
    scheduler_kwargs = {
        "beta_start": 0.00085,
        "beta_end": 0.012,
        "beta_schedule": "scaled_linear",
        "clip_sample": False,
        "set_alpha_to_one": False,
        "steps_offset": 1,
    }
    unet_kwargs = {
        "block_out_channels": (32, 64),
        "layers_per_block": 2,
        "sample_size": 32,
        "in_channels": 4,
        "out_channels": 4,
        "down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"),
        "up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"),
        "cross_attention_dim": 32,
    }
    vae_kwargs = {
        "block_out_channels": [32, 64],
        "in_channels": 3,
        "out_channels": 3,
        "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
        "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
        "latent_channels": 4,
    }
    text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2"
    tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"

    @property
    def output_shape(self):
        # (batch, height, width, channels) of the numpy output image.
        return (1, 64, 64, 3)

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    # Keeping this test here makes sense because it doesn't involve any integration
    # (value assertions on logits).
    @slow
    @require_torch_accelerator
    def test_integration_move_lora_cpu(self):
        """set_lora_device should move only the named adapter's weights between CPU and accelerator."""
        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
        lora_id = "takuma104/lora-test-text-encoder-lora-target"

        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        pipe.load_lora_weights(lora_id, adapter_name="adapter-1")
        pipe.load_lora_weights(lora_id, adapter_name="adapter-2")
        pipe = pipe.to(torch_device)

        self.assertTrue(
            check_if_lora_correctly_set(pipe.text_encoder),
            "Lora not correctly set in text encoder",
        )

        self.assertTrue(
            check_if_lora_correctly_set(pipe.unet),
            "Lora not correctly set in unet",
        )

        # We will offload the first adapter in CPU and check if the offloading
        # has been performed correctly
        pipe.set_lora_device(["adapter-1"], "cpu")

        # Dropout/Identity submodules have no weights, hence the isinstance filter.
        for name, module in pipe.unet.named_modules():
            if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device == torch.device("cpu"))
            elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device != torch.device("cpu"))

        for name, module in pipe.text_encoder.named_modules():
            if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device == torch.device("cpu"))
            elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device != torch.device("cpu"))

        # Moving back by device index (0) should take the adapter off CPU again.
        pipe.set_lora_device(["adapter-1"], 0)

        for n, m in pipe.unet.named_modules():
            if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)):
                self.assertTrue(m.weight.device != torch.device("cpu"))

        for n, m in pipe.text_encoder.named_modules():
            if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)):
                self.assertTrue(m.weight.device != torch.device("cpu"))

        # Moving both adapters at once must also work.
        pipe.set_lora_device(["adapter-1", "adapter-2"], torch_device)

        for n, m in pipe.unet.named_modules():
            if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
                self.assertTrue(m.weight.device != torch.device("cpu"))

        for n, m in pipe.text_encoder.named_modules():
            if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
                self.assertTrue(m.weight.device != torch.device("cpu"))

    @slow
    @require_torch_accelerator
    def test_integration_move_lora_dora_cpu(self):
        """Same device-move check as above, but for DoRA adapters (use_dora=True)."""
        from peft import LoraConfig

        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
        unet_lora_config = LoraConfig(
            init_lora_weights="gaussian",
            target_modules=["to_k", "to_q", "to_v", "to_out.0"],
            use_dora=True,
        )
        text_lora_config = LoraConfig(
            init_lora_weights="gaussian",
            target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
            use_dora=True,
        )

        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        pipe.unet.add_adapter(unet_lora_config, "adapter-1")
        pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")

        self.assertTrue(
            check_if_lora_correctly_set(pipe.text_encoder),
            "Lora not correctly set in text encoder",
        )

        self.assertTrue(
            check_if_lora_correctly_set(pipe.unet),
            "Lora not correctly set in unet",
        )

        # Freshly added adapters start on CPU (the pipeline was never moved to the accelerator).
        for name, param in pipe.unet.named_parameters():
            if "lora_" in name:
                self.assertEqual(param.device, torch.device("cpu"))

        for name, param in pipe.text_encoder.named_parameters():
            if "lora_" in name:
                self.assertEqual(param.device, torch.device("cpu"))

        pipe.set_lora_device(["adapter-1"], torch_device)

        for name, param in pipe.unet.named_parameters():
            if "lora_" in name:
                self.assertNotEqual(param.device, torch.device("cpu"))

        for name, param in pipe.text_encoder.named_parameters():
            if "lora_" in name:
                self.assertNotEqual(param.device, torch.device("cpu"))

    @slow
    @require_torch_accelerator
    def test_integration_set_lora_device_different_target_layers(self):
        # fixes a bug that occurred when calling set_lora_device with multiple adapters loaded that target different
        # layers, see #11833
        from peft import LoraConfig

        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        # configs partly target the same, partly different layers
        config0 = LoraConfig(target_modules=["to_k", "to_v"])
        config1 = LoraConfig(target_modules=["to_k", "to_q"])
        pipe.unet.add_adapter(config0, adapter_name="adapter-0")
        pipe.unet.add_adapter(config1, adapter_name="adapter-1")
        pipe = pipe.to(torch_device)

        self.assertTrue(
            check_if_lora_correctly_set(pipe.unet),
            "Lora not correctly set in unet",
        )

        # sanity check that the adapters don't target the same layers, otherwise the test passes even without the fix
        modules_adapter_0 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-0")}
        modules_adapter_1 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-1")}
        self.assertNotEqual(modules_adapter_0, modules_adapter_1)
        self.assertTrue(modules_adapter_0 - modules_adapter_1)
        self.assertTrue(modules_adapter_1 - modules_adapter_0)

        # setting both separately works
        pipe.set_lora_device(["adapter-0"], "cpu")
        pipe.set_lora_device(["adapter-1"], "cpu")

        for name, module in pipe.unet.named_modules():
            if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device == torch.device("cpu"))
            elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device == torch.device("cpu"))

        # setting both at once also works
        pipe.set_lora_device(["adapter-0", "adapter-1"], torch_device)

        for name, module in pipe.unet.named_modules():
            if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device != torch.device("cpu"))
            elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
                self.assertTrue(module.weight.device != torch.device("cpu"))


@slow
@nightly
@require_torch_accelerator
@require_peft_backend
class LoraIntegrationTests(unittest.TestCase):
    """End-to-end LoRA tests against real hub checkpoints with golden image-slice assertions."""

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_integration_logits_with_scale(self):
        """LoRA applied with a 0.5 scale via cross_attention_kwargs should match the golden slice."""
        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
        lora_id = "takuma104/lora-test-text-encoder-lora-target"

        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
        pipe.load_lora_weights(lora_id)
        pipe = pipe.to(torch_device)

        self.assertTrue(
            check_if_lora_correctly_set(pipe.text_encoder),
            "Lora not correctly set in text encoder",
        )

        prompt = "a red sks dog"

        images = pipe(
            prompt=prompt,
            num_inference_steps=15,
            cross_attention_kwargs={"scale": 0.5},
            generator=torch.manual_seed(0),
            output_type="np",
        ).images

        expected_slice_scale = np.array([0.307, 0.283, 0.310, 0.310, 0.300, 0.314, 0.336, 0.314, 0.321])

        predicted_slice = images[0, -3:, -3:, -1].flatten()
        max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
        assert max_diff < 1e-3

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_integration_logits_no_scale(self):
        """LoRA applied at full strength (no scale kwarg) should match the golden slice."""
        path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
        lora_id = "takuma104/lora-test-text-encoder-lora-target"

        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
        pipe.load_lora_weights(lora_id)
        pipe = pipe.to(torch_device)

        self.assertTrue(
            check_if_lora_correctly_set(pipe.text_encoder),
            "Lora not correctly set in text encoder",
        )

        prompt = "a red sks dog"

        images = pipe(prompt=prompt, num_inference_steps=30, generator=torch.manual_seed(0), output_type="np").images

        expected_slice_scale = np.array([0.074, 0.064, 0.073, 0.0842, 0.069, 0.0641, 0.0794, 0.076, 0.084])

        predicted_slice = images[0, -3:, -3:, -1].flatten()
        max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)

        assert max_diff < 1e-3

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_dreambooth_old_format(self):
        """Legacy (pre-PEFT) DreamBooth LoRA checkpoints must still load and reproduce the golden slice."""
        generator = torch.Generator("cpu").manual_seed(0)

        lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example"

        base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.load_lora_weights(lora_model_id)

        images = pipe(
            "A photo of a sks dog floating in the river", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()

        expected = np.array([0.7207, 0.6787, 0.6010, 0.7478, 0.6838, 0.6064, 0.6984, 0.6443, 0.5785])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-4

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_dreambooth_text_encoder_new_format(self):
        """New-format DreamBooth LoRA (with text-encoder weights) loads and matches the golden slice."""
        generator = torch.Generator().manual_seed(0)

        lora_model_id = "hf-internal-testing/lora-trained"

        base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.load_lora_weights(lora_model_id)

        images = pipe("A photo of a sks dog", output_type="np", generator=generator, num_inference_steps=2).images

        images = images[0, -3:, -3:, -1].flatten()

        expected = np.array([0.6628, 0.6138, 0.5390, 0.6625, 0.6130, 0.5463, 0.6166, 0.5788, 0.5359])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-4

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_a1111(self):
        """A1111-formatted (CivitAI) LoRA checkpoints load via weight_name and match the golden slice."""
        generator = torch.Generator().manual_seed(0)

        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None).to(
            torch_device
        )
        lora_model_id = "hf-internal-testing/civitai-light-shadow-lora"
        lora_filename = "light_and_shadow.safetensors"
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)

        images = pipe(
            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()
        expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-3

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_lycoris(self):
        """LyCORIS-formatted LoRA checkpoints load and match the golden slice."""
        generator = torch.Generator().manual_seed(0)

        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/Amixx", safety_checker=None, use_safetensors=True, variant="fp16"
        ).to(torch_device)
        lora_model_id = "hf-internal-testing/edgLycorisMugler-light"
        lora_filename = "edgLycorisMugler-light.safetensors"
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)

        images = pipe(
            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()
        expected = np.array([0.6463, 0.658, 0.599, 0.6542, 0.6512, 0.6213, 0.658, 0.6485, 0.6017])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-3

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_a1111_with_model_cpu_offload(self):
        """Same as test_a1111 but with model-level CPU offload enabled; output must be identical."""
        generator = torch.Generator().manual_seed(0)

        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None)
        pipe.enable_model_cpu_offload(device=torch_device)
        lora_model_id = "hf-internal-testing/civitai-light-shadow-lora"
        lora_filename = "light_and_shadow.safetensors"
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)

        images = pipe(
            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()
        expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-3

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_a1111_with_sequential_cpu_offload(self):
        """Same as test_a1111 but with sequential CPU offload enabled; output must be identical."""
        generator = torch.Generator().manual_seed(0)

        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None)
        pipe.enable_sequential_cpu_offload(device=torch_device)
        lora_model_id = "hf-internal-testing/civitai-light-shadow-lora"
        lora_filename = "light_and_shadow.safetensors"
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)

        images = pipe(
            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()
        expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-3

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_kohya_sd_v15_with_higher_dimensions(self):
        """Kohya LoRA with higher-rank dimensions loads on SD v1.5 and matches the golden slice."""
        generator = torch.Generator().manual_seed(0)

        pipe = StableDiffusionPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
        ).to(torch_device)
        lora_model_id = "hf-internal-testing/urushisato-lora"
        lora_filename = "urushisato_v15.safetensors"
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)

        images = pipe(
            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
        ).images

        images = images[0, -3:, -3:, -1].flatten()
        expected = np.array([0.7165, 0.6616, 0.5833, 0.7504, 0.6718, 0.587, 0.6871, 0.6361, 0.5694])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-3

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_vanilla_funetuning(self):
        """A plain (non-DreamBooth) fine-tuned LoRA reproduces device-specific golden slices."""
        generator = torch.Generator().manual_seed(0)

        lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4"

        base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

        pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.load_lora_weights(lora_model_id)

        images = pipe("A pokemon with blue eyes.", output_type="np", generator=generator, num_inference_steps=2).images

        image_slice = images[0, -3:, -3:, -1].flatten()

        # Golden values differ slightly per accelerator type/compute capability.
        expected_slices = Expectations(
            {
                ("xpu", 3): np.array(
                    [
                        0.6544,
                        0.6127,
                        0.5397,
                        0.6845,
                        0.6047,
                        0.5469,
                        0.6349,
                        0.5906,
                        0.5382,
                    ]
                ),
                ("cuda", 7): np.array(
                    [
                        0.7406,
                        0.699,
                        0.5963,
                        0.7493,
                        0.7045,
                        0.6096,
                        0.6886,
                        0.6388,
                        0.583,
                    ]
                ),
                ("cuda", 8): np.array(
                    [
                        0.6542,
                        0.61253,
                        0.5396,
                        0.6843,
                        0.6044,
                        0.5468,
                        0.6349,
                        0.5905,
                        0.5381,
                    ]
                ),
            }
        )
        expected_slice = expected_slices.get_expectation()

        max_diff = numpy_cosine_similarity_distance(expected_slice, image_slice)
        assert max_diff < 1e-4

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_unload_kohya_lora(self):
        """After unload_lora_weights, output must return to the pre-LoRA baseline."""
        generator = torch.manual_seed(0)
        prompt = "masterpiece, best quality, mountain"
        num_inference_steps = 2

        pipe = StableDiffusionPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
        ).to(torch_device)
        initial_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        initial_images = initial_images[0, -3:, -3:, -1].flatten()

        lora_model_id = "hf-internal-testing/civitai-colored-icons-lora"
        lora_filename = "Colored_Icons_by_vizsumit.safetensors"

        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
        generator = torch.manual_seed(0)
        lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        lora_images = lora_images[0, -3:, -3:, -1].flatten()

        pipe.unload_lora_weights()
        generator = torch.manual_seed(0)
        unloaded_lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()

        # LoRA must change the output; unloading must restore it.
        self.assertFalse(np.allclose(initial_images, lora_images))
        self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))
        release_memory(pipe)

    def test_load_unload_load_kohya_lora(self):
        # This test ensures that a Kohya-style LoRA can be safely unloaded and then loaded
        # without introducing any side-effects. Even though the test uses a Kohya-style
        # LoRA, the underlying adapter handling mechanism is format-agnostic.
        generator = torch.manual_seed(0)
        prompt = "masterpiece, best quality, mountain"
        num_inference_steps = 2

        pipe = StableDiffusionPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
        ).to(torch_device)
        initial_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        initial_images = initial_images[0, -3:, -3:, -1].flatten()

        lora_model_id = "hf-internal-testing/civitai-colored-icons-lora"
        lora_filename = "Colored_Icons_by_vizsumit.safetensors"

        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
        generator = torch.manual_seed(0)
        lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        lora_images = lora_images[0, -3:, -3:, -1].flatten()

        pipe.unload_lora_weights()
        generator = torch.manual_seed(0)
        unloaded_lora_images = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()

        self.assertFalse(np.allclose(initial_images, lora_images))
        self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))

        # make sure we can load a LoRA again after unloading and they don't have
        # any undesired effects.
        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
        generator = torch.manual_seed(0)
        lora_images_again = pipe(
            prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
        ).images
        lora_images_again = lora_images_again[0, -3:, -3:, -1].flatten()

        self.assertTrue(np.allclose(lora_images, lora_images_again, atol=1e-3))
        release_memory(pipe)

    def test_not_empty_state_dict(self):
        # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
        pipe = AutoPipelineForText2Image.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to(torch_device)
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

        cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
        lcm_lora = load_file(cached_file)

        pipe.load_lora_weights(lcm_lora, adapter_name="lcm")
        # Loading must not consume (empty out) the caller's state dict.
        self.assertTrue(lcm_lora != {})
        release_memory(pipe)

    def test_load_unload_load_state_dict(self):
        # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
        pipe = AutoPipelineForText2Image.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to(torch_device)
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

        cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
        lcm_lora = load_file(cached_file)
        previous_state_dict = lcm_lora.copy()

        pipe.load_lora_weights(lcm_lora, adapter_name="lcm")
        self.assertDictEqual(lcm_lora, previous_state_dict)

        pipe.unload_lora_weights()
        pipe.load_lora_weights(lcm_lora, adapter_name="lcm")
        self.assertDictEqual(lcm_lora, previous_state_dict)

        release_memory(pipe)

    def test_sdv1_5_lcm_lora(self):
        """LCM-LoRA on SD v1.5 text-to-image matches a stored reference image."""
        pipe = DiffusionPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

        generator = torch.Generator("cpu").manual_seed(0)

        lora_model_id = "latent-consistency/lcm-lora-sdv1-5"
        pipe.load_lora_weights(lora_model_id)

        image = pipe(
            "masterpiece, best quality, mountain", generator=generator, num_inference_steps=4, guidance_scale=0.5
        ).images[0]

        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdv15_lcm_lora.png"
        )

        image_np = pipe.image_processor.pil_to_numpy(image)
        expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)

        max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
        assert max_diff < 1e-4

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_sdv1_5_lcm_lora_img2img(self):
        """LCM-LoRA on SD v1.5 image-to-image matches a stored reference image."""
        pipe = AutoPipelineForImage2Image.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape.png"
        )

        generator = torch.Generator("cpu").manual_seed(0)

        lora_model_id = "latent-consistency/lcm-lora-sdv1-5"
        pipe.load_lora_weights(lora_model_id)

        image = pipe(
            "snowy mountain",
            generator=generator,
            image=init_image,
            strength=0.5,
            num_inference_steps=4,
            guidance_scale=0.5,
        ).images[0]

        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdv15_lcm_lora_img2img.png"
        )

        image_np = pipe.image_processor.pil_to_numpy(image)
        expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)

        max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
        assert max_diff < 1e-4

        pipe.unload_lora_weights()
        release_memory(pipe)

    def test_sd_load_civitai_empty_network_alpha(self):
        """
        This test simply checks that loading a LoRA with an empty network alpha works fine
        See: https://github.com/huggingface/diffusers/issues/5606
        """
        pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
        pipeline.enable_sequential_cpu_offload(device=torch_device)
        civitai_path = hf_hub_download("ybelkada/test-ahi-civitai", "ahi_lora_weights.safetensors")
        pipeline.load_lora_weights(civitai_path, adapter_name="ahri")

        images = pipeline(
            "ahri, masterpiece, league of legends",
            output_type="np",
            generator=torch.manual_seed(156),
            num_inference_steps=5,
        ).images
        images = images[0, -3:, -3:, -1].flatten()
        expected = np.array([0.0, 0.0, 0.0, 0.002557, 0.020954, 0.001792, 0.006581, 0.00591, 0.002995])

        max_diff = numpy_cosine_similarity_distance(expected, images)
        assert max_diff < 1e-3

        pipeline.unload_lora_weights()
        release_memory(pipeline)
diffusers/tests/lora/test_lora_layers_sd.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_sd.py", "repo_id": "diffusers", "token_count": 14124 }
193
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from diffusers import AutoencoderKLMagvit
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device

from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLMagvitTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Common model-level checks for the MagViT-style KL video autoencoder."""

    model_class = AutoencoderKLMagvit
    main_input_name = "sample"
    base_precision = 1e-2

    def get_autoencoder_kl_magvit_config(self):
        """Build a deliberately tiny init config so the common tests run fast."""
        down_blocks = [
            "SpatialDownBlock3D",
            "SpatialTemporalDownBlock3D",
            "SpatialTemporalDownBlock3D",
            "SpatialTemporalDownBlock3D",
        ]
        up_blocks = [
            "SpatialUpBlock3D",
            "SpatialTemporalUpBlock3D",
            "SpatialTemporalUpBlock3D",
            "SpatialTemporalUpBlock3D",
        ]
        return {
            "in_channels": 3,
            "latent_channels": 4,
            "out_channels": 3,
            "block_out_channels": [8, 8, 8, 8],
            "down_block_types": down_blocks,
            "up_block_types": up_blocks,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "spatial_group_norm": True,
        }

    @property
    def dummy_input(self):
        """A random video batch of shape (batch, channels, frames, height, width)."""
        sample = floats_tensor((2, 3, 9, 16, 16)).to(torch_device)
        return {"sample": sample}

    @property
    def input_shape(self):
        # (channels, frames, height, width) — batch dimension excluded.
        return (3, 9, 16, 16)

    @property
    def output_shape(self):
        # Reconstruction has the same shape as the input.
        return (3, 9, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        """Return the (init kwargs, forward inputs) pair consumed by the shared mixin tests."""
        return self.get_autoencoder_kl_magvit_config(), self.dummy_input

    def test_gradient_checkpointing_is_applied(self):
        """Checkpointing must be wired into both the encoder and decoder submodules."""
        super().test_gradient_checkpointing_is_applied(
            expected_set={"EasyAnimateEncoder", "EasyAnimateDecoder"}
        )

    @unittest.skip("Not quite sure why this test fails. Revisit later.")
    def test_effective_gradient_checkpointing(self):
        pass

    @unittest.skip("Unsupported test.")
    def test_forward_with_norm_groups(self):
        pass
diffusers/tests/models/autoencoders/test_models_autoencoder_magvit.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_autoencoder_magvit.py", "repo_id": "diffusers", "token_count": 1237 }
194
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import DiTTransformer2DModel, Transformer2DModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    slow,
    torch_device,
)

from ..test_modeling_common import ModelTesterMixin


enable_full_determinism()


class DiTTransformer2DModelTests(ModelTesterMixin, unittest.TestCase):
    """Model-level checks for DiTTransformer2DModel, including legacy class remapping."""

    model_class = DiTTransformer2DModel
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """Random latents plus matching timesteps and class labels for one forward pass."""
        batch = 4
        # Latent sample: (batch, in_channels=4, sample_size=8, sample_size=8).
        sample = floats_tensor((batch, 4, 8, 8)).to(torch_device)
        # Timesteps drawn from the scheduler's 1000-step training range.
        steps = torch.randint(0, 1000, size=(batch,)).to(torch_device)
        # Class-conditioning labels from a 4-class toy label space.
        labels = torch.randint(0, 4, size=(batch,)).to(torch_device)
        return {"hidden_states": sample, "timestep": steps, "class_labels": labels}

    @property
    def input_shape(self):
        return (4, 8, 8)

    @property
    def output_shape(self):
        # out_channels is 8, so the output doubles the channel dimension.
        return (8, 8, 8)

    def prepare_init_args_and_inputs_for_common(self):
        """Return the (init kwargs, forward inputs) pair used by the shared mixin tests."""
        init_dict = {
            "in_channels": 4,
            "out_channels": 8,
            "activation_fn": "gelu-approximate",
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "attention_bias": True,
            "num_layers": 1,
            "norm_type": "ada_norm_zero",
            "num_embeds_ada_norm": 8,
            "patch_size": 2,
            "sample_size": 8,
        }
        return init_dict, self.dummy_input

    def test_output(self):
        """Output shape is (batch,) + output_shape."""
        expected = (self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
        super().test_output(expected_output_shape=expected)

    def test_correct_class_remapping_from_dict_config(self):
        """A DiT-style dict config given to Transformer2DModel must yield a DiTTransformer2DModel."""
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        remapped = Transformer2DModel.from_config(init_dict)
        assert isinstance(remapped, DiTTransformer2DModel)

    def test_gradient_checkpointing_is_applied(self):
        super().test_gradient_checkpointing_is_applied(expected_set={"DiTTransformer2DModel"})

    def test_effective_gradient_checkpointing(self):
        super().test_effective_gradient_checkpointing(loss_tolerance=1e-4)

    def test_correct_class_remapping_from_pretrained_config(self):
        """Remapping must also trigger when the config comes from a pretrained checkpoint."""
        config = DiTTransformer2DModel.load_config("facebook/DiT-XL-2-256", subfolder="transformer")
        remapped = Transformer2DModel.from_config(config)
        assert isinstance(remapped, DiTTransformer2DModel)

    @slow
    def test_correct_class_remapping(self):
        """Full from_pretrained load through Transformer2DModel must return the DiT subclass."""
        loaded = Transformer2DModel.from_pretrained("facebook/DiT-XL-2-256", subfolder="transformer")
        assert isinstance(loaded, DiTTransformer2DModel)
diffusers/tests/models/transformers/test_models_dit_transformer2d.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_dit_transformer2d.py", "repo_id": "diffusers", "token_count": 1438 }
195
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for ``HunyuanVideoTransformer3DModel``.

Covers the base text-to-video configuration plus the Skyreels, latent-concat
and token-replace image-to-video variants, each paired with a torch.compile
smoke-test class that reuses the variant's init args and dummy inputs.
"""

import unittest

import torch

from diffusers import HunyuanVideoTransformer3DModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    torch_device,
)

from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin


enable_full_determinism()


class HunyuanVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase):
    """Base text-to-video configuration: 4 latent channels, guidance embeds on."""

    model_class = HunyuanVideoTransformer3DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        # Tiny tensor sizes keep the forward pass cheap while still exercising
        # every input the model accepts.
        batch_size = 1
        num_channels = 4
        num_frames = 1
        height = 16
        width = 16
        text_encoder_embedding_dim = 16
        pooled_projection_dim = 8
        sequence_length = 12

        hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
        pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device)
        encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device)
        guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device, dtype=torch.float32)

        return {
            "hidden_states": hidden_states,
            "timestep": timestep,
            "encoder_hidden_states": encoder_hidden_states,
            "pooled_projections": pooled_projections,
            "encoder_attention_mask": encoder_attention_mask,
            "guidance": guidance,
        }

    @property
    def input_shape(self):
        return (4, 1, 16, 16)

    @property
    def output_shape(self):
        return (4, 1, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 4,
            "out_channels": 4,
            "num_attention_heads": 2,
            "attention_head_dim": 10,
            "num_layers": 1,
            "num_single_layers": 1,
            "num_refiner_layers": 1,
            "patch_size": 1,
            "patch_size_t": 1,
            "guidance_embeds": True,
            "text_embed_dim": 16,
            "pooled_projection_dim": 8,
            "rope_axes_dim": (2, 4, 4),
            "image_condition_type": None,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"HunyuanVideoTransformer3DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)


class HunyuanTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    model_class = HunyuanVideoTransformer3DModel

    def prepare_init_args_and_inputs_for_common(self):
        # Reuse the base variant's configuration for the compile smoke test.
        return HunyuanVideoTransformer3DTests().prepare_init_args_and_inputs_for_common()


class HunyuanSkyreelsImageToVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase):
    """Skyreels image-to-video variant: 8 input channels, 4 output channels."""

    model_class = HunyuanVideoTransformer3DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        batch_size = 1
        num_channels = 8
        num_frames = 1
        height = 16
        width = 16
        text_encoder_embedding_dim = 16
        pooled_projection_dim = 8
        sequence_length = 12

        hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
        pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device)
        encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device)
        guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device, dtype=torch.float32)

        return {
            "hidden_states": hidden_states,
            "timestep": timestep,
            "encoder_hidden_states": encoder_hidden_states,
            "pooled_projections": pooled_projections,
            "encoder_attention_mask": encoder_attention_mask,
            "guidance": guidance,
        }

    @property
    def input_shape(self):
        return (8, 1, 16, 16)

    @property
    def output_shape(self):
        return (4, 1, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 8,
            "out_channels": 4,
            "num_attention_heads": 2,
            "attention_head_dim": 10,
            "num_layers": 1,
            "num_single_layers": 1,
            "num_refiner_layers": 1,
            "patch_size": 1,
            "patch_size_t": 1,
            "guidance_embeds": True,
            "text_embed_dim": 16,
            "pooled_projection_dim": 8,
            "rope_axes_dim": (2, 4, 4),
            "image_condition_type": None,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        # in_channels != out_channels, so the base mixin's default expectation
        # (output shape == input shape) would be wrong here.
        super().test_output(expected_output_shape=(1, *self.output_shape))

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"HunyuanVideoTransformer3DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)


class HunyuanSkyreelsImageToVideoCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    model_class = HunyuanVideoTransformer3DModel

    def prepare_init_args_and_inputs_for_common(self):
        return HunyuanSkyreelsImageToVideoTransformer3DTests().prepare_init_args_and_inputs_for_common()


class HunyuanVideoImageToVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase):
    """Latent-concat image-to-video variant: 2 * 4 latent channels + 1 mask channel."""

    model_class = HunyuanVideoTransformer3DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        batch_size = 1
        num_channels = 2 * 4 + 1
        num_frames = 1
        height = 16
        width = 16
        text_encoder_embedding_dim = 16
        pooled_projection_dim = 8
        sequence_length = 12

        hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
        pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device)
        encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device)

        # No `guidance` input: this variant is configured with guidance_embeds=False.
        return {
            "hidden_states": hidden_states,
            "timestep": timestep,
            "encoder_hidden_states": encoder_hidden_states,
            "pooled_projections": pooled_projections,
            "encoder_attention_mask": encoder_attention_mask,
        }

    @property
    def input_shape(self):
        # FIX: 2 * 4 + 1 = 9 channels, matching `dummy_input` and `in_channels`
        # below (this previously reported 8, inconsistent with the actual input).
        return (9, 1, 16, 16)

    @property
    def output_shape(self):
        return (4, 1, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 2 * 4 + 1,
            "out_channels": 4,
            "num_attention_heads": 2,
            "attention_head_dim": 10,
            "num_layers": 1,
            "num_single_layers": 1,
            "num_refiner_layers": 1,
            "patch_size": 1,
            "patch_size_t": 1,
            "guidance_embeds": False,
            "text_embed_dim": 16,
            "pooled_projection_dim": 8,
            "rope_axes_dim": (2, 4, 4),
            "image_condition_type": "latent_concat",
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        super().test_output(expected_output_shape=(1, *self.output_shape))

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"HunyuanVideoTransformer3DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)


class HunyuanImageToVideoCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    model_class = HunyuanVideoTransformer3DModel

    def prepare_init_args_and_inputs_for_common(self):
        return HunyuanVideoImageToVideoTransformer3DTests().prepare_init_args_and_inputs_for_common()


class HunyuanVideoTokenReplaceImageToVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase):
    """Token-replace image-to-video variant: 2 input channels, guidance embeds on."""

    model_class = HunyuanVideoTransformer3DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        batch_size = 1
        num_channels = 2
        num_frames = 1
        height = 16
        width = 16
        text_encoder_embedding_dim = 16
        pooled_projection_dim = 8
        sequence_length = 12

        hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device)
        pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device)
        encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device)
        guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device, dtype=torch.float32)

        return {
            "hidden_states": hidden_states,
            "timestep": timestep,
            "encoder_hidden_states": encoder_hidden_states,
            "pooled_projections": pooled_projections,
            "encoder_attention_mask": encoder_attention_mask,
            "guidance": guidance,
        }

    @property
    def input_shape(self):
        # FIX: matches the 2-channel `dummy_input` / `in_channels` of this
        # variant (this previously reported (8, 1, 16, 16)).
        return (2, 1, 16, 16)

    @property
    def output_shape(self):
        return (4, 1, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 2,
            "out_channels": 4,
            "num_attention_heads": 2,
            "attention_head_dim": 10,
            "num_layers": 1,
            "num_single_layers": 1,
            "num_refiner_layers": 1,
            "patch_size": 1,
            "patch_size_t": 1,
            "guidance_embeds": True,
            "text_embed_dim": 16,
            "pooled_projection_dim": 8,
            "rope_axes_dim": (2, 4, 4),
            "image_condition_type": "token_replace",
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        super().test_output(expected_output_shape=(1, *self.output_shape))

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"HunyuanVideoTransformer3DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)


class HunyuanVideoTokenReplaceCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    model_class = HunyuanVideoTransformer3DModel

    def prepare_init_args_and_inputs_for_common(self):
        return HunyuanVideoTokenReplaceImageToVideoTransformer3DTests().prepare_init_args_and_inputs_for_common()
diffusers/tests/models/transformers/test_models_transformer_hunyuan_video.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_hunyuan_video.py", "repo_id": "diffusers", "token_count": 5242 }
196
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for ``UNet2DModel``: a tiny attention UNet, an LDM-style UNet loaded
from the Hub, and an NCSNpp (score-based) configuration with Fourier time
embeddings."""

import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import logging
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    floats_tensor,
    require_torch_accelerator,
    slow,
    torch_all_close,
    torch_device,
)

from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (4, 8),
            "norm_num_groups": 2,
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_mid_block_attn_groups(self):
        """`attn_norm_num_groups` must produce a group norm in the mid-block attention."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["add_attention"] = True
        init_dict["attn_norm_num_groups"] = 4

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        self.assertIsNotNone(
            model.mid_block.attentions[0].group_norm, "Mid block Attention group norm should exist but does not."
        )
        self.assertEqual(
            model.mid_block.attentions[0].group_norm.num_groups,
            init_dict["attn_norm_num_groups"],
            "Mid block Attention group norm does not have the expected number of groups.",
        )

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.to_tuple()[0]

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    def test_mid_block_none(self):
        """`mid_block_type=None` must drop the mid block and change the outputs."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        mid_none_init_dict, mid_none_inputs_dict = self.prepare_init_args_and_inputs_for_common()
        mid_none_init_dict["mid_block_type"] = None

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        mid_none_model = self.model_class(**mid_none_init_dict)
        mid_none_model.to(torch_device)
        mid_none_model.eval()

        self.assertIsNone(mid_none_model.mid_block, "Mid block should not exist.")

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.to_tuple()[0]

        with torch.no_grad():
            mid_none_output = mid_none_model(**mid_none_inputs_dict)

            if isinstance(mid_none_output, dict):
                mid_none_output = mid_none_output.to_tuple()[0]

        self.assertFalse(torch.allclose(output, mid_none_output, rtol=1e-3), "outputs should be different.")

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {
            "AttnUpBlock2D",
            "AttnDownBlock2D",
            "UNetMidBlock2D",
            "UpBlock2D",
            "DownBlock2D",
        }

        # NOTE: unlike UNet2DConditionModel, UNet2DModel does not currently support tuples for `attention_head_dim`
        attention_head_dim = 8
        block_out_channels = (16, 32)

        super().test_gradient_checkpointing_is_applied(
            expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels
        )


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @require_torch_accelerator
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @require_torch_accelerator
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        backend_empty_cache(torch_device)
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"DownBlock2D", "UNetMidBlock2D", "UpBlock2D"}

        # NOTE: unlike UNet2DConditionModel, UNet2DModel does not currently support tuples for `attention_head_dim`
        attention_head_dim = 32
        block_out_channels = (32, 64)

        super().test_gradient_checkpointing_is_applied(
            expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels
        )


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        # FIX: this property previously declared a `sizes=(32, 32)` parameter,
        # which a property getter can never receive — the default was always
        # used. The constant is inlined instead.
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4836.2178, -6487.1470, -3816.8196, -7964.9302, -10966.3037, -20043.5957, 8137.0513, 2340.3328, 544.6056])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    @unittest.skip("Test not supported.")
    def test_forward_with_norm_groups(self):
        # not required for this model
        pass

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {
            "UNetMidBlock2D",
        }
        block_out_channels = (32, 64, 64, 64)

        super().test_gradient_checkpointing_is_applied(
            expected_set=expected_set, block_out_channels=block_out_channels
        )

    def test_effective_gradient_checkpointing(self):
        super().test_effective_gradient_checkpointing(skip={"time_proj.weight"})

    @unittest.skip(
        "To make layerwise casting work with this model, we will have to update the implementation. Due to potentially low usage, we don't support it here."
    )
    def test_layerwise_casting_inference(self):
        pass

    @unittest.skip(
        "To make layerwise casting work with this model, we will have to update the implementation. Due to potentially low usage, we don't support it here."
    )
    def test_layerwise_casting_memory(self):
        pass
diffusers/tests/models/unets/test_models_unet_2d.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_2d.py", "repo_id": "diffusers", "token_count": 6484 }
197
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for ``ConfigMixin`` / ``register_to_config``: registration, save/load
round-trips, cross-scheduler loading, config overrides and default tracking."""

import json
import tempfile
import unittest
from pathlib import Path

from diffusers import (
    DDIMScheduler,
    DDPMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    PNDMScheduler,
    logging,
)
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils.testing_utils import CaptureLogger


class SampleObject(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        e=[1, 3],
    ):
        pass


class SampleObject2(ConfigMixin):
    # Same as SampleObject but with `f` instead of `e`.
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        f=[1, 3],
    ):
        pass


class SampleObject3(ConfigMixin):
    # Superset: has both `e` and `f`.
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        e=[1, 3],
        f=[1, 3],
    ):
        pass


class SampleObject4(ConfigMixin):
    # Same keys as SampleObject3 but with different defaults for `e` and `f`.
    config_name = "config.json"

    @register_to_config
    def __init__(
        self,
        a=2,
        b=5,
        c=(2, 5),
        d="for diffusion",
        e=[1, 5],
        f=[5, 4],
    ):
        pass


class SampleObjectPaths(ConfigMixin):
    config_name = "config.json"

    @register_to_config
    def __init__(self, test_file_1=Path("foo/bar"), test_file_2=Path("foo bar\\bar")):
        pass


class ConfigTester(unittest.TestCase):
    def test_load_not_from_mixin(self):
        with self.assertRaises(ValueError):
            ConfigMixin.load_config("dummy_path")

    def test_register_to_config(self):
        obj = SampleObject()
        config = obj.config
        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == (2, 5)
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        # init ignore private arguments
        obj = SampleObject(_name_or_path="lalala")
        config = obj.config
        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == (2, 5)
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        # can override default
        obj = SampleObject(c=6)
        config = obj.config
        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == 6
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        # can use positional arguments.
        obj = SampleObject(1, c=6)
        config = obj.config
        assert config["a"] == 1
        assert config["b"] == 5
        assert config["c"] == 6
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

    def test_save_load(self):
        obj = SampleObject()
        config = obj.config

        assert config["a"] == 2
        assert config["b"] == 5
        assert config["c"] == (2, 5)
        assert config["d"] == "for diffusion"
        assert config["e"] == [1, 3]

        with tempfile.TemporaryDirectory() as tmpdirname:
            obj.save_config(tmpdirname)
            new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname))
            new_config = new_obj.config

        # unfreeze configs
        config = dict(config)
        new_config = dict(new_config)

        assert config.pop("c") == (2, 5)  # instantiated as tuple
        assert new_config.pop("c") == [2, 5]  # saved & loaded as list because of json
        config.pop("_use_default_values")
        assert config == new_config

    def test_load_ddim_from_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            ddim = DDIMScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert ddim.__class__ == DDIMScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_load_euler_from_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            euler = EulerDiscreteScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert euler.__class__ == EulerDiscreteScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_load_euler_ancestral_from_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            euler = EulerAncestralDiscreteScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert euler.__class__ == EulerAncestralDiscreteScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_load_pndm(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            pndm = PNDMScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert pndm.__class__ == PNDMScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_overwrite_config_on_load(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            ddpm = DDPMScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch",
                subfolder="scheduler",
                prediction_type="sample",
                beta_end=8,
            )

        with CaptureLogger(logger) as cap_logger_2:
            ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88)

        assert ddpm.__class__ == DDPMScheduler
        assert ddpm.config.prediction_type == "sample"
        assert ddpm.config.beta_end == 8
        assert ddpm_2.config.beta_start == 88

        # no warning should be thrown
        assert cap_logger.out == ""
        assert cap_logger_2.out == ""

    def test_load_dpmsolver(self):
        logger = logging.get_logger("diffusers.configuration_utils")
        # 30 for warning
        logger.setLevel(30)

        with CaptureLogger(logger) as cap_logger:
            dpm = DPMSolverMultistepScheduler.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
            )

        assert dpm.__class__ == DPMSolverMultistepScheduler
        # no warning should be thrown
        assert cap_logger.out == ""

    def test_use_default_values(self):
        # let's first save a config that should be in the form
        # a=2,
        # b=5,
        # c=(2, 5),
        # d="for diffusion",
        # e=[1, 3],
        config = SampleObject()

        config_dict = {k: v for k, v in config.config.items() if not k.startswith("_")}

        # make sure that default config has all keys in `_use_default_values`
        assert set(config_dict.keys()) == set(config.config._use_default_values)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config.save_config(tmpdirname)

            # now loading it with SampleObject2 should put f into `_use_default_values`
            config = SampleObject2.from_config(SampleObject2.load_config(tmpdirname))

            assert "f" in config.config._use_default_values
            assert config.config.f == [1, 3]

        # now loading the config, should **NOT** use [1, 3] for `f`, but the default [5, 4] value
        # **BECAUSE** it is part of `config.config._use_default_values`
        # (FIX: the comment previously claimed the default is [1, 4]; SampleObject4 declares f=[5, 4].)
        new_config = SampleObject4.from_config(config.config)
        assert new_config.config.f == [5, 4]

        config.config._use_default_values.pop()
        new_config_2 = SampleObject4.from_config(config.config)
        assert new_config_2.config.f == [1, 3]

        # Nevertheless "e" should still be correctly loaded to [1, 3] from SampleObject2 instead of defaulting to [1, 5]
        assert new_config_2.config.e == [1, 3]

    def test_check_path_types(self):
        # Verify that we get a string returned from a WindowsPath or PosixPath (depending on system)

        config = SampleObjectPaths()
        json_string = config.to_json_string()
        result = json.loads(json_string)
        assert result["test_file_1"] == config.config.test_file_1.as_posix()
        assert result["test_file_2"] == config.config.test_file_2.as_posix()
diffusers/tests/others/test_config.py/0
{ "file_path": "diffusers/tests/others/test_config.py", "repo_id": "diffusers", "token_count": 4259 }
198
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Slow Flax integration tests for ``FlaxStableDiffusionControlNetPipeline``
with canny-edge and openpose conditioning."""

import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        # FIX: the expected slice was accidentally double-nested (shape (1, 9))
        # while `output_slice` is 1-D; the comparison only worked via
        # broadcasting. Flattened to match `test_canny`.
        expected_slice = jnp.array(
            [0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]
        )

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diffusers/tests/pipelines/controlnet/test_flax_controlnet.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet/test_flax_controlnet.py", "repo_id": "diffusers", "token_count": 2109 }
199
import gc import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, FasterCacheConfig, FlowMatchEulerDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, nightly, numpy_cosine_similarity_distance, require_big_accelerator, slow, torch_device, ) from ..test_pipelines_common import ( FasterCacheTesterMixin, FirstBlockCacheTesterMixin, FluxIPAdapterTesterMixin, PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, check_qkv_fused_layers_exist, ) class FluxPipelineFastTests( PipelineTesterMixin, FluxIPAdapterTesterMixin, PyramidAttentionBroadcastTesterMixin, FasterCacheTesterMixin, FirstBlockCacheTesterMixin, unittest.TestCase, ): pipeline_class = FluxPipeline params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) batch_params = frozenset(["prompt"]) # there is no xformers processor for Flux test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True faster_cache_config = FasterCacheConfig( spatial_attention_block_skip_range=2, spatial_attention_timestep_skip_range=(-1, 901), unconditional_batch_skip_range=2, attention_weight_callback=lambda _: 0.5, is_guidance_distilled=True, ) def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): torch.manual_seed(0) transformer = FluxTransformer2DModel( patch_size=1, in_channels=4, num_layers=num_layers, num_single_layers=num_single_layers, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, 
hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModel(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=1, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, "image_encoder": None, "feature_extractor": None, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "max_sequence_length": 48, "output_type": "np", } return inputs def test_flux_different_prompts(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_same_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "a different prompt" output_different_prompts = pipe(**inputs).images[0] max_diff = np.abs(output_same_prompt - output_different_prompts).max() # Outputs should be different here # For some reasons, they don't show large differences self.assertGreater(max_diff, 1e-6, "Outputs should be different for different prompts.") def test_fused_qkv_projections(self): device = "cpu" # 
ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() self.assertTrue( check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), ("Something wrong with the fused attention layers. Expected all the attention projections to be fused."), ) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] self.assertTrue( np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ("Fusion of QKV projections shouldn't affect the outputs."), ) self.assertTrue( np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ("Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."), ) self.assertTrue( np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ("Original outputs should match when fused QKV projections are disabled."), ) def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) height_width_pairs = [(32, 32), (72, 57)] for height, width in height_width_pairs: expected_height = height - height % (pipe.vae_scale_factor * 2) expected_width = width - width % (pipe.vae_scale_factor * 2) inputs.update({"height": height, "width": width}) image = pipe(**inputs).images[0] output_height, output_width, _ = image.shape self.assertEqual( (output_height, 
output_width), (expected_height, expected_width), f"Output shape {image.shape} does not match expected shape {(expected_height, expected_width)}", ) def test_flux_true_cfg(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("generator") no_true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] inputs["negative_prompt"] = "bad quality" inputs["true_cfg_scale"] = 2.0 true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] self.assertFalse( np.allclose(no_true_cfg_out, true_cfg_out), "Outputs should be different when true_cfg_scale is set." ) @nightly @require_big_accelerator class FluxPipelineSlowTests(unittest.TestCase): pipeline_class = FluxPipeline repo_id = "black-forest-labs/FLUX.1-schnell" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, seed=0): generator = torch.Generator(device="cpu").manual_seed(seed) prompt_embeds = torch.load( hf_hub_download(repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/prompt_embeds.pt") ).to(torch_device) pooled_prompt_embeds = torch.load( hf_hub_download( repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/pooled_prompt_embeds.pt" ) ).to(torch_device) return { "prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "num_inference_steps": 2, "guidance_scale": 0.0, "max_sequence_length": 256, "output_type": "np", "generator": generator, } def test_flux_inference(self): pipe = self.pipeline_class.from_pretrained( self.repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None ).to(torch_device) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] image_slice = image[0, :10, :10] # fmt: off expected_slice = np.array( [0.3242, 0.3203, 0.3164, 0.3164, 0.3125, 0.3125, 0.3281, 0.3242, 
0.3203, 0.3301, 0.3262, 0.3242, 0.3281, 0.3242, 0.3203, 0.3262, 0.3262, 0.3164, 0.3262, 0.3281, 0.3184, 0.3281, 0.3281, 0.3203, 0.3281, 0.3281, 0.3164, 0.3320, 0.3320, 0.3203], dtype=np.float32, ) # fmt: on max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) self.assertLess( max_diff, 1e-4, f"Image slice is different from expected slice: {image_slice} != {expected_slice}" ) @slow @require_big_accelerator class FluxIPAdapterPipelineSlowTests(unittest.TestCase): pipeline_class = FluxPipeline repo_id = "black-forest-labs/FLUX.1-dev" image_encoder_pretrained_model_name_or_path = "openai/clip-vit-large-patch14" weight_name = "ip_adapter.safetensors" ip_adapter_repo_id = "XLabs-AI/flux-ip-adapter" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) prompt_embeds = torch.load( hf_hub_download(repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/prompt_embeds.pt") ) pooled_prompt_embeds = torch.load( hf_hub_download( repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/pooled_prompt_embeds.pt" ) ) negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) ip_adapter_image = np.zeros((1024, 1024, 3), dtype=np.uint8) return { "prompt_embeds": prompt_embeds, "pooled_prompt_embeds": pooled_prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "negative_pooled_prompt_embeds": negative_pooled_prompt_embeds, "ip_adapter_image": ip_adapter_image, "num_inference_steps": 2, "guidance_scale": 3.5, "true_cfg_scale": 4.0, "max_sequence_length": 256, "output_type": "np", "generator": generator, } def test_flux_ip_adapter_inference(self): pipe = 
self.pipeline_class.from_pretrained( self.repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None ) pipe.load_ip_adapter( self.ip_adapter_repo_id, weight_name=self.weight_name, image_encoder_pretrained_model_name_or_path=self.image_encoder_pretrained_model_name_or_path, ) pipe.set_ip_adapter_scale(1.0) pipe.enable_model_cpu_offload() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] image_slice = image[0, :10, :10] # fmt: off expected_slice = np.array( [0.1855, 0.1680, 0.1406, 0.1953, 0.1699, 0.1465, 0.2012, 0.1738, 0.1484, 0.2051, 0.1797, 0.1523, 0.2012, 0.1719, 0.1445, 0.2070, 0.1777, 0.1465, 0.2090, 0.1836, 0.1484, 0.2129, 0.1875, 0.1523, 0.2090, 0.1816, 0.1484, 0.2110, 0.1836, 0.1543], dtype=np.float32, ) # fmt: on max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) self.assertLess( max_diff, 1e-4, f"Image slice is different from expected slice: {image_slice} != {expected_slice}" )
diffusers/tests/pipelines/flux/test_pipeline_flux.py/0
{ "file_path": "diffusers/tests/pipelines/flux/test_pipeline_flux.py", "repo_id": "diffusers", "token_count": 6567 }
200
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, is_flaky, load_image, load_numpy, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) # create mask mask = np.zeros((64, 64), dtype=np.float32) mask[:32, :32] = 1 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, 
"num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintPipeline params = ["image_embeds", "negative_image_embeds", "image", "mask_image"] batch_params = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds", "masked_image", "mask_image"] def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_inpaint(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( f" expected_slice {expected_slice}, but got {image_slice.flatten()}" ) assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" ) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_float16_inference(self): 
super().test_float16_inference(expected_max_diff=5e-1) @is_flaky() def test_model_cpu_offload_forward_pass(self): super().test_inference_batch_single_identical(expected_max_diff=8e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) # override default test because we need to zero out mask too in order to make sure final latent is all zero def test_callback_inputs(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_accelerator class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test 
super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_kandinsky_inpaint(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png" ) mask = np.zeros((768, 768), dtype=np.float32) mask[:250, 250:-250] = 1 prompt = "a hat" pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22InpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=2, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=2, height=768, width=768, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py", "repo_id": "diffusers", "token_count": 5572 }
201
# coding=utf-8 # Copyright 2025 Latte Team and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import tempfile import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, DDIMScheduler, FasterCacheConfig, LattePipeline, LatteTransformer3DModel, PyramidAttentionBroadcastConfig, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( FasterCacheTesterMixin, PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np, ) enable_full_determinism() class LattePipelineFastTests( PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, FasterCacheTesterMixin, unittest.TestCase ): pipeline_class = LattePipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params test_layerwise_casting = True test_group_offloading = True pab_config = PyramidAttentionBroadcastConfig( spatial_attention_block_skip_range=2, 
temporal_attention_block_skip_range=2, cross_attention_block_skip_range=2, spatial_attention_timestep_skip_range=(100, 700), temporal_attention_timestep_skip_range=(100, 800), cross_attention_timestep_skip_range=(100, 800), spatial_attention_block_identifiers=["transformer_blocks"], temporal_attention_block_identifiers=["temporal_transformer_blocks"], cross_attention_block_identifiers=["transformer_blocks"], ) faster_cache_config = FasterCacheConfig( spatial_attention_block_skip_range=2, temporal_attention_block_skip_range=2, spatial_attention_timestep_skip_range=(-1, 901), temporal_attention_timestep_skip_range=(-1, 901), unconditional_batch_skip_range=2, attention_weight_callback=lambda _: 0.5, ) def get_dummy_components(self, num_layers: int = 1): torch.manual_seed(0) transformer = LatteTransformer3DModel( sample_size=8, num_layers=num_layers, patch_size=2, attention_head_dim=8, num_attention_heads=3, caption_channels=32, in_channels=4, cross_attention_dim=24, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single", norm_elementwise_affine=False, norm_eps=1e-6, ) torch.manual_seed(0) vae = AutoencoderKL() scheduler = DDIMScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder.eval(), "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "negative_prompt": "low quality", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "video_length": 1, "output_type": "pt", "clean_caption": 
False, } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames generated_video = video[0] self.assertEqual(generated_video.shape, (1, 3, 8, 8)) expected_video = torch.randn(1, 3, 8, 8) max_diff = np.abs(generated_video - expected_video).max() self.assertLessEqual(max_diff, 1e10) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = 
pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) @unittest.skip("Not supported.") def test_attention_slicing_forward_pass(self): pass @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) @unittest.skip("Test not supported because `encode_prompt()` has multiple returns.") def test_encode_prompt_works_in_isolation(self): pass def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] ( prompt_embeds, negative_prompt_embeds, ) = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "negative_prompt": None, "negative_prompt_embeds": 
negative_prompt_embeds, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "video_length": 1, "mask_feature": False, "output_type": "pt", "clean_caption": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1.0) @slow @require_torch_accelerator class LattePipelineIntegrationTests(unittest.TestCase): prompt = "A painting of a squirrel eating a burger." def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_latte(self): generator = torch.Generator("cpu").manual_seed(0) pipe = LattePipeline.from_pretrained("maxin-cn/Latte-1", torch_dtype=torch.float16) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt videos = pipe( prompt=prompt, height=512, width=512, generator=generator, num_inference_steps=2, clean_caption=False, ).frames video = videos[0] expected_video = torch.randn(1, 512, 512, 3).numpy() max_diff = numpy_cosine_similarity_distance(video.flatten(), expected_video) assert max_diff < 1e-3, f"Max diff is too high. got {video.flatten()}"
diffusers/tests/pipelines/latte/test_latte.py/0
{ "file_path": "diffusers/tests/pipelines/latte/test_latte.py", "repo_id": "diffusers", "token_count": 5332 }
202
import inspect
import unittest

import numpy as np
import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    SD3Transformer2DModel,
    StableDiffusion3PAGPipeline,
    StableDiffusion3Pipeline,
)
from diffusers.utils.testing_utils import (
    torch_device,
)

from ..test_pipelines_common import (
    PipelineTesterMixin,
    check_qkv_fusion_matches_attn_procs_length,
    check_qkv_fusion_processors_exist,
)


class StableDiffusion3PAGPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
    """Fast CPU tests for the Stable Diffusion 3 PAG pipeline built from tiny dummy components."""

    pipeline_class = StableDiffusion3PAGPipeline
    params = frozenset(
        [
            "prompt",
            "height",
            "width",
            "guidance_scale",
            "negative_prompt",
            "prompt_embeds",
            "negative_prompt_embeds",
        ]
    )
    batch_params = frozenset(["prompt", "negative_prompt"])
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build tiny randomly initialized components; seeds are reset before each stochastic constructor."""
        torch.manual_seed(0)
        transformer = SD3Transformer2DModel(
            sample_size=32,
            patch_size=1,
            in_channels=4,
            num_layers=2,
            attention_head_dim=8,
            num_attention_heads=4,
            caption_projection_dim=32,
            joint_attention_dim=32,
            pooled_projection_dim=64,
            out_channels=4,
        )

        clip_text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )

        torch.manual_seed(0)
        text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)

        torch.manual_seed(0)
        text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)

        text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        vae = AutoencoderKL(
            sample_size=32,
            in_channels=3,
            out_channels=3,
            block_out_channels=(4,),
            layers_per_block=1,
            latent_channels=4,
            norm_num_groups=1,
            use_quant_conv=False,
            use_post_quant_conv=False,
            shift_factor=0.0609,
            scaling_factor=1.5035,
        )

        scheduler = FlowMatchEulerDiscreteScheduler()

        return {
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "text_encoder_2": text_encoder_2,
            "text_encoder_3": text_encoder_3,
            "tokenizer": tokenizer,
            "tokenizer_2": tokenizer_2,
            "tokenizer_3": tokenizer_3,
            "transformer": transformer,
            "vae": vae,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; `pag_scale` defaults to 0.0 so PAG starts disabled."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device="cpu").manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "np",
            "pag_scale": 0.0,
        }

    def test_stable_diffusion_3_different_prompts(self):
        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)

        inputs = self.get_dummy_inputs(torch_device)
        base_image = pipe(**inputs).images[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt_2"] = "a different prompt"
        inputs["prompt_3"] = "another different prompt"
        variant_image = pipe(**inputs).images[0]

        max_diff = np.abs(base_image - variant_image).max()

        # Outputs should be different here
        assert max_diff > 1e-2

    def test_stable_diffusion_3_different_negative_prompts(self):
        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)

        inputs = self.get_dummy_inputs(torch_device)
        base_image = pipe(**inputs).images[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["negative_prompt_2"] = "deformed"
        inputs["negative_prompt_3"] = "blurry"
        variant_image = pipe(**inputs).images[0]

        max_diff = np.abs(base_image - variant_image).max()

        # Outputs should be different here
        assert max_diff > 1e-2

    def test_fused_qkv_projections(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        original_image_slice = image[0, -3:, -3:, -1]

        # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added
        # to the pipeline level.
        pipe.transformer.fuse_qkv_projections()
        assert check_qkv_fusion_processors_exist(pipe.transformer), (
            "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
        )
        assert check_qkv_fusion_matches_attn_procs_length(
            pipe.transformer, pipe.transformer.original_attn_processors
        ), "Something wrong with the attention processors concerning the fused QKV projections."

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice_fused = image[0, -3:, -3:, -1]

        pipe.transformer.unfuse_qkv_projections()
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice_disabled = image[0, -3:, -3:, -1]

        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), (
            "Fusion of QKV projections shouldn't affect the outputs."
        )
        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), (
            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
        )
        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
            "Original outputs should match when fused QKV projections are disabled."
        )

    def test_pag_disable_enable(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()

        # base pipeline (expect same output when pag is disabled)
        pipe_sd = StableDiffusion3Pipeline(**components)
        pipe_sd = pipe_sd.to(device)
        pipe_sd.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        del inputs["pag_scale"]
        assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, (
            f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
        )
        out = pipe_sd(**inputs).images[0, -3:, -3:, -1]

        components = self.get_dummy_components()

        # pag disabled with pag_scale=0.0
        pipe_pag = self.pipeline_class(**components)
        pipe_pag = pipe_pag.to(device)
        pipe_pag.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["pag_scale"] = 0.0
        out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]

        assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3

    def test_pag_applied_layers(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()

        # base pipeline
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        all_self_attn_layers = [k for k in pipe.transformer.attn_processors.keys() if "attn" in k]
        original_attn_procs = pipe.transformer.attn_processors

        pag_layers = ["blocks.0", "blocks.1"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert set(pipe.pag_attn_processors) == set(all_self_attn_layers)

        # blocks.0
        block_0_self_attn = ["transformer_blocks.0.attn.processor"]
        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.0"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)

        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.0.attn"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)

        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.(0|1)"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert (len(pipe.pag_attn_processors)) == 2

        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.0", r"blocks\.1"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert len(pipe.pag_attn_processors) == 2
diffusers/tests/pipelines/pag/test_pag_sd3.py/0
{ "file_path": "diffusers/tests/pipelines/pag/test_pag_sd3.py", "repo_id": "diffusers", "token_count": 4730 }
203
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import pytest
import torch
from PIL import Image
from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor

from diffusers import (
    AutoencoderKLQwenImage,
    FlowMatchEulerDiscreteScheduler,
    QwenImageEditPipeline,
    QwenImageTransformer2DModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, to_np


enable_full_determinism()


class QwenImageEditPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the QwenImage edit pipeline using tiny randomly initialized models."""

    pipeline_class = QwenImageEditPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = frozenset(["prompt", "image"])
    image_params = frozenset(["image"])
    image_latents_params = frozenset(["latents"])
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    supports_dduf = False
    test_xformers_attention = False
    test_layerwise_casting = True
    test_group_offloading = True

    def get_dummy_components(self):
        """Assemble the smallest possible component set for the edit pipeline."""
        tiny_ckpt_id = "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration"

        torch.manual_seed(0)
        transformer = QwenImageTransformer2DModel(
            patch_size=2,
            in_channels=16,
            out_channels=4,
            num_layers=2,
            attention_head_dim=16,
            num_attention_heads=3,
            joint_attention_dim=16,
            guidance_embeds=False,
            axes_dims_rope=(8, 4, 4),
        )

        torch.manual_seed(0)
        z_dim = 4
        vae = AutoencoderKLQwenImage(
            base_dim=z_dim * 6,
            z_dim=z_dim,
            dim_mult=[1, 2, 4],
            num_res_blocks=1,
            temperal_downsample=[False, True],
            latents_mean=[0.0] * z_dim,
            latents_std=[1.0] * z_dim,
        )

        torch.manual_seed(0)
        scheduler = FlowMatchEulerDiscreteScheduler()

        torch.manual_seed(0)
        config = Qwen2_5_VLConfig(
            text_config={
                "hidden_size": 16,
                "intermediate_size": 16,
                "num_hidden_layers": 2,
                "num_attention_heads": 2,
                "num_key_value_heads": 2,
                "rope_scaling": {
                    "mrope_section": [1, 1, 2],
                    "rope_type": "default",
                    "type": "default",
                },
                "rope_theta": 1000000.0,
            },
            vision_config={
                "depth": 2,
                "hidden_size": 16,
                "intermediate_size": 16,
                "num_heads": 2,
                "out_hidden_size": 16,
            },
            hidden_size=16,
            vocab_size=152064,
            vision_end_token_id=151653,
            vision_start_token_id=151652,
            vision_token_id=151654,
        )
        text_encoder = Qwen2_5_VLForConditionalGeneration(config)
        tokenizer = Qwen2Tokenizer.from_pretrained(tiny_ckpt_id)

        return {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "processor": Qwen2VLProcessor.from_pretrained(tiny_ckpt_id),
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs with a blank 32x32 RGB input image."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "prompt": "dance monkey",
            "image": Image.new("RGB", (32, 32)),
            "negative_prompt": "bad quality",
            "generator": generator,
            "num_inference_steps": 2,
            "true_cfg_scale": 1.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": 16,
            "output_type": "pt",
        }

    def test_inference(self):
        device = "cpu"

        pipe = self.pipeline_class(**self.get_dummy_components())
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        images = pipe(**inputs).images
        generated_image = images[0]
        self.assertEqual(generated_image.shape, (3, 32, 32))

        # fmt: off
        expected_slice = torch.tensor([[0.5637, 0.6341, 0.6001, 0.5620, 0.5794, 0.5498, 0.5757, 0.6389, 0.4174, 0.3597, 0.5649, 0.4894, 0.4969, 0.5255, 0.4083, 0.4986]])
        # fmt: on

        flat = generated_image.flatten()
        generated_slice = torch.cat([flat[:8], flat[-8:]])
        self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3))

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(
        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
    ):
        if not self.test_attention_slicing:
            return

        pipe = self.pipeline_class(**self.get_dummy_components())
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        output_without_slicing = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=1)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing1 = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=2)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing2 = pipe(**inputs)[0]

        if test_max_difference:
            max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
            max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
            self.assertLess(
                max(max_diff1, max_diff2),
                expected_max_diff,
                "Attention slicing should not affect the inference results",
            )

    def test_vae_tiling(self, expected_diff_max: float = 0.2):
        generator_device = "cpu"
        pipe = self.pipeline_class(**self.get_dummy_components())
        pipe.to("cpu")
        pipe.set_progress_bar_config(disable=None)

        # Without tiling
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_without_tiling = pipe(**inputs)[0]

        # With tiling
        pipe.vae.enable_tiling(
            tile_sample_min_height=96,
            tile_sample_min_width=96,
            tile_sample_stride_height=64,
            tile_sample_stride_width=64,
        )
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_with_tiling = pipe(**inputs)[0]

        self.assertLess(
            (to_np(output_without_tiling) - to_np(output_with_tiling)).max(),
            expected_diff_max,
            "VAE tiling should not affect the inference results",
        )

    @pytest.mark.xfail(condition=True, reason="Preconfigured embeddings need to be revisited.", strict=True)
    def test_encode_prompt_works_in_isolation(self, extra_required_param_value_dict=None, atol=1e-4, rtol=1e-4):
        super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol, rtol)
diffusers/tests/pipelines/qwenimage/test_qwenimage_edit.py/0
{ "file_path": "diffusers/tests/pipelines/qwenimage/test_qwenimage_edit.py", "repo_id": "diffusers", "token_count": 4148 }
204
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection

from diffusers import (
    AutoencoderKL,
    DPMSolverMultistepScheduler,
    PNDMScheduler,
    StableDiffusionImageVariationPipeline,
    UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_max_memory_allocated,
    backend_reset_peak_memory_stats,
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    nightly,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)

from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionImageVariationPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the image-variation pipeline using tiny dummy components."""

    pipeline_class = StableDiffusionImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    supports_dduf = False

    def get_dummy_components(self):
        """Return tiny randomly initialized components (no safety checker)."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)

        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        torch.manual_seed(0)
        image_encoder_config = CLIPVisionConfig(
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            image_size=32,
            patch_size=4,
        )
        image_encoder = CLIPVisionModelWithProjection(image_encoder_config)
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        return {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "image_encoder": image_encoder,
            "feature_extractor": feature_extractor,
            "safety_checker": None,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Build a deterministic 32x32 RGB input image plus standard call kwargs."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed))
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
        }

    def test_stable_diffusion_img_variation_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        pipe = StableDiffusionImageVariationPipeline(**self.get_dummy_components())
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_img_variation_multiple_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        pipe = StableDiffusionImageVariationPipeline(**self.get_dummy_components())
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["image"] = 2 * [inputs["image"]]
        output = pipe(**inputs)
        image = output.images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array([0.6647, 0.5557, 0.5723, 0.5567, 0.5869, 0.6044, 0.5502, 0.5439, 0.5189])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_accelerator
class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Fixed latents + Vermeer test image so outputs are reproducible across runs."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_imgvar/input_image_vermeer.png"
        )
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        return {
            "image": init_image,
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "np",
        }

    def test_stable_diffusion_img_variation_pipeline_default(self):
        pipe = StableDiffusionImageVariationPipeline.from_pretrained(
            "lambdalabs/sd-image-variations-diffusers", safety_checker=None
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_inputs(generator_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851])

        max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice)
        assert max_diff < 1e-4

    def test_stable_diffusion_img_variation_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851])

                max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice)
                assert max_diff < 1e-3
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851])

                max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice)
                assert max_diff < 1e-3

        callback_fn.has_been_called = False

        pipe = StableDiffusionImageVariationPipeline.from_pretrained(
            "lambdalabs/sd-image-variations-diffusers",
            safety_checker=None,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()

        generator_device = "cpu"
        inputs = self.get_inputs(generator_device, dtype=torch.float16)
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == inputs["num_inference_steps"]

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        backend_empty_cache(torch_device)
        backend_reset_max_memory_allocated(torch_device)
        backend_reset_peak_memory_stats(torch_device)

        pipe = StableDiffusionImageVariationPipeline.from_pretrained(
            "lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload(device=torch_device)

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        _ = pipe(**inputs)

        mem_bytes = backend_max_memory_allocated(torch_device)
        # make sure that less than 2.6 GB is allocated
        assert mem_bytes < 2.6 * 10**9


@nightly
@require_torch_accelerator
class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Same fixtures as the slow suite but with a full 50-step schedule."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_imgvar/input_image_vermeer.png"
        )
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        return {
            "image": init_image,
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "np",
        }

    def test_img_variation_pndm(self):
        pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_imgvar/lambdalabs_variations_pndm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img_variation_dpm(self):
        pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_imgvar/lambdalabs_variations_dpm_multi.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
diffusers/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py", "repo_id": "diffusers", "token_count": 6001 }
205
import gc import inspect import json import os import tempfile import unittest import uuid from typing import Any, Callable, Dict, Union import numpy as np import PIL.Image import torch import torch.nn as nn from huggingface_hub import ModelCard, delete_repo from huggingface_hub.utils import is_jinja_available from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny, ConsistencyDecoderVAE, DDIMScheduler, DiffusionPipeline, FasterCacheConfig, KolorsPipeline, PyramidAttentionBroadcastConfig, StableDiffusionPipeline, StableDiffusionXLPipeline, UNet2DConditionModel, apply_faster_cache, ) from diffusers.hooks import apply_group_offloading from diffusers.hooks.faster_cache import FasterCacheBlockHook, FasterCacheDenoiserHook from diffusers.hooks.first_block_cache import FirstBlockCacheConfig from diffusers.hooks.pyramid_attention_broadcast import PyramidAttentionBroadcastHook from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FluxIPAdapterMixin, IPAdapterMixin from diffusers.models.attention import AttentionModuleMixin from diffusers.models.attention_processor import AttnProcessor from diffusers.models.controlnets.controlnet_xs import UNetControlNetXSModel from diffusers.models.unets.unet_3d_condition import UNet3DConditionModel from diffusers.models.unets.unet_i2vgen_xl import I2VGenXLUNet from diffusers.models.unets.unet_motion_model import UNetMotionModel from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.source_code_parsing_utils import ReturnNameVisitor from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, numpy_cosine_similarity_distance, require_accelerate_version_greater, require_accelerator, 
require_hf_hub_version_greater, require_torch, require_torch_accelerator, require_transformers_version_greater, skip_mps, torch_device, ) from ..models.autoencoders.vae import ( get_asym_autoencoder_kl_config, get_autoencoder_kl_config, get_autoencoder_tiny_config, get_consistency_vae_config, ) from ..models.transformers.test_models_transformer_flux import create_flux_ip_adapter_state_dict from ..models.unets.test_models_unet_2d_condition import ( create_ip_adapter_faceid_state_dict, create_ip_adapter_state_dict, ) from ..others.test_utils import TOKEN, USER, is_staging_test def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) def check_qkv_fusion_matches_attn_procs_length(model, original_attn_processors): current_attn_processors = model.attn_processors return len(current_attn_processors) == len(original_attn_processors) def check_qkv_fusion_processors_exist(model): current_attn_processors = model.attn_processors proc_names = [v.__class__.__name__ for _, v in current_attn_processors.items()] return all(p.startswith("Fused") for p in proc_names) def check_qkv_fused_layers_exist(model, layer_names): is_fused_submodules = [] for submodule in model.modules(): if not isinstance(submodule, AttentionModuleMixin): continue is_fused_attribute_set = submodule.fused_projections is_fused_layer = True for layer in layer_names: is_fused_layer = is_fused_layer and getattr(submodule, layer, None) is not None is_fused = is_fused_attribute_set and is_fused_layer is_fused_submodules.append(is_fused) return all(is_fused_submodules) class SDFunctionTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for PyTorch pipeline that inherit from StableDiffusionMixin, e.g. vae_slicing, vae_tiling, freeu, etc. 
""" def test_vae_slicing(self, image_count=4): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count if "image" in inputs: # fix batch size mismatch in I2V_Gen pipeline inputs["image"] = [inputs["image"]] * image_count output_1 = pipe(**inputs) # make sure sliced vae decode yields the same result pipe.enable_vae_slicing() inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count if "image" in inputs: inputs["image"] = [inputs["image"]] * image_count inputs["return_dict"] = False output_2 = pipe(**inputs) assert np.abs(output_2[0].flatten() - output_1[0].flatten()).max() < 1e-2 def test_vae_tiling(self): components = self.get_dummy_components() # make sure here that pndm scheduler skips prk if "safety_checker" in components: components["safety_checker"] = None pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False # Test that tiled decode at 512x512 yields the same result as the non-tiled decode output_1 = pipe(**inputs)[0] # make sure tiled vae decode yields the same result pipe.enable_vae_tiling() inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False output_2 = pipe(**inputs)[0] assert np.abs(to_np(output_2) - to_np(output_1)).max() < 5e-1 # test that tiled decode works with various shapes shapes = [(1, 4, 73, 97), (1, 4, 65, 49)] with torch.no_grad(): for shape in shapes: zeros = torch.zeros(shape).to(torch_device) pipe.vae.decode(zeros) # MPS currently doesn't support ComplexFloats, which are required for FreeU - see 
https://github.com/huggingface/diffusers/issues/7569. @skip_mps def test_freeu(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # Normal inference inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False inputs["output_type"] = "np" output = pipe(**inputs)[0] # FreeU-enabled inference pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False inputs["output_type"] = "np" output_freeu = pipe(**inputs)[0] # FreeU-disabled inference pipe.disable_freeu() freeu_keys = {"s1", "s2", "b1", "b2"} for upsample_block in pipe.unet.up_blocks: for key in freeu_keys: assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None." inputs = self.get_dummy_inputs(torch_device) inputs["return_dict"] = False inputs["output_type"] = "np" output_no_freeu = pipe(**inputs)[0] assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), ( "Enabling of FreeU should lead to different results." ) assert np.allclose(output, output_no_freeu, atol=1e-2), ( f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}." 
) def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image = pipe(**inputs)[0] original_image_slice = image[0, -3:, -3:, -1] pipe.fuse_qkv_projections() for _, component in pipe.components.items(): if ( isinstance(component, nn.Module) and hasattr(component, "original_attn_processors") and component.original_attn_processors is not None ): assert check_qkv_fusion_processors_exist(component), ( "Something wrong with the fused attention processors. Expected all the attention processors to be fused." ) assert check_qkv_fusion_matches_attn_procs_length(component, component.original_attn_processors), ( "Something wrong with the attention processors concerning the fused QKV projections." ) inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_fused = pipe(**inputs)[0] image_slice_fused = image_fused[0, -3:, -3:, -1] pipe.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( "Fusion of QKV projections shouldn't affect the outputs." ) assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." ) assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Original outputs should match when fused QKV projections are disabled." ) class IPAdapterTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. 
It provides a set of common tests for pipelines that support IP Adapters. """ def test_pipeline_signature(self): parameters = inspect.signature(self.pipeline_class.__call__).parameters assert issubclass(self.pipeline_class, IPAdapterMixin) self.assertIn( "ip_adapter_image", parameters, "`ip_adapter_image` argument must be supported by the `__call__` method", ) self.assertIn( "ip_adapter_image_embeds", parameters, "`ip_adapter_image_embeds` argument must be supported by the `__call__` method", ) def _get_dummy_image_embeds(self, cross_attention_dim: int = 32): return torch.randn((2, 1, cross_attention_dim), device=torch_device) def _get_dummy_faceid_image_embeds(self, cross_attention_dim: int = 32): return torch.randn((2, 1, 1, cross_attention_dim), device=torch_device) def _get_dummy_masks(self, input_size: int = 64): _masks = torch.zeros((1, 1, input_size, input_size), device=torch_device) _masks[0, :, :, : int(input_size / 2)] = 1 return _masks def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): parameters = inspect.signature(self.pipeline_class.__call__).parameters if "image" in parameters.keys() and "strength" in parameters.keys(): inputs["num_inference_steps"] = 4 inputs["output_type"] = "np" inputs["return_dict"] = False return inputs def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): r"""Tests for IP-Adapter. The following scenarios are tested: - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Multi IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. - Multi IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. """ # Raising the tolerance for this test when it's run on a CPU because we # compare against static slices and that can be shaky (with a VVVV low probability). 
expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) if expected_pipe_slice is None: output_without_adapter = pipe(**inputs)[0] else: output_without_adapter = expected_pipe_slice # 1. Single IP-Adapter test cases adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 1e-2, "Output with ip-adapter must be 
different from normal inference" ) # 2. Multi IP-Adapter test cases adapter_state_dict_1 = create_ip_adapter_state_dict(pipe.unet) adapter_state_dict_2 = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights([adapter_state_dict_1, adapter_state_dict_2]) # forward pass with multi ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 pipe.set_ip_adapter_scale([0.0, 0.0]) output_without_multi_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_without_multi_adapter_scale = output_without_multi_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with multi ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 pipe.set_ip_adapter_scale([42.0, 42.0]) output_with_multi_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_with_multi_adapter_scale = output_with_multi_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_multi_adapter_scale = np.abs( output_without_multi_adapter_scale - output_without_adapter ).max() max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_multi_adapter_scale, expected_max_diff, "Output without multi-ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_multi_adapter_scale, 1e-2, "Output with multi-ip-adapter scale must be different from normal inference", ) def test_ip_adapter_cfg(self, expected_max_diff: float = 1e-4): parameters = inspect.signature(self.pipeline_class.__call__).parameters if "guidance_scale" not in parameters: return components = self.get_dummy_components() pipe = 
self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) pipe.set_ip_adapter_scale(1.0) # forward pass with CFG not applied inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)[0].unsqueeze(0)] inputs["guidance_scale"] = 1.0 out_no_cfg = pipe(**inputs)[0] # forward pass with CFG applied inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] inputs["guidance_scale"] = 7.5 out_cfg = pipe(**inputs)[0] assert out_cfg.shape == out_no_cfg.shape def test_ip_adapter_masks(self, expected_max_diff: float = 1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) sample_size = pipe.unet.config.get("sample_size", 32) block_out_channels = pipe.vae.config.get("block_out_channels", [128, 256, 512, 512]) input_size = sample_size * (2 ** (len(block_out_channels) - 1)) # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) output_without_adapter = pipe(**inputs)[0] output_without_adapter = output_without_adapter[0, -3:, -3:, -1].flatten() adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter and masks, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = 
[self._get_dummy_image_embeds(cross_attention_dim)] inputs["cross_attention_kwargs"] = {"ip_adapter_masks": [self._get_dummy_masks(input_size)]} pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter and masks, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] inputs["cross_attention_kwargs"] = {"ip_adapter_masks": [self._get_dummy_masks(input_size)]} pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 1e-3, "Output with ip-adapter must be different from normal inference" ) def test_ip_adapter_faceid(self, expected_max_diff: float = 1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) output_without_adapter = pipe(**inputs)[0] output_without_adapter = output_without_adapter[0, -3:, -3:, -1].flatten() adapter_state_dict = create_ip_adapter_faceid_state_dict(pipe.unet) pipe.unet._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter, but scale=0 which should have no effect inputs = 
self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_faceid_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_faceid_image_embeds(cross_attention_dim)] pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 1e-3, "Output with ip-adapter must be different from normal inference" ) class FluxIPAdapterTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for pipelines that support IP Adapters. 
""" def test_pipeline_signature(self): parameters = inspect.signature(self.pipeline_class.__call__).parameters assert issubclass(self.pipeline_class, FluxIPAdapterMixin) self.assertIn( "ip_adapter_image", parameters, "`ip_adapter_image` argument must be supported by the `__call__` method", ) self.assertIn( "ip_adapter_image_embeds", parameters, "`ip_adapter_image_embeds` argument must be supported by the `__call__` method", ) def _get_dummy_image_embeds(self, image_embed_dim: int = 768): return torch.randn((1, 1, image_embed_dim), device=torch_device) def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): inputs["negative_prompt"] = "" if "true_cfg_scale" in inspect.signature(self.pipeline_class.__call__).parameters: inputs["true_cfg_scale"] = 4.0 inputs["output_type"] = "np" inputs["return_dict"] = False return inputs def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): r"""Tests for IP-Adapter. The following scenarios are tested: - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Multi IP-Adapter with scale=0 should produce same output as no IP-Adapter. - Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. - Multi IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. """ # Raising the tolerance for this test when it's run on a CPU because we # compare against static slices and that can be shaky (with a VVVV low probability). 
expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff components = self.get_dummy_components() pipe = self.pipeline_class(**components).to(torch_device) pipe.set_progress_bar_config(disable=None) image_embed_dim = ( pipe.transformer.config.pooled_projection_dim if hasattr(pipe.transformer.config, "pooled_projection_dim") else 768 ) # forward pass without ip adapter inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) if expected_pipe_slice is None: output_without_adapter = pipe(**inputs)[0] else: output_without_adapter = expected_pipe_slice # 1. Single IP-Adapter test cases adapter_state_dict = create_flux_ip_adapter_state_dict(pipe.transformer) pipe.transformer._load_ip_adapter_weights(adapter_state_dict) # forward pass with single ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] pipe.set_ip_adapter_scale(0.0) output_without_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with single ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] pipe.set_ip_adapter_scale(42.0) output_with_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() max_diff_with_adapter_scale = 
np.abs(output_with_adapter_scale - output_without_adapter).max() self.assertLess( max_diff_without_adapter_scale, expected_max_diff, "Output without ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_adapter_scale, 1e-2, "Output with ip-adapter must be different from normal inference" ) # 2. Multi IP-Adapter test cases adapter_state_dict_1 = create_flux_ip_adapter_state_dict(pipe.transformer) adapter_state_dict_2 = create_flux_ip_adapter_state_dict(pipe.transformer) pipe.transformer._load_ip_adapter_weights([adapter_state_dict_1, adapter_state_dict_2]) # forward pass with multi ip adapter, but scale=0 which should have no effect inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 pipe.set_ip_adapter_scale([0.0, 0.0]) output_without_multi_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_without_multi_adapter_scale = output_without_multi_adapter_scale[0, -3:, -3:, -1].flatten() # forward pass with multi ip adapter, but with scale of adapter weights inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 pipe.set_ip_adapter_scale([42.0, 42.0]) output_with_multi_adapter_scale = pipe(**inputs)[0] if expected_pipe_slice is not None: output_with_multi_adapter_scale = output_with_multi_adapter_scale[0, -3:, -3:, -1].flatten() max_diff_without_multi_adapter_scale = np.abs( output_without_multi_adapter_scale - output_without_adapter ).max() max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max() self.assertLess( 
max_diff_without_multi_adapter_scale, expected_max_diff, "Output without multi-ip-adapter must be same as normal inference", ) self.assertGreater( max_diff_with_multi_adapter_scale, 1e-2, "Output with multi-ip-adapter scale must be different from normal inference", ) class PipelineLatentTesterMixin: """ This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. It provides a set of common tests for PyTorch pipeline that has vae, e.g. equivalence of different input and output types, etc. """ @property def image_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_params` in the child test class. " "`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results" ) @property def image_latents_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `image_latents_params` in the child test class. " "`image_latents_params` are tested for if passing latents directly are producing same results" ) def get_dummy_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = self.get_dummy_inputs(device, seed) def convert_to_pt(image): if isinstance(image, torch.Tensor): input_image = image elif isinstance(image, np.ndarray): input_image = VaeImageProcessor.numpy_to_pt(image) elif isinstance(image, PIL.Image.Image): input_image = VaeImageProcessor.pil_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pt(input_image) else: raise ValueError(f"unsupported input_image_type {type(image)}") return input_image def convert_pt_to_type(image, input_image_type): if input_image_type == "pt": input_image = image elif input_image_type == "np": input_image = VaeImageProcessor.pt_to_numpy(image) elif input_image_type == "pil": input_image = VaeImageProcessor.pt_to_numpy(image) input_image = VaeImageProcessor.numpy_to_pil(input_image) else: raise ValueError(f"unsupported input_image_type {input_image_type}.") 
return input_image for image_param in self.image_params: if image_param in inputs.keys(): inputs[image_param] = convert_pt_to_type( convert_to_pt(inputs[image_param]).to(device), input_image_type ) inputs["output_type"] = output_type return inputs def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4): self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff) def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type="pt"): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output_pt = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pt") )[0] output_np = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="np") )[0] output_pil = pipe( **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pil") )[0] max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess( max_diff, expected_max_diff, "`output_type=='pt'` generate different results from `output_type=='np'`" ) max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_pt_np_pil_inputs_equivalent(self): if len(self.image_params) == 0: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pil"))[0] max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 
            1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")
        max_diff = np.abs(out_input_pil - out_input_np).max()
        self.assertLess(max_diff, 1e-2, "`input_type=='pt'` generate different result from `input_type=='np'`")

    def test_latents_input(self):
        # Passing pre-encoded latents in place of an image must give the same result
        # as passing the image itself.
        if len(self.image_latents_params) == 0:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # Disable resize/normalize so the raw tensor path matches the latent path.
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        generator = inputs["generator"]
        # Encode every latent-capable image input through the VAE, scaled as the
        # pipeline expects (vae.config.scaling_factor).
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = (
                    vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor
                )
        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")

    def test_multi_vae(self):
        # The pipeline should work (and produce same-shaped outputs) with any
        # compatible autoencoder class swapped in for the default VAE.
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        block_out_channels = pipe.vae.config.block_out_channels
        norm_num_groups = pipe.vae.config.norm_num_groups

        vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny]
        configs = [
            get_autoencoder_kl_config(block_out_channels, norm_num_groups),
            get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups),
            get_consistency_vae_config(block_out_channels, norm_num_groups),
            get_autoencoder_tiny_config(block_out_channels),
        ]

        out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]

        for vae_cls, config in zip(vae_classes, configs):
            vae = vae_cls(**config)
            vae = vae.to(torch_device)
            components["vae"] = vae
            vae_pipe = self.pipeline_class(**components)

            out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]

            assert out_vae_np.shape == out_np.shape


@require_torch
class PipelineFromPipeTesterMixin:
    @property
    def original_pipeline_class(self):
        # Map the pipeline under test to the base pipeline it can be created from
        # via `from_pipe` (SDXL / Kolors / SD, chosen by class-name heuristic).
        if "xl" in self.pipeline_class.__name__.lower():
            original_pipeline_class = StableDiffusionXLPipeline
        elif "kolors" in self.pipeline_class.__name__.lower():
            original_pipeline_class = KolorsPipeline
        else:
            original_pipeline_class = StableDiffusionPipeline

        return original_pipeline_class

    def get_dummy_inputs_pipe(self, device, seed=0):
        # Dummy inputs normalized to numpy output / tuple return for easy comparison.
        inputs = self.get_dummy_inputs(device, seed=seed)
        inputs["output_type"] = "np"
        inputs["return_dict"] = False
        return inputs

    def get_dummy_inputs_for_pipe_original(self, device, seed=0):
        # Keep only the inputs that the original pipeline's __call__ accepts.
        inputs = {}
        for k, v in self.get_dummy_inputs_pipe(device, seed=seed).items():
            if k in set(inspect.signature(self.original_pipeline_class.__call__).parameters.keys()):
                inputs[k] = v
        return inputs

    def test_from_pipe_consistent_config(self):
        # Round-tripping original -> current -> original must preserve the config.
        if self.original_pipeline_class == StableDiffusionPipeline:
            original_repo = "hf-internal-testing/tiny-stable-diffusion-pipe"
            original_kwargs = {"requires_safety_checker": False}
        elif self.original_pipeline_class == StableDiffusionXLPipeline:
            original_repo = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"
            original_kwargs = {"requires_aesthetics_score": True, "force_zeros_for_empty_prompt": False}
        elif self.original_pipeline_class == KolorsPipeline:
            original_repo = "hf-internal-testing/tiny-kolors-pipe"
            original_kwargs = {"force_zeros_for_empty_prompt": False}
        else:
            raise ValueError(
                "original_pipeline_class must be either StableDiffusionPipeline or StableDiffusionXLPipeline"
            )

        # create original_pipeline_class(sd/sdxl)
        pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs)

        # original_pipeline_class(sd/sdxl) -> pipeline_class
        pipe_components = self.get_dummy_components()
        pipe_additional_components = {}
        for name, component in pipe_components.items():
            if name not in pipe_original.components:
                pipe_additional_components[name] = component

        pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components)

        # pipeline_class -> original_pipeline_class(sd/sdxl)
        original_pipe_additional_components = {}
        for name, component in pipe_original.components.items():
            if name not in pipe.components or not isinstance(component, pipe.components[name].__class__):
                original_pipe_additional_components[name] = component

        pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components)

        # compare the config (private "_"-prefixed entries excluded)
        original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")}
        original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")}
        assert original_config_2 == original_config

    def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):
        # A pipeline built with `from_pipe` must produce the same output as one
        # built with `__init__`, and must not disturb the original pipeline.
        components = self.get_dummy_components()
        original_expected_modules, _ = self.original_pipeline_class._get_signature_keys(self.original_pipeline_class)

        # pipeline components that are also expected to be in the original pipeline
        original_pipe_components = {}
        # additional components that are not in the pipeline, but expected in the original pipeline
        original_pipe_additional_components = {}
        # additional components that are in the pipeline, but not expected in the original pipeline
        current_pipe_additional_components = {}

        for name, component in components.items():
            if name in original_expected_modules:
                original_pipe_components[name] = component
            else:
                current_pipe_additional_components[name] = component
        for name in original_expected_modules:
            if name not in original_pipe_components:
                if name in self.original_pipeline_class._optional_components:
                    original_pipe_additional_components[name] = None
                else:
                    raise ValueError(f"missing required module for {self.original_pipeline_class.__class__}: {name}")

        pipe_original = self.original_pipeline_class(**original_pipe_components, **original_pipe_additional_components)
        for component in pipe_original.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe_original.to(torch_device)
        pipe_original.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs_for_pipe_original(torch_device)
        output_original = pipe_original(**inputs)[0]

        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs_pipe(torch_device)
        output = pipe(**inputs)[0]

        pipe_from_original = self.pipeline_class.from_pipe(pipe_original, **current_pipe_additional_components)
        pipe_from_original.to(torch_device)
        pipe_from_original.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs_pipe(torch_device)
        output_from_original = pipe_from_original(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_from_original)).max()
        self.assertLess(
            max_diff,
            expected_max_diff,
            "The outputs of the pipelines created with `from_pipe` and `__init__` are different.",
        )

        # Re-run the original to make sure `from_pipe` did not mutate it.
        inputs = self.get_dummy_inputs_for_pipe_original(torch_device)
        output_original_2 = pipe_original(**inputs)[0]
        max_diff = np.abs(to_np(output_original) - to_np(output_original_2)).max()
        self.assertLess(max_diff, expected_max_diff, "`from_pipe` should not change the output of original pipeline.")

        # Exact-type check (not isinstance): processors must still be the default class.
        for component in pipe_original.components.values():
            if hasattr(component, "attn_processors"):
                assert all(type(proc) == AttnProcessor for proc in component.attn_processors.values()), (
                    "`from_pipe` changed the attention processor in original pipeline."
                )

    @require_accelerator
    @require_accelerate_version_greater("0.14.0")
    def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3):
        # Same `from_pipe` consistency check as above, but with model CPU offload
        # enabled on both pipelines.
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs_pipe(torch_device)
        output = pipe(**inputs)[0]

        original_expected_modules, _ = self.original_pipeline_class._get_signature_keys(self.original_pipeline_class)
        # pipeline components that are also expected to be in the original pipeline
        original_pipe_components = {}
        # additional components that are not in the pipeline, but expected in the original pipeline
        original_pipe_additional_components = {}
        # additional components that are in the pipeline, but not expected in the original pipeline
        current_pipe_additional_components = {}
        for name, component in components.items():
            if name in original_expected_modules:
                original_pipe_components[name] = component
            else:
                current_pipe_additional_components[name] = component
        for name in original_expected_modules:
            if name not in original_pipe_components:
                if name in self.original_pipeline_class._optional_components:
                    original_pipe_additional_components[name] = None
                else:
                    raise ValueError(f"missing required module for {self.original_pipeline_class.__class__}: {name}")

        pipe_original = self.original_pipeline_class(**original_pipe_components, **original_pipe_additional_components)
        for component in pipe_original.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe_original.set_progress_bar_config(disable=None)

        pipe_from_original = self.pipeline_class.from_pipe(pipe_original, **current_pipe_additional_components)
        for component in pipe_from_original.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe_from_original.enable_model_cpu_offload(device=torch_device)
        pipe_from_original.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs_pipe(torch_device)
        output_from_original = pipe_from_original(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_from_original)).max()
        self.assertLess(
            max_diff,
            expected_max_diff,
            "The outputs of the pipelines created with `from_pipe` and `__init__` are different.",
        )


@require_torch
class PipelineKarrasSchedulerTesterMixin:
    """
    This mixin is designed to be used with unittest.TestCase classes.
    It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers
    equivalence of dict and tuple outputs, etc.
    """

    def test_karras_schedulers_shape(
        self, num_inference_steps_for_strength=4, num_inference_steps_for_strength_for_iterations=5
    ):
        # Every Karras-style scheduler must yield an output of the same shape.
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        if "strength" in inputs:
            inputs["num_inference_steps"] = num_inference_steps_for_strength
            inputs["strength"] = 0.5

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            # KDPM2 schedulers need more steps to run at all; bump temporarily.
            if "KDPM2" in scheduler_enum.name:
                inputs["num_inference_steps"] = num_inference_steps_for_strength_for_iterations

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

            if "KDPM2" in scheduler_enum.name:
                inputs["num_inference_steps"] = 2

        assert check_same_shape(outputs)


@require_torch
class PipelineTesterMixin:
    """
    This mixin is designed to be used with unittest.TestCase classes.
    It provides a set of common tests for each PyTorch pipeline, e.g.
saving and loading the pipeline, equivalence of dict and tuple outputs, etc. """ # Canonical parameters that are passed to `__call__` regardless # of the type of pipeline. They are always optional and have common # sense default values. required_optional_params = frozenset( [ "num_inference_steps", "num_images_per_prompt", "generator", "latents", "output_type", "return_dict", ] ) # set these parameters to False in the child class if the pipeline does not support the corresponding functionality test_attention_slicing = True test_xformers_attention = True test_layerwise_casting = False test_group_offloading = False supports_dduf = True def get_generator(self, seed): device = torch_device if torch_device != "mps" else "cpu" generator = torch.Generator(device).manual_seed(seed) return generator @property def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: raise NotImplementedError( "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_components(self): raise NotImplementedError( "You need to implement `get_dummy_components(self)` in the child test class. " "See existing pipeline tests for reference." ) def get_dummy_inputs(self, device, seed=0): raise NotImplementedError( "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " "See existing pipeline tests for reference." ) @property def params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `params` in the child test class. " "`params` are checked for if all values are present in `__call__`'s signature." " You can set `params` using one of the common set of parameters defined in `pipeline_params.py`" " e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to " "image pipelines, including prompts and prompt embedding overrides." 
"If your pipeline's set of arguments has minor changes from one of the common sets of arguments, " "do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline " "with non-configurable height and width arguments should set the attribute as " "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. " "See existing pipeline tests for reference." ) @property def batch_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `batch_params` in the child test class. " "`batch_params` are the parameters required to be batched when passed to the pipeline's " "`__call__` method. `pipeline_params.py` provides some common sets of parameters such as " "`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's " "set of batch arguments has minor changes from one of the common sets of batch arguments, " "do not make modifications to the existing common sets of batch arguments. I.e. a text to " "image pipeline `negative_prompt` is not batched should set the attribute as " "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. " "See existing pipeline tests for reference." ) @property def callback_cfg_params(self) -> frozenset: raise NotImplementedError( "You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. " "`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback " "function when dynamically adjusting `guidance_scale`. They are variables that require special" "treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common" " sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's " "set of cfg arguments has minor changes from one of the common sets of cfg arguments, " "do not make modifications to the existing common sets of cfg arguments. I.e. 
for inpaint pipeline, you " " need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as" "`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`" ) def setUp(self): # clean up the VRAM before each test super().setUp() torch.compiler.reset() gc.collect() backend_empty_cache(torch_device) # Skip tests for pipelines that inherit from DeprecatedPipelineMixin from diffusers.pipelines.pipeline_utils import DeprecatedPipelineMixin if hasattr(self, "pipeline_class") and issubclass(self.pipeline_class, DeprecatedPipelineMixin): import pytest pytest.skip(reason=f"Deprecated Pipeline: {self.pipeline_class.__name__}") def tearDown(self): # clean up the VRAM after each test in case of CUDA runtime errors super().tearDown() torch.compiler.reset() gc.collect() backend_empty_cache(torch_device) def test_save_load_local(self, expected_max_difference=5e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) with CaptureLogger(logger) as cap_logger: pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() for name in pipe_loaded.components.keys(): if name not in pipe_loaded._optional_components: assert name in str(cap_logger) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = 
pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) def test_pipeline_call_signature(self): self.assertTrue( hasattr(self.pipeline_class, "__call__"), f"{self.pipeline_class} should have a `__call__` method" ) parameters = inspect.signature(self.pipeline_class.__call__).parameters optional_parameters = set() for k, v in parameters.items(): if v.default != inspect._empty: optional_parameters.add(k) parameters = set(parameters.keys()) parameters.remove("self") parameters.discard("kwargs") # kwargs can be added if arguments of pipeline call function are deprecated remaining_required_parameters = set() for param in self.params: if param not in parameters: remaining_required_parameters.add(param) self.assertTrue( len(remaining_required_parameters) == 0, f"Required parameters not present: {remaining_required_parameters}", ) remaining_required_optional_parameters = set() for param in self.required_optional_params: if param not in optional_parameters: remaining_required_optional_parameters.add(param) self.assertTrue( len(remaining_required_optional_parameters) == 0, f"Required optional parameters not present: {remaining_required_optional_parameters}", ) def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes) def _test_inference_batch_consistent( self, batch_sizes=[2], additional_params_copy_to_batched_inputs=["num_inference_steps"], batch_generator=True ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # prepare batched inputs batched_inputs = [] for batch_size in batch_sizes: batched_input = {} batched_input.update(inputs) for name in 
self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_input[name][-1] = 100 * "very long" else: batched_input[name] = batch_size * [value] if batch_generator and "generator" in inputs: batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_input["batch_size"] = batch_size batched_inputs.append(batched_input) logger.setLevel(level=diffusers.logging.WARNING) for batch_size, batched_input in zip(batch_sizes, batched_inputs): output = pipe(**batched_input) assert len(output[0]) == batch_size def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4): self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff) def _test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: 
batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff def test_dict_tuple_outputs_equivalent(self, expected_slice=None, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" if expected_slice is None: output = pipe(**self.get_dummy_inputs(generator_device))[0] else: output = expected_slice output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] if expected_slice is None: max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() else: if output_tuple.ndim != 5: max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1].flatten()).max() else: max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1, -1].flatten()).max() self.assertLess(max_diff, expected_max_difference) def test_components_function(self): init_components = self.get_dummy_components() init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_float16_inference(self, 
expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) for component in pipe_fp16.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in inputs: inputs["generator"] = self.get_generator(0) output = pipe(**inputs)[0] fp16_inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is used inside dummy inputs if "generator" in fp16_inputs: fp16_inputs["generator"] = self.get_generator(0) output_fp16 = pipe_fp16(**fp16_inputs)[0] if isinstance(output, torch.Tensor): output = output.cpu() output_fp16 = output_fp16.cpu() max_diff = numpy_cosine_similarity_distance(output.flatten(), output_fp16.flatten()) assert max_diff < expected_max_diff @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = 
self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." ) def test_save_load_optional_components(self, expected_max_difference=1e-4): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to 
None after loading.", ) inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to(torch_device) model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == torch_device for device in model_devices)) output_device = pipe(**self.get_dummy_inputs(torch_device))[0] self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3): self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff) def _test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in 
pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) if test_mean_pixel_difference: assert_mean_pixel_difference(to_np(output_with_slicing1[0]), to_np(output_without_slicing[0])) assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0])) @require_accelerator @require_accelerate_version_greater("0.14.0") def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): import accelerate components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload(device=torch_device) assert pipe._execution_device.type == torch_device inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() 
self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly offloaded_modules = { k: v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload } # 1. all offloaded modules should be saved to cpu and moved to meta device self.assertTrue( all(v.device.type == "meta" for v in offloaded_modules.values()), f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'meta']}", ) # 2. all offloaded modules should have hook installed self.assertTrue( all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", ) # 3. all offloaded modules should have correct hooks installed, should be either one of these two # - `AlignDevicesHook` # - a SequentialHook` that contains `AlignDevicesHook` offloaded_modules_with_incorrect_hooks = {} for k, v in offloaded_modules.items(): if hasattr(v, "_hf_hook"): if isinstance(v._hf_hook, accelerate.hooks.SequentialHook): # if it is a `SequentialHook`, we loop through its `hooks` attribute to check if it only contains `AlignDevicesHook` for hook in v._hf_hook.hooks: if not isinstance(hook, accelerate.hooks.AlignDevicesHook): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook.hooks[0]) elif not isinstance(v._hf_hook, accelerate.hooks.AlignDevicesHook): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) self.assertTrue( len(offloaded_modules_with_incorrect_hooks) == 0, f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) @require_accelerator @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): import accelerate generator_device = "cpu" components = self.get_dummy_components() pipe = 
self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output_without_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload(device=torch_device) assert pipe._execution_device.type == torch_device inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly offloaded_modules = { k: v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload } # 1. check if all offloaded modules are saved to cpu self.assertTrue( all(v.device.type == "cpu" for v in offloaded_modules.values()), f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'cpu']}", ) # 2. check if all offloaded modules have hooks installed self.assertTrue( all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", ) # 3. 
check if all offloaded modules have correct type of hooks installed, should be `CpuOffload` offloaded_modules_with_incorrect_hooks = {} for k, v in offloaded_modules.items(): if hasattr(v, "_hf_hook") and not isinstance(v._hf_hook, accelerate.hooks.CpuOffload): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) self.assertTrue( len(offloaded_modules_with_incorrect_hooks) == 0, f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) @require_accelerator @require_accelerate_version_greater("0.17.0") def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): import accelerate generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.set_progress_bar_config(disable=None) pipe.enable_model_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload_twice = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_with_offload_twice)).max() self.assertLess( max_diff, expected_max_diff, "running CPU offloading 2nd time should not affect the inference results" ) # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly offloaded_modules = { k: v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload } # 1. check if all offloaded modules are saved to cpu self.assertTrue( all(v.device.type == "cpu" for v in offloaded_modules.values()), f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'cpu']}", ) # 2. 
check if all offloaded modules have hooks installed self.assertTrue( all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", ) # 3. check if all offloaded modules have correct type of hooks installed, should be `CpuOffload` offloaded_modules_with_incorrect_hooks = {} for k, v in offloaded_modules.items(): if hasattr(v, "_hf_hook") and not isinstance(v._hf_hook, accelerate.hooks.CpuOffload): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) self.assertTrue( len(offloaded_modules_with_incorrect_hooks) == 0, f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) @require_accelerator @require_accelerate_version_greater("0.14.0") def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4): import accelerate generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.set_progress_bar_config(disable=None) pipe.enable_sequential_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs(generator_device) output_with_offload_twice = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_with_offload_twice)).max() self.assertLess( max_diff, expected_max_diff, "running sequential offloading second time should have the inference results" ) # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly offloaded_modules = { k: v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload } # 1. 
check if all offloaded modules are moved to meta device self.assertTrue( all(v.device.type == "meta" for v in offloaded_modules.values()), f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'meta']}", ) # 2. check if all offloaded modules have hook installed self.assertTrue( all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", ) # 3. check if all offloaded modules have correct hooks installed, should be either one of these two # - `AlignDevicesHook` # - a SequentialHook` that contains `AlignDevicesHook` offloaded_modules_with_incorrect_hooks = {} for k, v in offloaded_modules.items(): if hasattr(v, "_hf_hook"): if isinstance(v._hf_hook, accelerate.hooks.SequentialHook): # if it is a `SequentialHook`, we loop through its `hooks` attribute to check if it only contains `AlignDevicesHook` for hook in v._hf_hook.hooks: if not isinstance(hook, accelerate.hooks.AlignDevicesHook): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook.hooks[0]) elif not isinstance(v._hf_hook, accelerate.hooks.AlignDevicesHook): offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) self.assertTrue( len(offloaded_modules_with_incorrect_hooks) == 0, f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass() def _test_xformers_attention_forwardGenerator_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4 ): if not self.test_xformers_attention: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): 
component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs)[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs)[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) if test_max_difference: max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results") if test_mean_pixel_difference: assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0]) def test_num_images_per_prompt(self): sig = inspect.signature(self.pipeline_class.__call__) if "num_images_per_prompt" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_cfg(self): sig = inspect.signature(self.pipeline_class.__call__) if "guidance_scale" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["guidance_scale"] = 1.0 out_no_cfg = 
pipe(**inputs)[0]

        inputs["guidance_scale"] = 7.5
        out_cfg = pipe(**inputs)[0]

        assert out_cfg.shape == out_no_cfg.shape

    def test_callback_inputs(self):
        """Verify `callback_on_step_end` receives only tensors declared in
        `_callback_tensor_inputs`, both for a subset and for the full list,
        and that a callback can mutate those tensors in place."""
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        if not (has_callback_tensor_inputs and has_callback_step_end):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_subset(pipe, i, t, callback_kwargs):
            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        def callback_inputs_all(pipe, i, t, callback_kwargs):
            for tensor_name in pipe._callback_tensor_inputs:
                assert tensor_name in callback_kwargs

            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        # Test passing in a subset
        inputs["callback_on_step_end"] = callback_inputs_subset
        inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
        inputs["output_type"] = "latent"
        output = pipe(**inputs)[0]

        # Test passing in a everything
        inputs["callback_on_step_end"] = callback_inputs_all
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        inputs["output_type"] = "latent"
        output = pipe(**inputs)[0]

        def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
            # Zero out latents on the final step so the pipeline output must be all zeros
            # if the mutated tensor is actually fed back into the denoising loop.
            is_last = i == (pipe.num_timesteps - 1)
            if is_last:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs["callback_on_step_end"] = callback_inputs_change_tensor
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        inputs["output_type"] = "latent"
        output = pipe(**inputs)[0]
        assert output.abs().sum() == 0

    def test_callback_cfg(self):
        """Verify a step-end callback can mutate `_guidance_scale` and that the
        mutation is visible after inference (one +1.0 increment per step)."""
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        if not (has_callback_tensor_inputs and has_callback_step_end):
            return

        if "guidance_scale" not in sig.parameters:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_increase_guidance(pipe, i, t, callback_kwargs):
            pipe._guidance_scale += 1.0

            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        # use cfg guidance because some pipelines modify the shape of the latents
        # outside of the denoising loop
        inputs["guidance_scale"] = 2.0
        inputs["callback_on_step_end"] = callback_increase_guidance
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        _ = pipe(**inputs)[0]

        # we increase the guidance scale by 1.0 at every step
        # check that the guidance scale is increased by the number of scheduler timesteps
        # accounts for models that modify the number of inference steps based on strength
        assert pipe.guidance_scale == (inputs["guidance_scale"] + pipe.num_timesteps)

    def test_serialization_with_variants(self):
        """Save the pipeline with a weight variant and check each model subfolder
        contains variant-named weight files."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        model_components = [
            component_name
            for component_name, component in pipe.components.items()
if isinstance(component, nn.Module)
        ]
        variant = "fp16"

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False)

            with open(f"{tmpdir}/model_index.json", "r") as f:
                config = json.load(f)

            for subfolder in os.listdir(tmpdir):
                if not os.path.isfile(subfolder) and subfolder in model_components:
                    folder_path = os.path.join(tmpdir, subfolder)
                    is_folder = os.path.isdir(folder_path) and subfolder in config
                    # NOTE(review): `p.split(".")[1]` assumes saved filenames have exactly one
                    # dot before the variant suffix (e.g. "diffusion_pytorch_model.fp16.bin").
                    assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path))

    def test_loading_with_variants(self):
        """Round-trip save/load with a variant and check all module parameters match."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        variant = "fp16"

        def is_nan(tensor):
            # Handle both 0-dim and n-dim tensors.
            if tensor.ndimension() == 0:
                has_nan = torch.isnan(tensor).item()
            else:
                has_nan = torch.isnan(tensor).any()
            return has_nan

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, variant=variant)

        model_components_pipe = {
            component_name: component
            for component_name, component in pipe.components.items()
            if isinstance(component, nn.Module)
        }
        model_components_pipe_loaded = {
            component_name: component
            for component_name, component in pipe_loaded.components.items()
            if isinstance(component, nn.Module)
        }

        for component_name in model_components_pipe:
            pipe_component = model_components_pipe[component_name]
            pipe_loaded_component = model_components_pipe_loaded[component_name]
            for p1, p2 in zip(pipe_component.parameters(), pipe_loaded_component.parameters()):
                # nan check for luminanext (mps).
                if not (is_nan(p1) and is_nan(p2)):
                    self.assertTrue(torch.equal(p1, p2))

    def test_loading_with_incorrect_variants_raises_error(self):
        """Loading a variant that was never saved must raise a ValueError mentioning it."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        variant = "fp16"

        with tempfile.TemporaryDirectory() as tmpdir:
            # Don't save with variants.
            pipe.save_pretrained(tmpdir, safe_serialization=False)

            with self.assertRaises(ValueError) as error:
                _ = self.pipeline_class.from_pretrained(tmpdir, variant=variant)

            assert f"You are trying to load the model files of the `variant={variant}`" in str(error.exception)

    def test_encode_prompt_works_in_isolation(self, extra_required_param_value_dict=None, atol=1e-4, rtol=1e-4):
        """Check `encode_prompt()` can run with only text encoders/tokenizers loaded,
        and that feeding its outputs to a text-encoder-free pipeline reproduces the
        full pipeline's output within tolerance."""
        if not hasattr(self.pipeline_class, "encode_prompt"):
            return

        components = self.get_dummy_components()

        # We initialize the pipeline with only text encoders and tokenizers,
        # mimicking a real-world scenario.
        components_with_text_encoders = {}
        for k in components:
            if "text" in k or "tokenizer" in k:
                components_with_text_encoders[k] = components[k]
            else:
                components_with_text_encoders[k] = None
        pipe_with_just_text_encoder = self.pipeline_class(**components_with_text_encoders)
        pipe_with_just_text_encoder = pipe_with_just_text_encoder.to(torch_device)

        # Get inputs and also the args of `encode_prompts`.
        inputs = self.get_dummy_inputs(torch_device)
        encode_prompt_signature = inspect.signature(pipe_with_just_text_encoder.encode_prompt)
        encode_prompt_parameters = list(encode_prompt_signature.parameters.values())

        # Required args in encode_prompt with those with no default.
        required_params = []
        for param in encode_prompt_parameters:
            if param.name == "self" or param.name == "kwargs":
                continue
            if param.default is inspect.Parameter.empty:
                required_params.append(param.name)

        # Craft inputs for the `encode_prompt()` method to run in isolation.
        encode_prompt_param_names = [p.name for p in encode_prompt_parameters if p.name != "self"]
        input_keys = list(inputs.keys())
        encode_prompt_inputs = {k: inputs.pop(k) for k in input_keys if k in encode_prompt_param_names}

        pipe_call_signature = inspect.signature(pipe_with_just_text_encoder.__call__)
        pipe_call_parameters = pipe_call_signature.parameters

        # For each required arg in encode_prompt, check if it's missing
        # in encode_prompt_inputs.
# If so, see if __call__ has a default
        # for that arg and use it if available.
        for required_param_name in required_params:
            if required_param_name not in encode_prompt_inputs:
                pipe_call_param = pipe_call_parameters.get(required_param_name, None)
                if pipe_call_param is not None and pipe_call_param.default is not inspect.Parameter.empty:
                    # Use the default from pipe.__call__
                    encode_prompt_inputs[required_param_name] = pipe_call_param.default
                elif extra_required_param_value_dict is not None and isinstance(extra_required_param_value_dict, dict):
                    encode_prompt_inputs[required_param_name] = extra_required_param_value_dict[required_param_name]
                else:
                    raise ValueError(
                        f"Required parameter '{required_param_name}' in "
                        f"encode_prompt has no default in either encode_prompt or __call__."
                    )

        # Compute `encode_prompt()`.
        with torch.no_grad():
            encoded_prompt_outputs = pipe_with_just_text_encoder.encode_prompt(**encode_prompt_inputs)

        # Programmatically determine the return names of `encode_prompt.`
        ast_visitor = ReturnNameVisitor()
        encode_prompt_tree = ast_visitor.get_ast_tree(cls=self.pipeline_class)
        ast_visitor.visit(encode_prompt_tree)
        prompt_embed_kwargs = ast_visitor.return_names
        prompt_embeds_kwargs = dict(zip(prompt_embed_kwargs, encoded_prompt_outputs))

        # Pack the outputs of `encode_prompt`.
        adapted_prompt_embeds_kwargs = {
            k: prompt_embeds_kwargs.pop(k) for k in list(prompt_embeds_kwargs.keys()) if k in pipe_call_parameters
        }

        # now initialize a pipeline without text encoders and compute outputs with the
        # `encode_prompt()` outputs and other relevant inputs.
        components_with_text_encoders = {}
        for k in components:
            if "text" in k or "tokenizer" in k:
                components_with_text_encoders[k] = None
            else:
                components_with_text_encoders[k] = components[k]
        pipe_without_text_encoders = self.pipeline_class(**components_with_text_encoders).to(torch_device)

        # Set `negative_prompt` to None as we have already calculated its embeds
        # if it was present in `inputs`. This is because otherwise we will interfere wrongly
        # for non-None `negative_prompt` values as defaults (PixArt for example).
        pipe_without_tes_inputs = {**inputs, **adapted_prompt_embeds_kwargs}
        if (
            pipe_call_parameters.get("negative_prompt", None) is not None
            and pipe_call_parameters.get("negative_prompt").default is not None
        ):
            pipe_without_tes_inputs.update({"negative_prompt": None})

        # Pipelines like attend and excite have `prompt` as a required argument.
        if (
            pipe_call_parameters.get("prompt", None) is not None
            and pipe_call_parameters.get("prompt").default is inspect.Parameter.empty
            and pipe_call_parameters.get("prompt_embeds", None) is not None
            and pipe_call_parameters.get("prompt_embeds").default is None
        ):
            pipe_without_tes_inputs.update({"prompt": None})

        pipe_out = pipe_without_text_encoders(**pipe_without_tes_inputs)[0]

        # Compare against regular pipeline outputs.
        full_pipe = self.pipeline_class(**components).to(torch_device)
        inputs = self.get_dummy_inputs(torch_device)
        pipe_out_2 = full_pipe(**inputs)[0]

        if isinstance(pipe_out, np.ndarray) and isinstance(pipe_out_2, np.ndarray):
            self.assertTrue(np.allclose(pipe_out, pipe_out_2, atol=atol, rtol=rtol))
        elif isinstance(pipe_out, torch.Tensor) and isinstance(pipe_out_2, torch.Tensor):
            self.assertTrue(torch.allclose(pipe_out, pipe_out_2, atol=atol, rtol=rtol))

    def test_StableDiffusionMixin_component(self):
        """Any pipeline that have LDMFuncMixin should have vae and unet components."""
        if not issubclass(self.pipeline_class, StableDiffusionMixin):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        self.assertTrue(hasattr(pipe, "vae") and isinstance(pipe.vae, (AutoencoderKL, AutoencoderTiny)))
        self.assertTrue(
            hasattr(pipe, "unet")
            and isinstance(
                pipe.unet,
                (UNet2DConditionModel, UNet3DConditionModel, I2VGenXLUNet, UNetMotionModel, UNetControlNetXSModel),
            )
        )

    @require_hf_hub_version_greater("0.26.5")
    @require_transformers_version_greater("4.47.1")
    def
test_save_load_dduf(self, atol=1e-4, rtol=1e-4):
        """Export the saved pipeline as a DDUF archive, reload from it, and check
        outputs match the original pipeline within tolerance."""
        if not self.supports_dduf:
            return

        from huggingface_hub import export_folder_as_dduf

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device="cpu")
        # Re-seed so both runs see identical generator state.
        inputs.pop("generator")
        inputs["generator"] = torch.manual_seed(0)
        pipeline_out = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            dduf_filename = os.path.join(tmpdir, f"{pipe.__class__.__name__.lower()}.dduf")
            pipe.save_pretrained(tmpdir, safe_serialization=True)
            export_folder_as_dduf(dduf_filename, folder_path=tmpdir)
            loaded_pipe = self.pipeline_class.from_pretrained(tmpdir, dduf_file=dduf_filename).to(torch_device)

            inputs["generator"] = torch.manual_seed(0)
            loaded_pipeline_out = loaded_pipe(**inputs)[0]

        if isinstance(pipeline_out, np.ndarray) and isinstance(loaded_pipeline_out, np.ndarray):
            assert np.allclose(pipeline_out, loaded_pipeline_out, atol=atol, rtol=rtol)
        elif isinstance(pipeline_out, torch.Tensor) and isinstance(loaded_pipeline_out, torch.Tensor):
            assert torch.allclose(pipeline_out, loaded_pipeline_out, atol=atol, rtol=rtol)

    def test_layerwise_casting_inference(self):
        """Smoke test: inference must run with fp8 storage / bf16 compute layerwise casting."""
        if not self.test_layerwise_casting:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device, dtype=torch.bfloat16)
        pipe.set_progress_bar_config(disable=None)

        denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet
        denoiser.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)

        inputs = self.get_dummy_inputs(torch_device)
        _ = pipe(**inputs)[0]

    @require_torch_accelerator
    def test_group_offloading_inference(self):
        """Check that block-level and leaf-level group offloading both reproduce
        the non-offloaded outputs within 1e-4."""
        if not self.test_group_offloading:
            return

        def create_pipe():
            # Fresh, identically-seeded pipeline per configuration.
            torch.manual_seed(0)
            components = self.get_dummy_components()
            pipe = self.pipeline_class(**components)
            pipe.set_progress_bar_config(disable=None)
            return pipe

        def enable_group_offload_on_component(pipe, group_offloading_kwargs):
            # We intentionally don't test VAE's here. This is because some tests enable tiling on the VAE. If
            # tiling is enabled and a forward pass is run, when accelerator streams are used, the execution order of
            # the layers is not traced correctly. This causes errors. For apply group offloading to VAE, a
            # warmup forward pass (even with dummy small inputs) is recommended.
            for component_name in [
                "text_encoder",
                "text_encoder_2",
                "text_encoder_3",
                "transformer",
                "unet",
                "controlnet",
            ]:
                if not hasattr(pipe, component_name):
                    continue
                component = getattr(pipe, component_name)
                if not getattr(component, "_supports_group_offloading", True):
                    continue
                if hasattr(component, "enable_group_offload"):
                    # For diffusers ModelMixin implementations
                    component.enable_group_offload(torch.device(torch_device), **group_offloading_kwargs)
                else:
                    # For other models not part of diffusers
                    apply_group_offloading(
                        component, onload_device=torch.device(torch_device), **group_offloading_kwargs
                    )
                self.assertTrue(
                    all(
                        module._diffusers_hook.get_hook("group_offloading") is not None
                        for module in component.modules()
                        if hasattr(module, "_diffusers_hook")
                    )
                )
            for component_name in ["vae", "vqvae", "image_encoder"]:
                component = getattr(pipe, component_name, None)
                if isinstance(component, torch.nn.Module):
                    component.to(torch_device)

        def run_forward(pipe):
            torch.manual_seed(0)
            inputs = self.get_dummy_inputs(torch_device)
            return pipe(**inputs)[0]

        pipe = create_pipe().to(torch_device)
        output_without_group_offloading = run_forward(pipe)

        pipe = create_pipe()
        enable_group_offload_on_component(pipe, {"offload_type": "block_level", "num_blocks_per_group": 1})
        output_with_group_offloading1 = run_forward(pipe)

        pipe = create_pipe()
        enable_group_offload_on_component(pipe, {"offload_type": "leaf_level"})
        output_with_group_offloading2 = run_forward(pipe)

        if torch.is_tensor(output_without_group_offloading):
            output_without_group_offloading = output_without_group_offloading.detach().cpu().numpy()
            output_with_group_offloading1 = output_with_group_offloading1.detach().cpu().numpy()
            output_with_group_offloading2 = output_with_group_offloading2.detach().cpu().numpy()

        self.assertTrue(np.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-4))
        self.assertTrue(np.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-4))

    def test_torch_dtype_dict(self):
        """Check per-component `torch_dtype` dict (with a "default" fallback) is honored on load."""
        components = self.get_dummy_components()
        if not components:
            self.skipTest("No dummy components defined.")

        pipe = self.pipeline_class(**components)

        specified_key = next(iter(components.keys()))

        with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname:
            pipe.save_pretrained(tmpdirname, safe_serialization=False)
            torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16}
            loaded_pipe = self.pipeline_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype_dict)

        for name, component in loaded_pipe.components.items():
            if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"):
                expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32))
                self.assertEqual(
                    component.dtype,
                    expected_dtype,
                    f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}",
                )

    @require_torch_accelerator
    def test_pipeline_with_accelerator_device_map(self, expected_max_difference=1e-4):
        """Round-trip save/load with `device_map` set to the accelerator and check
        outputs match the original pipeline."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        torch.manual_seed(0)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["generator"] = torch.manual_seed(0)
        out = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            loaded_pipe = self.pipeline_class.from_pretrained(tmpdir, device_map=torch_device)
            for component in loaded_pipe.components.values():
                if hasattr(component, "set_default_attn_processor"):
component.set_default_attn_processor()

        inputs["generator"] = torch.manual_seed(0)
        loaded_out = loaded_pipe(**inputs)[0]
        max_diff = np.abs(to_np(out) - to_np(loaded_out)).max()
        self.assertLess(max_diff, expected_max_difference)


@is_staging_test
class PipelinePushToHubTester(unittest.TestCase):
    # Unique per-test-run repo names to avoid collisions on the staging Hub.
    identifier = uuid.uuid4()
    repo_id = f"test-pipeline-{identifier}"
    org_repo_id = f"valid_org/{repo_id}-org"

    def get_pipeline_components(self):
        """Build a tiny Stable Diffusion component set (small UNet/VAE/CLIP and a
        minimal tokenizer vocab) suitable for fast push-to-hub round-trips."""
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )

        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Write a minimal vocab/merges pair so a real CLIPTokenizer can be constructed.
            dummy_vocab = {"<|startoftext|>": 0, "<|endoftext|>": 1, "!": 2}
            vocab_path = os.path.join(tmpdir, "vocab.json")
            with open(vocab_path, "w") as f:
                json.dump(dummy_vocab, f)

            merges = "Ġ t\nĠt h"
            merges_path = os.path.join(tmpdir, "merges.txt")
            with open(merges_path, "w") as f:
                f.writelines(merges)
            tokenizer = CLIPTokenizer(vocab_file=vocab_path, merges_file=merges_path)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def test_push_to_hub(self):
        """Push a pipeline to a user repo (both via `push_to_hub` and
        `save_pretrained(push_to_hub=True)`) and verify the re-downloaded
        UNet weights are identical."""
        components = self.get_pipeline_components()
        pipeline = StableDiffusionPipeline(**components)
        pipeline.push_to_hub(self.repo_id, token=TOKEN)

        new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet")
        unet = components["unet"]
        for p1, p2 in zip(unet.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(token=TOKEN, repo_id=self.repo_id)

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            pipeline.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN)

        new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet")
        for p1, p2 in zip(unet.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(self.repo_id, token=TOKEN)

    def test_push_to_hub_in_organization(self):
        """Same as `test_push_to_hub` but targeting an organization repo."""
        components = self.get_pipeline_components()
        pipeline = StableDiffusionPipeline(**components)
        pipeline.push_to_hub(self.org_repo_id, token=TOKEN)

        new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet")
        unet = components["unet"]
        for p1, p2 in zip(unet.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(token=TOKEN, repo_id=self.org_repo_id)

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            pipeline.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id)

        new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet")
        for p1, p2 in zip(unet.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(self.org_repo_id, token=TOKEN)

    @unittest.skipIf(
        not is_jinja_available(),
        reason="Model card tests cannot be performed without Jinja installed.",
    )
    def test_push_to_hub_library_name(self):
        """Verify the auto-generated model card declares `library_name: diffusers`."""
        components = self.get_pipeline_components()
        pipeline = StableDiffusionPipeline(**components)
        pipeline.push_to_hub(self.repo_id, token=TOKEN)

        model_card =
ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data
        assert model_card.library_name == "diffusers"

        # Reset repo
        delete_repo(self.repo_id, token=TOKEN)


class PyramidAttentionBroadcastTesterMixin:
    # Shared PAB config; `current_timestep_callback` is injected per test.
    pab_config = PyramidAttentionBroadcastConfig(
        spatial_attention_block_skip_range=2,
        spatial_attention_timestep_skip_range=(100, 800),
        spatial_attention_block_identifiers=["transformer_blocks"],
    )

    def test_pyramid_attention_broadcast_layers(self):
        """Check PAB hooks: correct count and type after `enable_cache`, cache
        populated during inference, and state fully reset afterwards."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        num_layers = 0
        num_single_layers = 0
        dummy_component_kwargs = {}
        dummy_component_parameters = inspect.signature(self.get_dummy_components).parameters
        if "num_layers" in dummy_component_parameters:
            num_layers = 2
            dummy_component_kwargs["num_layers"] = num_layers
        if "num_single_layers" in dummy_component_parameters:
            num_single_layers = 2
            dummy_component_kwargs["num_single_layers"] = num_single_layers

        components = self.get_dummy_components(**dummy_component_kwargs)
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)

        self.pab_config.current_timestep_callback = lambda: pipe.current_timestep
        denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet
        denoiser.enable_cache(self.pab_config)

        # One hook per (single) transformer block for each enabled skip range.
        expected_hooks = 0
        if self.pab_config.spatial_attention_block_skip_range is not None:
            expected_hooks += num_layers + num_single_layers
        if self.pab_config.temporal_attention_block_skip_range is not None:
            expected_hooks += num_layers + num_single_layers
        if self.pab_config.cross_attention_block_skip_range is not None:
            expected_hooks += num_layers + num_single_layers

        denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet
        count = 0
        for module in denoiser.modules():
            if hasattr(module, "_diffusers_hook"):
                hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast")
                if hook is None:
                    continue
                count += 1
                self.assertTrue(
                    isinstance(hook, PyramidAttentionBroadcastHook),
                    "Hook should be of type PyramidAttentionBroadcastHook.",
                )
                self.assertTrue(hook.state.cache is None, "Cache should be None at initialization.")
        self.assertEqual(count, expected_hooks, "Number of hooks should match the expected number.")

        # Perform dummy inference step to ensure state is updated
        def pab_state_check_callback(pipe, i, t, kwargs):
            for module in denoiser.modules():
                if hasattr(module, "_diffusers_hook"):
                    hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast")
                    if hook is None:
                        continue
                    self.assertTrue(
                        hook.state.cache is not None,
                        "Cache should have updated during inference.",
                    )
                    self.assertTrue(
                        hook.state.iteration == i + 1,
                        "Hook iteration state should have updated during inference.",
                    )
            return {}

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 2
        inputs["callback_on_step_end"] = pab_state_check_callback
        pipe(**inputs)[0]

        # After inference, reset_stateful_hooks is called within the pipeline, which should have reset the states
        for module in denoiser.modules():
            if hasattr(module, "_diffusers_hook"):
                hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast")
                if hook is None:
                    continue
                self.assertTrue(
                    hook.state.cache is None,
                    "Cache should be reset to None after inference.",
                )
                self.assertTrue(
                    hook.state.iteration == 0,
                    "Iteration should be reset to 0 after inference.",
                )

    def test_pyramid_attention_broadcast_inference(self, expected_atol: float = 0.2):
        """Outputs with PAB enabled stay within `expected_atol` of the baseline,
        and disabling the cache restores the baseline exactly (1e-4)."""
        # We need to use higher tolerance because we are using a random model. With a converged/trained
        # model, the tolerance can be lower.
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        num_layers = 2
        components = self.get_dummy_components(num_layers=num_layers)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        # Run inference without PAB
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 4
        output = pipe(**inputs)[0]
        original_image_slice = output.flatten()
        original_image_slice = np.concatenate((original_image_slice[:8], original_image_slice[-8:]))

        # Run inference with PAB enabled
        self.pab_config.current_timestep_callback = lambda: pipe.current_timestep
        denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet
        denoiser.enable_cache(self.pab_config)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 4
        output = pipe(**inputs)[0]
        image_slice_pab_enabled = output.flatten()
        image_slice_pab_enabled = np.concatenate((image_slice_pab_enabled[:8], image_slice_pab_enabled[-8:]))

        # Run inference with PAB disabled
        denoiser.disable_cache()
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 4
        output = pipe(**inputs)[0]
        image_slice_pab_disabled = output.flatten()
        image_slice_pab_disabled = np.concatenate((image_slice_pab_disabled[:8], image_slice_pab_disabled[-8:]))

        assert np.allclose(original_image_slice, image_slice_pab_enabled, atol=expected_atol), (
            "PAB outputs should not differ much in specified timestep range."
        )
        assert np.allclose(original_image_slice, image_slice_pab_disabled, atol=1e-4), (
            "Outputs from normal inference and after disabling cache should not differ."
) class FasterCacheTesterMixin: faster_cache_config = FasterCacheConfig( spatial_attention_block_skip_range=2, spatial_attention_timestep_skip_range=(-1, 901), unconditional_batch_skip_range=2, attention_weight_callback=lambda _: 0.5, ) def test_faster_cache_basic_warning_or_errors_raised(self): components = self.get_dummy_components() logger = logging.get_logger("diffusers.hooks.faster_cache") logger.setLevel(logging.INFO) # Check if warning is raise when no attention_weight_callback is provided pipe = self.pipeline_class(**components) with CaptureLogger(logger) as cap_logger: config = FasterCacheConfig(spatial_attention_block_skip_range=2, attention_weight_callback=None) apply_faster_cache(pipe.transformer, config) self.assertTrue("No `attention_weight_callback` provided when enabling FasterCache" in cap_logger.out) # Check if error raised when unsupported tensor format used pipe = self.pipeline_class(**components) with self.assertRaises(ValueError): config = FasterCacheConfig(spatial_attention_block_skip_range=2, tensor_format="BFHWC") apply_faster_cache(pipe.transformer, config) def test_faster_cache_inference(self, expected_atol: float = 0.1): device = "cpu" # ensure determinism for the device-dependent torch.Generator def create_pipe(): torch.manual_seed(0) num_layers = 2 components = self.get_dummy_components(num_layers=num_layers) pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) return pipe def run_forward(pipe): torch.manual_seed(0) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 4 return pipe(**inputs)[0] # Run inference without FasterCache pipe = create_pipe() output = run_forward(pipe).flatten() original_image_slice = np.concatenate((output[:8], output[-8:])) # Run inference with FasterCache enabled self.faster_cache_config.current_timestep_callback = lambda: pipe.current_timestep pipe = create_pipe() pipe.transformer.enable_cache(self.faster_cache_config) output = 
run_forward(pipe).flatten() image_slice_faster_cache_enabled = np.concatenate((output[:8], output[-8:])) # Run inference with FasterCache disabled pipe.transformer.disable_cache() output = run_forward(pipe).flatten() image_slice_faster_cache_disabled = np.concatenate((output[:8], output[-8:])) assert np.allclose(original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol), ( "FasterCache outputs should not differ much in specified timestep range." ) assert np.allclose(original_image_slice, image_slice_faster_cache_disabled, atol=1e-4), ( "Outputs from normal inference and after disabling cache should not differ." ) def test_faster_cache_state(self): from diffusers.hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK device = "cpu" # ensure determinism for the device-dependent torch.Generator num_layers = 0 num_single_layers = 0 dummy_component_kwargs = {} dummy_component_parameters = inspect.signature(self.get_dummy_components).parameters if "num_layers" in dummy_component_parameters: num_layers = 2 dummy_component_kwargs["num_layers"] = num_layers if "num_single_layers" in dummy_component_parameters: num_single_layers = 2 dummy_component_kwargs["num_single_layers"] = num_single_layers components = self.get_dummy_components(**dummy_component_kwargs) pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) self.faster_cache_config.current_timestep_callback = lambda: pipe.current_timestep pipe.transformer.enable_cache(self.faster_cache_config) expected_hooks = 0 if self.faster_cache_config.spatial_attention_block_skip_range is not None: expected_hooks += num_layers + num_single_layers if self.faster_cache_config.temporal_attention_block_skip_range is not None: expected_hooks += num_layers + num_single_layers # Check if faster_cache denoiser hook is attached denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet self.assertTrue( hasattr(denoiser, "_diffusers_hook") and 
isinstance(denoiser._diffusers_hook.get_hook(_FASTER_CACHE_DENOISER_HOOK), FasterCacheDenoiserHook), "Hook should be of type FasterCacheDenoiserHook.", ) # Check if all blocks have faster_cache block hook attached count = 0 for name, module in denoiser.named_modules(): if hasattr(module, "_diffusers_hook"): if name == "": # Skip the root denoiser module continue count += 1 self.assertTrue( isinstance(module._diffusers_hook.get_hook(_FASTER_CACHE_BLOCK_HOOK), FasterCacheBlockHook), "Hook should be of type FasterCacheBlockHook.", ) self.assertEqual(count, expected_hooks, "Number of hooks should match expected number.") # Perform inference to ensure that states are updated correctly def faster_cache_state_check_callback(pipe, i, t, kwargs): for name, module in denoiser.named_modules(): if not hasattr(module, "_diffusers_hook"): continue if name == "": # Root denoiser module state = module._diffusers_hook.get_hook(_FASTER_CACHE_DENOISER_HOOK).state if not self.faster_cache_config.is_guidance_distilled: self.assertTrue(state.low_frequency_delta is not None, "Low frequency delta should be set.") self.assertTrue(state.high_frequency_delta is not None, "High frequency delta should be set.") else: # Internal blocks state = module._diffusers_hook.get_hook(_FASTER_CACHE_BLOCK_HOOK).state self.assertTrue(state.cache is not None and len(state.cache) == 2, "Cache should be set.") self.assertTrue(state.iteration == i + 1, "Hook iteration state should have updated during inference.") return {} inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 4 inputs["callback_on_step_end"] = faster_cache_state_check_callback _ = pipe(**inputs)[0] # After inference, reset_stateful_hooks is called within the pipeline, which should have reset the states for name, module in denoiser.named_modules(): if not hasattr(module, "_diffusers_hook"): continue if name == "": # Root denoiser module state = module._diffusers_hook.get_hook(_FASTER_CACHE_DENOISER_HOOK).state 
self.assertTrue(state.iteration == 0, "Iteration should be reset to 0.") self.assertTrue(state.low_frequency_delta is None, "Low frequency delta should be reset to None.") self.assertTrue(state.high_frequency_delta is None, "High frequency delta should be reset to None.") else: # Internal blocks state = module._diffusers_hook.get_hook(_FASTER_CACHE_BLOCK_HOOK).state self.assertTrue(state.iteration == 0, "Iteration should be reset to 0.") self.assertTrue(state.batch_size is None, "Batch size should be reset to None.") self.assertTrue(state.cache is None, "Cache should be reset to None.") # TODO(aryan, dhruv): the cache tester mixins should probably be rewritten so that more models can be tested out # of the box once there is better cache support/implementation class FirstBlockCacheTesterMixin: # threshold is intentionally set higher than usual values since we're testing with random unconverged models # that will not satisfy the expected properties of the denoiser for caching to be effective first_block_cache_config = FirstBlockCacheConfig(threshold=0.8) def test_first_block_cache_inference(self, expected_atol: float = 0.1): device = "cpu" # ensure determinism for the device-dependent torch.Generator def create_pipe(): torch.manual_seed(0) num_layers = 2 components = self.get_dummy_components(num_layers=num_layers) pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) return pipe def run_forward(pipe): torch.manual_seed(0) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 4 return pipe(**inputs)[0] # Run inference without FirstBlockCache pipe = create_pipe() output = run_forward(pipe).flatten() original_image_slice = np.concatenate((output[:8], output[-8:])) # Run inference with FirstBlockCache enabled pipe = create_pipe() pipe.transformer.enable_cache(self.first_block_cache_config) output = run_forward(pipe).flatten() image_slice_fbc_enabled = np.concatenate((output[:8], output[-8:])) # Run 
inference with FirstBlockCache disabled pipe.transformer.disable_cache() output = run_forward(pipe).flatten() image_slice_fbc_disabled = np.concatenate((output[:8], output[-8:])) assert np.allclose(original_image_slice, image_slice_fbc_enabled, atol=expected_atol), ( "FirstBlockCache outputs should not differ much." ) assert np.allclose(original_image_slice, image_slice_fbc_disabled, atol=1e-4), ( "Outputs from normal inference and after disabling cache should not differ." ) # Some models (e.g. unCLIP) are extremely likely to significantly deviate depending on which hardware is used. # This helper function is used to check that the image doesn't deviate on average more than 10 pixels from a # reference image. def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10): image = np.asarray(DiffusionPipeline.numpy_to_pil(image)[0], dtype=np.float32) expected_image = np.asarray(DiffusionPipeline.numpy_to_pil(expected_image)[0], dtype=np.float32) avg_diff = np.abs(image - expected_image).mean() assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average"
diffusers/tests/pipelines/test_pipelines_common.py/0
{ "file_path": "diffusers/tests/pipelines/test_pipelines_common.py", "repo_id": "diffusers", "token_count": 57310 }
206
# coding=utf-8 # Copyright 2025 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import tempfile import unittest import numpy as np import pytest import safetensors.torch from huggingface_hub import hf_hub_download from PIL import Image from diffusers import ( BitsAndBytesConfig, DiffusionPipeline, FluxControlPipeline, FluxTransformer2DModel, SD3Transformer2DModel, ) from diffusers.quantizers import PipelineQuantizationConfig from diffusers.utils import is_accelerate_version, logging from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, is_bitsandbytes_available, is_torch_available, is_transformers_available, load_pt, numpy_cosine_similarity_distance, require_accelerate, require_bitsandbytes_version_greater, require_peft_backend, require_torch, require_torch_accelerator, require_torch_version_greater, require_transformers_version_greater, slow, torch_device, ) from ..test_torch_compile_utils import QuantCompileTests def get_some_linear_layer(model): if model.__class__.__name__ in ["SD3Transformer2DModel", "FluxTransformer2DModel"]: return model.transformer_blocks[0].attn.to_q else: return NotImplementedError("Don't know what layer to retrieve here.") if is_transformers_available(): from transformers import BitsAndBytesConfig as BnbConfig from transformers import T5EncoderModel if is_torch_available(): import torch from ..utils import LoRALayer, get_memory_consumption_stat if is_bitsandbytes_available(): import 
bitsandbytes as bnb from diffusers.quantizers.bitsandbytes.utils import replace_with_bnb_linear @require_bitsandbytes_version_greater("0.43.2") @require_accelerate @require_torch @require_torch_accelerator @slow class Base4bitTests(unittest.TestCase): # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only SD3 to test our module model_name = "stabilityai/stable-diffusion-3-medium-diffusers" # This was obtained on audace so the number might slightly change expected_rel_difference = 3.69 expected_memory_saving_ratio = 0.8 prompt = "a beautiful sunset amidst the mountains." num_inference_steps = 10 seed = 0 @classmethod def setUpClass(cls): cls.is_deterministic_enabled = torch.are_deterministic_algorithms_enabled() if not cls.is_deterministic_enabled: torch.use_deterministic_algorithms(True) @classmethod def tearDownClass(cls): if not cls.is_deterministic_enabled: torch.use_deterministic_algorithms(False) def get_dummy_inputs(self): prompt_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt", torch_device, ) pooled_prompt_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/pooled_prompt_embeds.pt", torch_device, ) latent_model_input = load_pt( "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/latent_model_input.pt", torch_device, ) input_dict_for_transformer = { "hidden_states": latent_model_input, "encoder_hidden_states": prompt_embeds, "pooled_projections": pooled_prompt_embeds, "timestep": torch.Tensor([1.0]), "return_dict": False, } return input_dict_for_transformer class BnB4BitBasicTests(Base4bitTests): def setUp(self): gc.collect() backend_empty_cache(torch_device) # Models self.model_fp16 = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", 
torch_dtype=torch.float16 ) nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) self.model_4bit = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device ) def tearDown(self): if hasattr(self, "model_fp16"): del self.model_fp16 if hasattr(self, "model_4bit"): del self.model_4bit gc.collect() backend_empty_cache(torch_device) def test_quantization_num_parameters(self): r""" Test if the number of returned parameters is correct """ num_params_4bit = self.model_4bit.num_parameters() num_params_fp16 = self.model_fp16.num_parameters() self.assertEqual(num_params_4bit, num_params_fp16) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_4bit.config self.assertTrue("quantization_config" in config) _ = config["quantization_config"].to_dict() _ = config["quantization_config"].to_diff_dict() _ = config["quantization_config"].to_json_string() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_4bit, self.expected_rel_difference, delta=1e-2) linear = get_some_linear_layer(self.model_4bit) self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit) def test_model_memory_usage(self): # Delete to not let anything interfere. del self.model_4bit, self.model_fp16 # Re-instantiate. 
inputs = self.get_dummy_inputs() inputs = { k: v.to(device=torch_device, dtype=torch.float16) for k, v in inputs.items() if not isinstance(v, bool) } model_fp16 = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", torch_dtype=torch.float16 ).to(torch_device) unquantized_model_memory = get_memory_consumption_stat(model_fp16, inputs) del model_fp16 nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) model_4bit = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=nf4_config, torch_dtype=torch.float16 ) quantized_model_memory = get_memory_consumption_stat(model_4bit, inputs) assert unquantized_model_memory / quantized_model_memory >= self.expected_memory_saving_ratio def test_original_dtype(self): r""" A simple test to check if the model successfully stores the original dtype """ self.assertTrue("_pre_quantization_dtype" in self.model_4bit.config) self.assertFalse("_pre_quantization_dtype" in self.model_fp16.config) self.assertTrue(self.model_4bit.config["_pre_quantization_dtype"] == torch.float16) def test_keep_modules_in_fp32(self): r""" A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. Also ensures if inference works. 
""" fp32_modules = SD3Transformer2DModel._keep_in_fp32_modules SD3Transformer2DModel._keep_in_fp32_modules = ["proj_out"] nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) model = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device ) for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if name in model._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.float32) else: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uint8) # test if inference works. with torch.no_grad() and torch.amp.autocast(torch_device, dtype=torch.float16): input_dict_for_transformer = self.get_dummy_inputs() model_inputs = { k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) } model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) _ = model(**model_inputs) SD3Transformer2DModel._keep_in_fp32_modules = fp32_modules def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["proj_out"]: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uint8) def test_config_from_pretrained(self): transformer_4bit = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/flux.1-dev-nf4-pkg", subfolder="transformer" ) linear = get_some_linear_layer(transformer_4bit) self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit) self.assertTrue(hasattr(linear.weight, 
"quant_state")) self.assertTrue(linear.weight.quant_state.__class__ == bnb.functional.QuantState) def test_device_assignment(self): mem_before = self.model_4bit.get_memory_footprint() # Move to CPU self.model_4bit.to("cpu") self.assertEqual(self.model_4bit.device.type, "cpu") self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before) # Move back to CUDA device for device in [0, f"{torch_device}", f"{torch_device}:0", "call()"]: if device == "call()": self.model_4bit.to(f"{torch_device}:0") else: self.model_4bit.to(device) self.assertEqual(self.model_4bit.device, torch.device(0)) self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before) self.model_4bit.to("cpu") def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after converting it in 4-bit will throw an error. Checks also if other models are casted correctly. Device placement, however, is supported. """ with self.assertRaises(ValueError): # Tries with a `dtype` self.model_4bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` and `dtype` self.model_4bit.to(device=f"{torch_device}:0", dtype=torch.float16) with self.assertRaises(ValueError): # Tries with a cast self.model_4bit.float() with self.assertRaises(ValueError): # Tries with a cast self.model_4bit.half() # This should work self.model_4bit.to(torch_device) # Test if we did not break anything self.model_fp16 = self.model_fp16.to(dtype=torch.float32, device=torch_device) input_dict_for_transformer = self.get_dummy_inputs() model_inputs = { k: v.to(dtype=torch.float32, device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) } model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) with torch.no_grad(): _ = self.model_fp16(**model_inputs) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = 
self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() # Check that this does not throw an error _ = self.model_fp16.to(torch_device) def test_bnb_4bit_wrong_config(self): r""" Test whether creating a bnb config with unsupported values leads to errors. """ with self.assertRaises(ValueError): _ = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_storage="add") def test_bnb_4bit_errors_loading_incorrect_state_dict(self): r""" Test if loading with an incorrect state dict raises an error. """ with tempfile.TemporaryDirectory() as tmpdirname: nf4_config = BitsAndBytesConfig(load_in_4bit=True) model_4bit = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device ) model_4bit.save_pretrained(tmpdirname) del model_4bit with self.assertRaises(ValueError) as err_context: state_dict = safetensors.torch.load_file( os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors") ) # corrupt the state dict key_to_target = "context_embedder.weight" # can be other keys too. 
compatible_param = state_dict[key_to_target] corrupted_param = torch.randn(compatible_param.shape[0] - 1, 1) state_dict[key_to_target] = bnb.nn.Params4bit(corrupted_param, requires_grad=False) safetensors.torch.save_file( state_dict, os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors") ) _ = SD3Transformer2DModel.from_pretrained(tmpdirname) assert key_to_target in str(err_context.exception) def test_bnb_4bit_logs_warning_for_no_quantization(self): model_with_no_linear = torch.nn.Sequential(torch.nn.Conv2d(4, 4, 3), torch.nn.ReLU()) quantization_config = BitsAndBytesConfig(load_in_4bit=True) logger = logging.get_logger("diffusers.quantizers.bitsandbytes.utils") logger.setLevel(30) with CaptureLogger(logger) as cap_logger: _ = replace_with_bnb_linear(model_with_no_linear, quantization_config=quantization_config) assert ( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." in cap_logger.out ) class BnB4BitTrainingTests(Base4bitTests): def setUp(self): gc.collect() backend_empty_cache(torch_device) nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) self.model_4bit = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device ) def test_training(self): # Step 1: freeze all parameters for param in self.model_4bit.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in self.model_4bit.named_modules(): if "Attention" in repr(type(module)): module.to_k = LoRALayer(module.to_k, rank=4) module.to_q = LoRALayer(module.to_q, rank=4) module.to_v = LoRALayer(module.to_v, rank=4) # Step 3: dummy batch input_dict_for_transformer = self.get_dummy_inputs() model_inputs = { k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) } model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) # Step 4: Check if the gradient is not None with torch.amp.autocast(torch_device, dtype=torch.float16): out = self.model_4bit(**model_inputs)[0] out.norm().backward() for module in self.model_4bit.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) @require_transformers_version_greater("4.44.0") class SlowBnb4BitTests(Base4bitTests): def setUp(self) -> None: gc.collect() backend_empty_cache(torch_device) nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) model_4bit = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device ) self.pipeline_4bit = DiffusionPipeline.from_pretrained( self.model_name, transformer=model_4bit, torch_dtype=torch.float16 ) self.pipeline_4bit.enable_model_cpu_offload() def tearDown(self): del self.pipeline_4bit gc.collect() backend_empty_cache(torch_device) def test_quality(self): output = self.pipeline_4bit( prompt=self.prompt, num_inference_steps=self.num_inference_steps, generator=torch.manual_seed(self.seed), output_type="np", ).images out_slice = output[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1123, 0.1296, 0.1609, 0.1042, 0.1230, 0.1274, 0.0928, 0.1165, 
0.1216]) max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) self.assertTrue(max_diff < 1e-2) def test_generate_quality_dequantize(self): r""" Test that loading the model and unquantize it produce correct results. """ self.pipeline_4bit.transformer.dequantize() output = self.pipeline_4bit( prompt=self.prompt, num_inference_steps=self.num_inference_steps, generator=torch.manual_seed(self.seed), output_type="np", ).images out_slice = output[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1216, 0.1387, 0.1584, 0.1152, 0.1318, 0.1282, 0.1062, 0.1226, 0.1228]) max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) self.assertTrue(max_diff < 1e-3) # Since we offloaded the `pipeline_4bit.transformer` to CPU (result of `enable_model_cpu_offload()), check # the following. self.assertTrue(self.pipeline_4bit.transformer.device.type == "cpu") # calling it again shouldn't be a problem _ = self.pipeline_4bit( prompt=self.prompt, num_inference_steps=2, generator=torch.manual_seed(self.seed), output_type="np", ).images def test_moving_to_cpu_throws_warning(self): nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) model_4bit = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device ) logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(30) with CaptureLogger(logger) as cap_logger: # Because `model.dtype` will return torch.float16 as SD3 transformer has # a conv layer as the first layer. 
_ = DiffusionPipeline.from_pretrained( self.model_name, transformer=model_4bit, torch_dtype=torch.float16 ).to("cpu") assert "Pipelines loaded with `dtype=torch.float16`" in cap_logger.out @pytest.mark.xfail( condition=is_accelerate_version("<=", "1.1.1"), reason="Test will pass after https://github.com/huggingface/accelerate/pull/3223 is in a release.", strict=True, ) def test_pipeline_cuda_placement_works_with_nf4(self): transformer_nf4_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) transformer_4bit = SD3Transformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=transformer_nf4_config, torch_dtype=torch.float16, device_map=torch_device, ) text_encoder_3_nf4_config = BnbConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) text_encoder_3_4bit = T5EncoderModel.from_pretrained( self.model_name, subfolder="text_encoder_3", quantization_config=text_encoder_3_nf4_config, torch_dtype=torch.float16, device_map=torch_device, ) # CUDA device placement works. pipeline_4bit = DiffusionPipeline.from_pretrained( self.model_name, transformer=transformer_4bit, text_encoder_3=text_encoder_3_4bit, torch_dtype=torch.float16, ).to(torch_device) # Check if inference works. _ = pipeline_4bit(self.prompt, max_sequence_length=20, num_inference_steps=2) del pipeline_4bit def test_device_map(self): """ Test if the quantized model is working properly with "auto". cpu/disk offloading as well doesn't work with bnb. 
""" def get_dummy_tensor_inputs(device=None, seed: int = 0): batch_size = 1 num_latent_channels = 4 num_image_channels = 3 height = width = 4 sequence_length = 48 embedding_dim = 32 torch.manual_seed(seed) hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to( device, dtype=torch.bfloat16 ) torch.manual_seed(seed) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( device, dtype=torch.bfloat16 ) torch.manual_seed(seed) pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "pooled_projections": pooled_prompt_embeds, "txt_ids": text_ids, "img_ids": image_ids, "timestep": timestep, } inputs = get_dummy_tensor_inputs(torch_device) expected_slice = np.array( [0.47070312, 0.00390625, -0.03662109, -0.19628906, -0.53125, 0.5234375, -0.17089844, -0.59375, 0.578125] ) # non sharded quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16 ) quantized_model = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/tiny-flux-pipe", subfolder="transformer", quantization_config=quantization_config, device_map="auto", torch_dtype=torch.bfloat16, ) weight = quantized_model.transformer_blocks[0].ff.net[2].weight self.assertTrue(isinstance(weight, bnb.nn.modules.Params4bit)) output = quantized_model(**inputs)[0] output_slice = output.flatten()[-9:].detach().float().cpu().numpy() self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) # sharded quantization_config = 
BitsAndBytesConfig(
            load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16
        )
        quantized_model = FluxTransformer2DModel.from_pretrained(
            "hf-internal-testing/tiny-flux-sharded",
            subfolder="transformer",
            quantization_config=quantization_config,
            device_map="auto",
            torch_dtype=torch.bfloat16,
        )

        # 4-bit quantization should replace the Linear weight with a bnb Params4bit.
        weight = quantized_model.transformer_blocks[0].ff.net[2].weight
        self.assertTrue(isinstance(weight, bnb.nn.modules.Params4bit))

        output = quantized_model(**inputs)[0]
        output_slice = output.flatten()[-9:].detach().float().cpu().numpy()
        self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3)


@require_transformers_version_greater("4.44.0")
class SlowBnb4BitFluxTests(Base4bitTests):
    """End-to-end tests for a Flux pipeline whose T5 encoder and transformer are
    loaded from a pre-quantized NF4 checkpoint."""

    def setUp(self) -> None:
        gc.collect()
        backend_empty_cache(torch_device)

        model_id = "hf-internal-testing/flux.1-dev-nf4-pkg"
        t5_4bit = T5EncoderModel.from_pretrained(model_id, subfolder="text_encoder_2")
        transformer_4bit = FluxTransformer2DModel.from_pretrained(model_id, subfolder="transformer")
        self.pipeline_4bit = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            text_encoder_2=t5_4bit,
            transformer=transformer_4bit,
            torch_dtype=torch.float16,
        )
        self.pipeline_4bit.enable_model_cpu_offload()

    def tearDown(self):
        del self.pipeline_4bit

        gc.collect()
        backend_empty_cache(torch_device)

    def test_quality(self):
        # keep the resolution and max tokens to a lower number for faster execution.
        output = self.pipeline_4bit(
            prompt=self.prompt,
            num_inference_steps=self.num_inference_steps,
            generator=torch.manual_seed(self.seed),
            height=256,
            width=256,
            max_sequence_length=64,
            output_type="np",
        ).images
        out_slice = output[0, -3:, -3:, -1].flatten()
        expected_slice = np.array([0.0583, 0.0586, 0.0632, 0.0815, 0.0813, 0.0947, 0.1040, 0.1145, 0.1265])

        max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice)
        self.assertTrue(max_diff < 1e-3)

    @require_peft_backend
    def test_lora_loading(self):
        # Loading a LoRA on top of the 4-bit pipeline must keep outputs reproducible.
        self.pipeline_4bit.load_lora_weights(
            hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd"
        )
        self.pipeline_4bit.set_adapters("hyper-sd", adapter_weights=0.125)

        output = self.pipeline_4bit(
            prompt=self.prompt,
            height=256,
            width=256,
            max_sequence_length=64,
            output_type="np",
            num_inference_steps=8,
            generator=torch.Generator().manual_seed(42),
        ).images
        out_slice = output[0, -3:, -3:, -1].flatten()
        expected_slice = np.array([0.5347, 0.5342, 0.5283, 0.5093, 0.4988, 0.5093, 0.5044, 0.5015, 0.4946])

        max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice)
        self.assertTrue(max_diff < 1e-3)


@require_transformers_version_greater("4.44.0")
@require_peft_backend
class SlowBnb4BitFluxControlWithLoraTests(Base4bitTests):
    """Tests loading a Control LoRA into a fully 4-bit FluxControl pipeline."""

    def setUp(self) -> None:
        gc.collect()
        backend_empty_cache(torch_device)

        self.pipeline_4bit = FluxControlPipeline.from_pretrained("eramth/flux-4bit", torch_dtype=torch.float16)
        self.pipeline_4bit.enable_model_cpu_offload()

    def tearDown(self):
        del self.pipeline_4bit

        gc.collect()
        backend_empty_cache(torch_device)

    def test_lora_loading(self):
        self.pipeline_4bit.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora")

        output = self.pipeline_4bit(
            prompt=self.prompt,
            # A blank control image is enough: only reproducibility of the slice matters.
            control_image=Image.new(mode="RGB", size=(256, 256)),
            height=256,
            width=256,
            max_sequence_length=64,
            output_type="np",
            num_inference_steps=8,
            generator=torch.Generator().manual_seed(42),
        ).images
        out_slice = output[0, -3:, -3:, -1].flatten()
        expected_slice = np.array([0.1636, 0.1675, 0.1982, 0.1743, 0.1809, 0.1936, 0.1743, 0.2095, 0.2139])

        max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice)
        self.assertTrue(max_diff < 1e-3, msg=f"{out_slice=} != {expected_slice=}")


@slow
class BaseBnb4BitSerializationTests(Base4bitTests):
    """Round-trip save/load tests for 4-bit quantized SD3 transformers."""

    def tearDown(self):
        gc.collect()
        backend_empty_cache(torch_device)

    def test_serialization(self, quant_type="nf4", double_quant=True, safe_serialization=True):
        r"""
        Test whether it is possible to serialize a model in 4-bit. Uses most typical params as default. See
        ExtendedSerializationTest class for more params combinations.
        """

        self.quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type=quant_type,
            bnb_4bit_use_double_quant=double_quant,
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        model_0 = SD3Transformer2DModel.from_pretrained(
            self.model_name,
            subfolder="transformer",
            quantization_config=self.quantization_config,
            device_map=torch_device,
        )
        self.assertTrue("_pre_quantization_dtype" in model_0.config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model_0.save_pretrained(tmpdirname, safe_serialization=safe_serialization)

            # The serialized config must carry the quantization config but not the
            # runtime-only `_pre_quantization_dtype` marker.
            config = SD3Transformer2DModel.load_config(tmpdirname)
            self.assertTrue("quantization_config" in config)
            self.assertTrue("_pre_quantization_dtype" not in config)

            model_1 = SD3Transformer2DModel.from_pretrained(tmpdirname)

        # checking quantized linear module weight
        linear = get_some_linear_layer(model_1)
        self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit)
        self.assertTrue(hasattr(linear.weight, "quant_state"))
        self.assertTrue(linear.weight.quant_state.__class__ == bnb.functional.QuantState)

        # checking memory footprint
        self.assertAlmostEqual(model_0.get_memory_footprint() / model_1.get_memory_footprint(), 1, places=2)

        # Matching all parameters and their quant_state items:
        d0 = dict(model_0.named_parameters())
        d1 = dict(model_1.named_parameters())
        self.assertTrue(d0.keys() == d1.keys())

        for k in d0.keys():
            self.assertTrue(d0[k].shape == d1[k].shape)
            self.assertTrue(d0[k].device.type == d1[k].device.type)
            self.assertTrue(d0[k].device == d1[k].device)
            self.assertTrue(d0[k].dtype == d1[k].dtype)
            self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device)))

            if isinstance(d0[k], bnb.nn.modules.Params4bit):
                for v0, v1 in zip(
                    d0[k].quant_state.as_dict().values(),
                    d1[k].quant_state.as_dict().values(),
                ):
                    if isinstance(v0, torch.Tensor):
                        self.assertTrue(torch.equal(v0, v1.to(v0.device)))
                    else:
                        self.assertTrue(v0 == v1)

        # comparing forward() outputs
        dummy_inputs = self.get_dummy_inputs()
        inputs = {k: v.to(torch_device) for k, v in dummy_inputs.items() if isinstance(v, torch.Tensor)}
        inputs.update({k: v for k, v in dummy_inputs.items() if k not in inputs})
        out_0 = model_0(**inputs)[0]
        out_1 = model_1(**inputs)[0]
        self.assertTrue(torch.equal(out_0, out_1))


class ExtendedSerializationTest(BaseBnb4BitSerializationTests):
    """
    Tests more combinations of the serialization parameters (quant type, double
    quantization, safetensors vs. pickle).
    """

    def test_nf4_single_unsafe(self):
        self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=False)

    def test_nf4_single_safe(self):
        self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=True)

    def test_nf4_double_unsafe(self):
        self.test_serialization(quant_type="nf4", double_quant=True, safe_serialization=False)

    # nf4 double safetensors quantization is tested in test_serialization() method from the parent class

    def test_fp4_single_unsafe(self):
        self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=False)

    def test_fp4_single_safe(self):
        self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=True)

    def test_fp4_double_unsafe(self):
        self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=False)

    def test_fp4_double_safe(self):
        self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=True)


@require_torch_version_greater("2.7.1")
@require_bitsandbytes_version_greater("0.45.5")
class Bnb4BitCompileTests(QuantCompileTests, unittest.TestCase):
    """torch.compile smoke tests for pipelines quantized with bitsandbytes 4-bit."""

    @property
    def quantization_config(self):
        return PipelineQuantizationConfig(
            quant_backend="bitsandbytes_4bit",
            quant_kwargs={
                "load_in_4bit": True,
                "bnb_4bit_quant_type": "nf4",
                "bnb_4bit_compute_dtype": torch.bfloat16,
            },
            components_to_quantize=["transformer", "text_encoder_2"],
        )

    @require_bitsandbytes_version_greater("0.46.1")
    def test_torch_compile(self):
        # bnb 4-bit produces dynamic output shapes during dequantization; allow dynamo to capture them.
        torch._dynamo.config.capture_dynamic_output_shape_ops = True
        super().test_torch_compile()

    def test_torch_compile_with_group_offload_leaf(self):
        super()._test_torch_compile_with_group_offload_leaf(use_stream=True)
diffusers/tests/quantization/bnb/test_4bit.py/0
{ "file_path": "diffusers/tests/quantization/bnb/test_4bit.py", "repo_id": "diffusers", "token_count": 16303 }
207
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for CMStochasticIterativeScheduler (consistency models)."""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    # Override test_step_shape to add CMStochasticIterativeScheduler-specific logic regarding timesteps
    # Problem is that we don't know two timesteps that will always be in the timestep schedule from only the scheduler
    # config; scaled sigma_max is always in the timestep schedule, but sigma_min is in the sigma schedule while scaled
    # sigma_min is not in the timestep schedule
    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        """Run a deterministic single-step denoising loop and pin the output stats."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        # Note: the loop index was unused; iterate over timesteps directly.
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        """Run a deterministic two-step loop with custom timesteps and pin the output stats."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_full_loop_with_noise(self):
        """Start denoising part-way through the schedule after adding noise; pin output stats."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        t_start = 8

        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        noise = self.dummy_noise_deter

        # Skip the first t_start steps and noise the sample to the first remaining timestep.
        timesteps = scheduler.timesteps[t_start * scheduler.order :]
        sample = scheduler.add_noise(sample, noise, timesteps[:1])

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 763.9186) < 1e-2, f" expected result sum 763.9186, but get {result_sum}"
        assert abs(result_mean.item() - 0.9947) < 1e-3, f" expected result mean 0.9947, but get {result_mean}"

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # 15 > 12, so the schedule is not strictly descending and must be rejected.
        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            # Fixed: this was a plain string with a stray extra `}`, so the limit value
            # was never interpolated; it is now an f-string with balanced braces.
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
diffusers/tests/schedulers/test_scheduler_consistency_model.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_consistency_model.py", "repo_id": "diffusers", "token_count": 3029 }
208
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

from diffusers import (
    Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    torch_device,
)


enable_full_determinism()


@require_torch_accelerator
class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
    """Checks that loading a Lumina2 transformer from a single-file checkpoint
    yields the same config as loading it from the pretrained repo."""

    model_class = Lumina2Transformer2DModel
    ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
    # NOTE(review): this list currently repeats `ckpt_path`, so test_checkpoint_loading
    # re-tests the same file — presumably a checkpoint with alternate key names was
    # intended here; confirm against the repo.
    alternate_keys_ckpt_paths = [
        "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
    ]

    repo_id = "Alpha-VLLM/Lumina-Image-2.0"

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_single_file_components(self):
        """Every config entry (minus bookkeeping keys) must match between the two loaders."""
        model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
        model_single_file = self.model_class.from_single_file(self.ckpt_path)

        # Bookkeeping keys that legitimately differ between loading paths.
        PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
        for param_name, param_value in model_single_file.config.items():
            if param_name in PARAMS_TO_IGNORE:
                continue
            assert model.config[param_name] == param_value, (
                f"{param_name} differs between single file loading and pretrained loading"
            )

    def test_checkpoint_loading(self):
        """Smoke-test that each alternate checkpoint loads without error."""
        for ckpt_path in self.alternate_keys_ckpt_paths:
            backend_empty_cache(torch_device)
            model = self.model_class.from_single_file(ckpt_path)

            del model
            gc.collect()
            backend_empty_cache(torch_device)
diffusers/tests/single_file/test_lumina2_transformer.py/0
{ "file_path": "diffusers/tests/single_file/test_lumina2_transformer.py", "repo_id": "diffusers", "token_count": 1016 }
209
import gc
import unittest

import pytest
import torch

from diffusers import (
    StableDiffusionUpscalePipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)

from .single_file_testing_utils import SDSingleFileTesterMixin


enable_full_determinism()


@slow
@require_torch_accelerator
class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
    """Single-file loading tests for the SD x4 upscaler pipeline: the checkpoint
    loaded via from_single_file must behave like the pretrained repo."""

    pipeline_class = StableDiffusionUpscalePipeline
    ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors"
    original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
    repo_id = "stabilityai/stable-diffusion-x4-upscaler"

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_single_file_format_inference_is_same_as_pretrained(self):
        """Run the same seeded generation through both loading paths and compare images."""
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        prompt = "a cat sitting on a park bench"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(self.repo_id)
        pipe.enable_model_cpu_offload(device=torch_device)

        generator = torch.Generator("cpu").manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3)
        image_from_pretrained = output.images[0]

        pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(self.ckpt_path)
        pipe_from_single_file.enable_model_cpu_offload(device=torch_device)

        # Re-seed so both pipelines see identical noise.
        generator = torch.Generator("cpu").manual_seed(0)
        output_from_single_file = pipe_from_single_file(
            prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3
        )
        image_from_single_file = output_from_single_file.images[0]

        assert image_from_pretrained.shape == (512, 512, 3)
        assert image_from_single_file.shape == (512, 512, 3)
        assert (
            numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten())
            < 1e-3
        )

    @pytest.mark.xfail(
        condition=True,
        reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.",
        strict=True,
    )
    def test_single_file_components_with_original_config(self):
        super().test_single_file_components_with_original_config()

    @pytest.mark.xfail(
        condition=True,
        reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.",
        strict=True,
    )
    def test_single_file_components_with_original_config_local_files_only(self):
        super().test_single_file_components_with_original_config_local_files_only()
diffusers/tests/single_file/test_stable_diffusion_upscale_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_stable_diffusion_upscale_single_file.py", "repo_id": "diffusers", "token_count": 1323 }
210
import argparse import inspect import sys from pathlib import Path from typing import List, Type root_dir = Path(__file__).parent.parent.absolute() sys.path.insert(0, str(root_dir)) parser = argparse.ArgumentParser() parser.add_argument("--type", type=str, default=None) args = parser.parse_args() def get_test_methods_from_class(cls: Type) -> List[str]: """ Get all test method names from a given class. Only returns methods that start with 'test_'. """ test_methods = [] for name, obj in inspect.getmembers(cls): if name.startswith("test_") and inspect.isfunction(obj): test_methods.append(name) return sorted(test_methods) def generate_pytest_pattern(test_methods: List[str]) -> str: """Generate pytest pattern string for the -k flag.""" return " or ".join(test_methods) def generate_pattern_for_mixin(mixin_class: Type) -> str: """ Generate pytest pattern for a specific mixin class. """ if mixin_cls is None: return "" test_methods = get_test_methods_from_class(mixin_class) return generate_pytest_pattern(test_methods) if __name__ == "__main__": mixin_cls = None if args.type == "pipeline": from tests.pipelines.test_pipelines_common import PipelineTesterMixin mixin_cls = PipelineTesterMixin elif args.type == "models": from tests.models.test_modeling_common import ModelTesterMixin mixin_cls = ModelTesterMixin elif args.type == "lora": from tests.lora.utils import PeftLoraLoaderMixinTests mixin_cls = PeftLoraLoaderMixinTests pattern = generate_pattern_for_mixin(mixin_cls) print(pattern)
diffusers/utils/extract_tests_from_mixin.py/0
{ "file_path": "diffusers/utils/extract_tests_from_mixin.py", "repo_id": "diffusers", "token_count": 642 }
211
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This Dockerfile is designed for a lerobot user who wants to
# experiment with the project. It starts from a Python Slim base image.
# docker build -f docker/Dockerfile.user -t lerobot-user .
# docker run -it --rm lerobot-user

# Configure the base image
ARG PYTHON_VERSION=3.10
FROM python:${PYTHON_VERSION}-slim

# Configure environment variables
# MUJOCO_GL=egl enables headless (EGL) rendering; the venv is put first on PATH.
ENV DEBIAN_FRONTEND=noninteractive \
    MUJOCO_GL=egl \
    PATH=/lerobot/.venv/bin:$PATH

# Install system dependencies and uv (as root)
# NOTE(review): the user is added to the `sudo` group but the `sudo` package is not
# installed here — confirm whether sudo access inside the container is actually needed.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential git curl libglib2.0-0 libegl1-mesa-dev ffmpeg \
    libusb-1.0-0-dev speech-dispatcher libgeos-dev portaudio19-dev \
    && curl -LsSf https://astral.sh/uv/install.sh | sh \
    && mv /root/.local/bin/uv /usr/local/bin/uv \
    && useradd --create-home --shell /bin/bash user_lerobot \
    && usermod -aG sudo user_lerobot \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Create application directory and set permissions
WORKDIR /lerobot
RUN chown -R user_lerobot:user_lerobot /lerobot

# Switch to the non-root user
USER user_lerobot

# Environment variables for the testing
# All caches (HF hub, torch, triton) live under the non-root user's home.
ENV HOME=/home/user_lerobot \
    HF_HOME=/home/user_lerobot/.cache/huggingface \
    HF_LEROBOT_HOME=/home/user_lerobot/.cache/huggingface/lerobot \
    TORCH_HOME=/home/user_lerobot/.cache/torch \
    TRITON_CACHE_DIR=/home/user_lerobot/.cache/triton

# Create the virtual environment
# We use a virtual environment inside the container—even though the container itself \
# provides isolation—to closely resemble local development and allow users to \
# run other Python projects in the same container without dependency conflicts.
RUN uv venv

# Install Python dependencies for caching
# Copying only the project metadata and sources first lets Docker cache this
# expensive layer when unrelated files change.
COPY --chown=user_lerobot:user_lerobot pyproject.toml README.md MANIFEST.in ./
COPY --chown=user_lerobot:user_lerobot src/ src/
RUN uv pip install --no-cache ".[all]"

# Copy the rest of the application code
# Make sure to have the git-LFS files for testing
COPY --chown=user_lerobot:user_lerobot . .

# Set the default command
CMD ["/bin/bash"]
lerobot/docker/Dockerfile.user/0
{ "file_path": "lerobot/docker/Dockerfile.user", "repo_id": "lerobot", "token_count": 905 }
212
# Koch v1.1 In the steps below, we explain how to assemble the Koch v1.1 robot. ## Order and assemble the parts Follow the sourcing and assembling instructions provided in this [README](https://github.com/jess-moss/koch-v1-1). This will guide you through setting up both the follower and leader arms, as shown in the image below. For a visual walkthrough of the assembly process, you can refer to [this video tutorial](https://youtu.be/8nQIg9BwwTk). > [!WARNING] > Since the production of this video, we simplified the configuration phase. Because of this, two things differ from the instructions in that video: > > - Don't plug in all the motor cables right away and wait to be instructed to do so in [Configure the motors](#configure-the-motors). > - Don't screw in the controller board (PCB) to the base right away and wait for being instructed to do so in [Configure the motors](#configure-the-motors). ## Install LeRobot 🤗 To install LeRobot follow, our [Installation Guide](./installation) In addition to these instructions, you need to install the Dynamixel SDK: ```bash pip install -e ".[dynamixel]" ``` ## Configure the motors ### 1. Find the USB ports associated with each arm To find the port for each bus servo adapter, run this script: ```bash lerobot-find-port ``` <hfoptions id="example"> <hfoption id="Mac"> Example output: ``` Finding all available ports for the MotorBus. ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] Remove the USB cable from your MotorsBus and press Enter when done. [...Disconnect corresponding leader or follower arm and press Enter...] The port of this MotorsBus is /dev/tty.usbmodem575E0032081 Reconnect the USB cable. ``` Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your leader or follower arm. 
</hfoption> <hfoption id="Linux"> On Linux, you might need to give access to the USB ports by running: ```bash sudo chmod 666 /dev/ttyACM0 sudo chmod 666 /dev/ttyACM1 ``` Example output: ``` Finding all available ports for the MotorBus. ['/dev/ttyACM0', '/dev/ttyACM1'] Remove the usb cable from your MotorsBus and press Enter when done. [...Disconnect corresponding leader or follower arm and press Enter...] The port of this MotorsBus is /dev/ttyACM1 Reconnect the USB cable. ``` Where the found port is: `/dev/ttyACM1` corresponding to your leader or follower arm. </hfoption> </hfoptions> ### 2. Set the motors ids and baudrates Each motor is identified by a unique id on the bus. When brand new, motors usually come with a default id of `1`. For the communication to work properly between the motors and the controller, we first need to set a unique, different id to each motor. Additionally, the speed at which data is transmitted on the bus is determined by the baudrate. In order to talk to each other, the controller and all the motors need to be configured with the same baudrate. To that end, we first need to connect to each motor individually with the controller in order to set these. Since we will write these parameters in the non-volatile section of the motors' internal memory (EEPROM), we'll only need to do this once. If you are repurposing motors from another robot, you will probably also need to perform this step, as the ids and baudrate likely won't match. #### Follower Connect the usb cable from your computer and the 5V power supply to the follower arm's controller board. Then, run the following command or run the API example with the port you got from the previous step. You'll also need to give your leader arm a name with the `id` parameter. For a visual reference on how to set the motor ids please refer to [this video](https://huggingface.co/docs/lerobot/en/so101#setup-motors-video) where we follow the process for the SO101 arm. 
<hfoptions id="setup_motors"> <hfoption id="Command"> ```bash lerobot-setup-motors \ --robot.type=koch_follower \ --robot.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step ``` </hfoption> <hfoption id="API example"> <!-- prettier-ignore-start --> ```python from lerobot.robots.koch_follower import KochFollower, KochFollowerConfig config = KochFollowerConfig( port="/dev/tty.usbmodem575E0031751", id="my_awesome_follower_arm", ) follower = KochFollower(config) follower.setup_motors() ``` <!-- prettier-ignore-end --> </hfoption> </hfoptions> You should see the following instruction. ``` Connect the controller board to the 'gripper' motor only and press enter. ``` As instructed, plug the gripper's motor. Make sure it's the only motor connected to the board, and that the motor itself is not yet daisy-chained to any other motor. As you press `[Enter]`, the script will automatically set the id and baudrate for that motor. <details> <summary>Troubleshooting</summary> If you get an error at that point, check your cables and make sure they are plugged in properly: <ul> <li>Power supply</li> <li>USB cable between your computer and the controller board</li> <li>The 3-pin cable from the controller board to the motor</li> </ul> If you are using a Waveshare controller board, make sure that the two jumpers are set on the `B` channel (USB). </details> You should then see the following message: ``` 'gripper' motor id set to 6 ``` Followed by the next instruction: ``` Connect the controller board to the 'wrist_roll' motor only and press enter. ``` You can disconnect the 3-pin cable from the controller board but you can leave it connected to the gripper motor on the other end as it will already be in the right place. Now, plug in another 3-pin cable to the wrist roll motor and connect it to the controller board. As with the previous motor, make sure it is the only motor connected to the board and that the motor itself isn't connected to any other one. 
Repeat the operation for each motor as instructed.

> [!TIP]
> Check your cabling at each step before pressing Enter. For instance, the power supply cable might disconnect as you manipulate the board.

When you are done, the script will simply finish, at which point the motors are ready to be used. You can now plug the 3-pin cable from each motor to the next one, and the cable from the first motor (the 'shoulder pan' with id=1) to the controller board, which can now be attached to the base of the arm.

#### Leader

Do the same steps for the leader arm but modify the command or script accordingly.

<hfoptions id="setup_motors">
<hfoption id="Command">

```bash
lerobot-setup-motors \
    --teleop.type=koch_leader \
    --teleop.port=/dev/tty.usbmodem575E0031751 # <- paste here the port found at previous step
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.koch_leader import KochLeader, KochLeaderConfig

config = KochLeaderConfig(
    port="/dev/tty.usbmodem575E0031751",
    id="my_awesome_leader_arm",
)

leader = KochLeader(config)
leader.setup_motors()
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

## Calibrate

Next, you'll need to calibrate your robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. The calibration process is very important because it allows a neural network trained on one robot to work on another.
#### Follower

Run the following command or API example to calibrate the follower arm, using the port you found earlier (`--robot.port`) and giving the arm a unique name (`--robot.id`):

<hfoptions id="calibrate_follower">
<hfoption id="Command">

```bash
lerobot-calibrate \
    --robot.type=koch_follower \
    --robot.port=/dev/tty.usbmodem58760431551 \
    --robot.id=my_awesome_follower_arm
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
from lerobot.robots.koch_follower import KochFollowerConfig, KochFollower

config = KochFollowerConfig(
    port="/dev/tty.usbmodem585A0076891",
    id="my_awesome_follower_arm",
)

follower = KochFollower(config)
follower.connect(calibrate=False)
follower.calibrate()
follower.disconnect()
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

We unified the calibration method for most robots. Thus, the calibration steps for this Koch arm are the same as the steps for the SO100 and SO101. First, we have to move the robot to the position where each joint is in the middle of its range, then we press `Enter`. Secondly, we move all joints through their full range of motion. A video of this same process for the SO101 as reference can be found [here](https://huggingface.co/docs/lerobot/en/so101#calibration-video).
#### Leader

Do the same steps to calibrate the leader arm, using the leader's port (`--teleop.port`) and a unique name (`--teleop.id`). Run the following command or API example:

<hfoptions id="calibrate_leader">
<hfoption id="Command">

```bash
lerobot-calibrate \
    --teleop.type=koch_leader \
    --teleop.port=/dev/tty.usbmodem58760431551 \
    --teleop.id=my_awesome_leader_arm
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
from lerobot.teleoperators.koch_leader import KochLeaderConfig, KochLeader

config = KochLeaderConfig(
    port="/dev/tty.usbmodem575E0031751",
    id="my_awesome_leader_arm",
)

leader = KochLeader(config)
leader.connect(calibrate=False)
leader.calibrate()
leader.disconnect()
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)

> [!TIP]
> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb).
lerobot/docs/source/koch.mdx/0
{ "file_path": "lerobot/docs/source/koch.mdx", "repo_id": "lerobot", "token_count": 2924 }
213
from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets.utils import hw_to_dataset_features from lerobot.policies.act.modeling_act import ACTPolicy from lerobot.record import record_loop from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig from lerobot.utils.control_utils import init_keyboard_listener from lerobot.utils.utils import log_say from lerobot.utils.visualization_utils import _init_rerun NUM_EPISODES = 2 FPS = 30 EPISODE_TIME_SEC = 60 TASK_DESCRIPTION = "My task description" # Create the robot and teleoperator configurations robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") robot = LeKiwiClient(robot_config) policy = ACTPolicy.from_pretrained("<hf_username>/<policy_repo_id>") # Configure the dataset features action_features = hw_to_dataset_features(robot.action_features, "action") obs_features = hw_to_dataset_features(robot.observation_features, "observation") dataset_features = {**action_features, **obs_features} # Create the dataset dataset = LeRobotDataset.create( repo_id="<hf_username>/<eval_dataset_repo_id>", fps=FPS, features=dataset_features, robot_type=robot.name, use_videos=True, image_writer_threads=4, ) # To connect you already should have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi` robot.connect() _init_rerun(session_name="recording") listener, events = init_keyboard_listener() if not robot.is_connected: raise ValueError("Robot is not connected!") recorded_episodes = 0 while recorded_episodes < NUM_EPISODES and not events["stop_recording"]: log_say(f"Running inference, recording eval episode {recorded_episodes} of {NUM_EPISODES}") # Run the policy inference loop record_loop( robot=robot, events=events, fps=FPS, policy=policy, dataset=dataset, control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, ) # Logic for reset env if not events["stop_recording"] and ( (recorded_episodes < NUM_EPISODES - 
1) or events["rerecord_episode"] ): log_say("Reset the environment") record_loop( robot=robot, events=events, fps=FPS, control_time_s=EPISODE_TIME_SEC, single_task=TASK_DESCRIPTION, display_data=True, ) if events["rerecord_episode"]: log_say("Re-record episode") events["rerecord_episode"] = False events["exit_early"] = False dataset.clear_episode_buffer() continue dataset.save_episode() recorded_episodes += 1 # Upload to hub and clean up dataset.push_to_hub() robot.disconnect() listener.stop()
lerobot/examples/lekiwi/evaluate.py/0
{ "file_path": "lerobot/examples/lekiwi/evaluate.py", "repo_id": "lerobot", "token_count": 1146 }
214
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot import ( policies, # noqa: F401 ) from lerobot.datasets.transforms import ImageTransformsConfig from lerobot.datasets.video_utils import get_safe_default_codec @dataclass class DatasetConfig: # You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data # keys common between the datasets are kept. Each dataset gets and additional transform that inserts the # "dataset_index" into the returned item. The index mapping is made according to the order in which the # datasets are provided. repo_id: str # Root directory where the dataset will be stored (e.g. 'dataset/path'). root: str | None = None episodes: list[int] | None = None image_transforms: ImageTransformsConfig = field(default_factory=ImageTransformsConfig) revision: str | None = None use_imagenet_stats: bool = True video_backend: str = field(default_factory=get_safe_default_codec) @dataclass class WandBConfig: enable: bool = False # Set to true to disable saving an artifact despite training.save_checkpoint=True disable_artifact: bool = False project: str = "lerobot" entity: str | None = None notes: str | None = None run_id: str | None = None mode: str | None = None # Allowed values: 'online', 'offline' 'disabled'. 
Defaults to 'online' @dataclass class EvalConfig: n_episodes: int = 50 # `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv. batch_size: int = 50 # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). use_async_envs: bool = False def __post_init__(self): if self.batch_size > self.n_episodes: raise ValueError( "The eval batch size is greater than the number of eval episodes " f"({self.batch_size} > {self.n_episodes}). As a result, {self.batch_size} " f"eval environments will be instantiated, but only {self.n_episodes} will be used. " "This might significantly slow down evaluation. To fix this, you should update your command " f"to increase the number of episodes to match the batch size (e.g. `eval.n_episodes={self.batch_size}`), " f"or lower the batch size (e.g. `eval.batch_size={self.n_episodes}`)." )
lerobot/src/lerobot/configs/default.py/0
{ "file_path": "lerobot/src/lerobot/configs/default.py", "repo_id": "lerobot", "token_count": 1038 }
215
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from collections.abc import Callable, Sequence from dataclasses import dataclass, field from typing import Any import torch from torchvision.transforms import v2 from torchvision.transforms.v2 import ( Transform, functional as F, # noqa: N812 ) class RandomSubsetApply(Transform): """Apply a random subset of N transformations from a list of transformations. Args: transforms: list of transformations. p: represents the multinomial probabilities (with no replacement) used for sampling the transform. If the sum of the weights is not 1, they will be normalized. If ``None`` (default), all transforms have the same probability. n_subset: number of transformations to apply. If ``None``, all transforms are applied. Must be in [1, len(transforms)]. random_order: apply transformations in a random order. 
""" def __init__( self, transforms: Sequence[Callable], p: list[float] | None = None, n_subset: int | None = None, random_order: bool = False, ) -> None: super().__init__() if not isinstance(transforms, Sequence): raise TypeError("Argument transforms should be a sequence of callables") if p is None: p = [1] * len(transforms) elif len(p) != len(transforms): raise ValueError( f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}" ) if n_subset is None: n_subset = len(transforms) elif not isinstance(n_subset, int): raise TypeError("n_subset should be an int or None") elif not (1 <= n_subset <= len(transforms)): raise ValueError(f"n_subset should be in the interval [1, {len(transforms)}]") self.transforms = transforms total = sum(p) self.p = [prob / total for prob in p] self.n_subset = n_subset self.random_order = random_order self.selected_transforms = None def forward(self, *inputs: Any) -> Any: needs_unpacking = len(inputs) > 1 selected_indices = torch.multinomial(torch.tensor(self.p), self.n_subset) if not self.random_order: selected_indices = selected_indices.sort().values self.selected_transforms = [self.transforms[i] for i in selected_indices] for transform in self.selected_transforms: outputs = transform(*inputs) inputs = outputs if needs_unpacking else (outputs,) return outputs def extra_repr(self) -> str: return ( f"transforms={self.transforms}, " f"p={self.p}, " f"n_subset={self.n_subset}, " f"random_order={self.random_order}" ) class SharpnessJitter(Transform): """Randomly change the sharpness of an image or video. Similar to a v2.RandomAdjustSharpness with p=1 and a sharpness_factor sampled randomly. While v2.RandomAdjustSharpness applies — with a given probability — a fixed sharpness_factor to an image, SharpnessJitter applies a random sharpness_factor each time. This is to have a more diverse set of augmentations as a result. 
A sharpness_factor of 0 gives a blurred image, 1 gives the original image while 2 increases the sharpness by a factor of 2. If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. Args: sharpness: How much to jitter sharpness. sharpness_factor is chosen uniformly from [max(0, 1 - sharpness), 1 + sharpness] or the given [min, max]. Should be non negative numbers. """ def __init__(self, sharpness: float | Sequence[float]) -> None: super().__init__() self.sharpness = self._check_input(sharpness) def _check_input(self, sharpness): if isinstance(sharpness, (int, float)): if sharpness < 0: raise ValueError("If sharpness is a single number, it must be non negative.") sharpness = [1.0 - sharpness, 1.0 + sharpness] sharpness[0] = max(sharpness[0], 0.0) elif isinstance(sharpness, collections.abc.Sequence) and len(sharpness) == 2: sharpness = [float(v) for v in sharpness] else: raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.") if not 0.0 <= sharpness[0] <= sharpness[1]: raise ValueError(f"sharpness values should be between (0., inf), but got {sharpness}.") return float(sharpness[0]), float(sharpness[1]) def make_params(self, flat_inputs: list[Any]) -> dict[str, Any]: sharpness_factor = torch.empty(1).uniform_(self.sharpness[0], self.sharpness[1]).item() return {"sharpness_factor": sharpness_factor} def transform(self, inpt: Any, params: dict[str, Any]) -> Any: sharpness_factor = params["sharpness_factor"] return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=sharpness_factor) @dataclass class ImageTransformConfig: """ For each transform, the following parameters are available: weight: This represents the multinomial probability (with no replacement) used for sampling the transform. If the sum of the weights is not 1, they will be normalized. type: The name of the class used. 
This is either a class available under torchvision.transforms.v2 or a custom transform defined here. kwargs: Lower & upper bound respectively used for sampling the transform's parameter (following uniform distribution) when it's applied. """ weight: float = 1.0 type: str = "Identity" kwargs: dict[str, Any] = field(default_factory=dict) @dataclass class ImageTransformsConfig: """ These transforms are all using standard torchvision.transforms.v2 You can find out how these transformations affect images here: https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html We use a custom RandomSubsetApply container to sample them. """ # Set this flag to `true` to enable transforms during training enable: bool = False # This is the maximum number of transforms (sampled from these below) that will be applied to each frame. # It's an integer in the interval [1, number_of_available_transforms]. max_num_transforms: int = 3 # By default, transforms are applied in Torchvision's suggested order (shown below). # Set this to True to apply them in a random order. 
random_order: bool = False tfs: dict[str, ImageTransformConfig] = field( default_factory=lambda: { "brightness": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"brightness": (0.8, 1.2)}, ), "contrast": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"contrast": (0.8, 1.2)}, ), "saturation": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"saturation": (0.5, 1.5)}, ), "hue": ImageTransformConfig( weight=1.0, type="ColorJitter", kwargs={"hue": (-0.05, 0.05)}, ), "sharpness": ImageTransformConfig( weight=1.0, type="SharpnessJitter", kwargs={"sharpness": (0.5, 1.5)}, ), } ) def make_transform_from_config(cfg: ImageTransformConfig): if cfg.type == "Identity": return v2.Identity(**cfg.kwargs) elif cfg.type == "ColorJitter": return v2.ColorJitter(**cfg.kwargs) elif cfg.type == "SharpnessJitter": return SharpnessJitter(**cfg.kwargs) else: raise ValueError(f"Transform '{cfg.type}' is not valid.") class ImageTransforms(Transform): """A class to compose image transforms based on configuration.""" def __init__(self, cfg: ImageTransformsConfig) -> None: super().__init__() self._cfg = cfg self.weights = [] self.transforms = {} for tf_name, tf_cfg in cfg.tfs.items(): if tf_cfg.weight <= 0.0: continue self.transforms[tf_name] = make_transform_from_config(tf_cfg) self.weights.append(tf_cfg.weight) n_subset = min(len(self.transforms), cfg.max_num_transforms) if n_subset == 0 or not cfg.enable: self.tf = v2.Identity() else: self.tf = RandomSubsetApply( transforms=list(self.transforms.values()), p=self.weights, n_subset=n_subset, random_order=cfg.random_order, ) def forward(self, *inputs: Any) -> Any: return self.tf(*inputs)
lerobot/src/lerobot/datasets/transforms.py/0
{ "file_path": "lerobot/src/lerobot/datasets/transforms.py", "repo_id": "lerobot", "token_count": 3925 }
216
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np class RobotKinematics: """Robot kinematics using placo library for forward and inverse kinematics.""" def __init__( self, urdf_path: str, target_frame_name: str = "gripper_frame_link", joint_names: list[str] = None, ): """ Initialize placo-based kinematics solver. Args: urdf_path: Path to the robot URDF file target_frame_name: Name of the end-effector frame in the URDF joint_names: List of joint names to use for the kinematics solver """ try: import placo except ImportError as e: raise ImportError( "placo is required for RobotKinematics. " "Please install the optional dependencies of `kinematics` in the package." ) from e self.robot = placo.RobotWrapper(urdf_path) self.solver = placo.KinematicsSolver(self.robot) self.solver.mask_fbase(True) # Fix the base self.target_frame_name = target_frame_name # Set joint names self.joint_names = list(self.robot.joint_names()) if joint_names is None else joint_names # Initialize frame task for IK self.tip_frame = self.solver.add_frame_task(self.target_frame_name, np.eye(4)) def forward_kinematics(self, joint_pos_deg): """ Compute forward kinematics for given joint configuration given the target frame name in the constructor. 
Args: joint_pos_deg: Joint positions in degrees (numpy array) Returns: 4x4 transformation matrix of the end-effector pose """ # Convert degrees to radians joint_pos_rad = np.deg2rad(joint_pos_deg[: len(self.joint_names)]) # Update joint positions in placo robot for i, joint_name in enumerate(self.joint_names): self.robot.set_joint(joint_name, joint_pos_rad[i]) # Update kinematics self.robot.update_kinematics() # Get the transformation matrix return self.robot.get_T_world_frame(self.target_frame_name) def inverse_kinematics( self, current_joint_pos, desired_ee_pose, position_weight=1.0, orientation_weight=0.01 ): """ Compute inverse kinematics using placo solver. Args: current_joint_pos: Current joint positions in degrees (used as initial guess) desired_ee_pose: Target end-effector pose as a 4x4 transformation matrix position_weight: Weight for position constraint in IK orientation_weight: Weight for orientation constraint in IK, set to 0.0 to only constrain position Returns: Joint positions in degrees that achieve the desired end-effector pose """ # Convert current joint positions to radians for initial guess current_joint_rad = np.deg2rad(current_joint_pos[: len(self.joint_names)]) # Set current joint positions as initial guess for i, joint_name in enumerate(self.joint_names): self.robot.set_joint(joint_name, current_joint_rad[i]) # Update the target pose for the frame task self.tip_frame.T_world_frame = desired_ee_pose # Configure the task based on position_only flag self.tip_frame.configure(self.target_frame_name, "soft", position_weight, orientation_weight) # Solve IK self.solver.solve(True) self.robot.update_kinematics() # Extract joint positions joint_pos_rad = [] for joint_name in self.joint_names: joint = self.robot.get_joint(joint_name) joint_pos_rad.append(joint) # Convert back to degrees joint_pos_deg = np.rad2deg(joint_pos_rad) # Preserve gripper position if present in current_joint_pos if len(current_joint_pos) > len(self.joint_names): result = 
np.zeros_like(current_joint_pos) result[: len(self.joint_names)] = joint_pos_deg result[len(self.joint_names) :] = current_joint_pos[len(self.joint_names) :] return result else: return joint_pos_deg
lerobot/src/lerobot/model/kinematics.py/0
{ "file_path": "lerobot/src/lerobot/model/kinematics.py", "repo_id": "lerobot", "token_count": 1957 }
217
#!/usr/bin/env python # Copyright 2024 Tony Z. Zhao and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode from lerobot.optim.optimizers import AdamWConfig @PreTrainedConfig.register_subclass("act") @dataclass class ACTConfig(PreTrainedConfig): """Configuration class for the Action Chunking Transformers policy. Defaults are configured for training on bimanual Aloha tasks like "insertion" or "transfer". The parameters you will most likely need to change are the ones which depend on the environment / sensors. Those are: `input_shapes` and 'output_shapes`. Notes on the inputs and outputs: - Either: - At least one key starting with "observation.image is required as an input. AND/OR - The key "observation.environment_state" is required as input. - If there are multiple keys beginning with "observation.images." they are treated as multiple camera views. Right now we only support all images having the same shape. - May optionally work without an "observation.state" key for the proprioceptive robot state. - "action" is required as an output key. Args: n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the current step and additional steps going back). chunk_size: The size of the action prediction "chunks" in units of environment steps. 
n_action_steps: The number of action steps to run in the environment for one invocation of the policy. This should be no greater than the chunk size. For example, if the chunk size size 100, you may set this to 50. This would mean that the model predicts 100 steps worth of actions, runs 50 in the environment, and throws the other 50 out. input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents the input data name, and the value is a list indicating the dimensions of the corresponding data. For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96], indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't include batch dimension or temporal dimension. output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents the output data name, and the value is a list indicating the dimensions of the corresponding data. For example, "action" refers to an output shape of [14], indicating 14-dimensional actions. Importantly, `output_shapes` doesn't include batch dimension or temporal dimension. input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"), and the value specifies the normalization mode to apply. The two available modes are "mean_std" which subtracts the mean and divides by the standard deviation and "min_max" which rescale in a [-1, 1] range. output_normalization_modes: Similar dictionary as `normalize_input_modes`, but to unnormalize to the original scale. Note that this is also used for normalizing the training targets. vision_backbone: Name of the torchvision resnet backbone to use for encoding images. pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone. `None` means no pretrained weights. replace_final_stride_with_dilation: Whether to replace the ResNet's final 2x2 stride with a dilated convolution. 
pre_norm: Whether to use "pre-norm" in the transformer blocks. dim_model: The transformer blocks' main hidden dimension. n_heads: The number of heads to use in the transformer blocks' multi-head attention. dim_feedforward: The dimension to expand the transformer's hidden dimension to in the feed-forward layers. feedforward_activation: The activation to use in the transformer block's feed-forward layers. n_encoder_layers: The number of transformer layers to use for the transformer encoder. n_decoder_layers: The number of transformer layers to use for the transformer decoder. use_vae: Whether to use a variational objective during training. This introduces another transformer which is used as the VAE's encoder (not to be confused with the transformer encoder - see documentation in the policy class). latent_dim: The VAE's latent dimension. n_vae_encoder_layers: The number of transformer layers to use for the VAE's encoder. temporal_ensemble_coeff: Coefficient for the exponential weighting scheme to apply for temporal ensembling. Defaults to None which means temporal ensembling is not used. `n_action_steps` must be 1 when using this feature, as inference needs to happen at every step to form an ensemble. For more information on how ensembling works, please see `ACTTemporalEnsembler`. dropout: Dropout to use in the transformer layers (see code for details). kl_weight: The weight to use for the KL-divergence component of the loss if the variational objective is enabled. Loss is then calculated as: `reconstruction_loss + kl_weight * kld_loss`. """ # Input / output structure. n_obs_steps: int = 1 chunk_size: int = 100 n_action_steps: int = 100 normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { "VISUAL": NormalizationMode.MEAN_STD, "STATE": NormalizationMode.MEAN_STD, "ACTION": NormalizationMode.MEAN_STD, } ) # Architecture. # Vision backbone. 
vision_backbone: str = "resnet18" pretrained_backbone_weights: str | None = "ResNet18_Weights.IMAGENET1K_V1" replace_final_stride_with_dilation: int = False # Transformer layers. pre_norm: bool = False dim_model: int = 512 n_heads: int = 8 dim_feedforward: int = 3200 feedforward_activation: str = "relu" n_encoder_layers: int = 4 # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code # that means only the first layer is used. Here we match the original implementation by setting this to 1. # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. n_decoder_layers: int = 1 # VAE. use_vae: bool = True latent_dim: int = 32 n_vae_encoder_layers: int = 4 # Inference. # Note: the value used in ACT when temporal ensembling is enabled is 0.01. temporal_ensemble_coeff: float | None = None # Training and loss computation. dropout: float = 0.1 kl_weight: float = 10.0 # Training preset optimizer_lr: float = 1e-5 optimizer_weight_decay: float = 1e-4 optimizer_lr_backbone: float = 1e-5 def __post_init__(self): super().__post_init__() """Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): raise ValueError( f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}." ) if self.temporal_ensemble_coeff is not None and self.n_action_steps > 1: raise NotImplementedError( "`n_action_steps` must be 1 when using temporal ensembling. This is " "because the policy needs to be queried every step to compute the ensembled action." ) if self.n_action_steps > self.chunk_size: raise ValueError( f"The chunk size is the upper bound for the number of action steps per model invocation. Got " f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`." ) if self.n_obs_steps != 1: raise ValueError( f"Multiple observation steps not handled yet. 
Got `nobs_steps={self.n_obs_steps}`" ) def get_optimizer_preset(self) -> AdamWConfig: return AdamWConfig( lr=self.optimizer_lr, weight_decay=self.optimizer_weight_decay, ) def get_scheduler_preset(self) -> None: return None def validate_features(self) -> None: if not self.image_features and not self.env_state_feature: raise ValueError("You must provide at least one image or the environment state among the inputs.") @property def observation_delta_indices(self) -> None: return None @property def action_delta_indices(self) -> list: return list(range(self.chunk_size)) @property def reward_delta_indices(self) -> None: return None
lerobot/src/lerobot/policies/act/configuration_act.py/0
{ "file_path": "lerobot/src/lerobot/policies/act/configuration_act.py", "repo_id": "lerobot", "token_count": 3350 }
218
#!/usr/bin/env python # Copyright 2025 Physical Intelligence and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ π0+FAST: Efficient Action Tokenization for Vision-Language-Action Models [Paper](https://huggingface.co/papers/2501.09747) [Jax code](https://github.com/Physical-Intelligence/openpi) Designed by Physical Intelligence. Ported from Jax by Hugging Face. Disclaimer: It is not expected to perform as well as the original implementation. Example of finetuning the pi0+FAST pretrained model (`pi0_fast_base` in `openpi`): ```bash lerobot-train \ --policy.path=lerobot/pi0fast_base \ --dataset.repo_id=danaaubakirova/koch_test ``` Example of training the pi0+FAST neural network with from scratch: ```bash lerobot-train \ --policy.type=pi0fast \ --dataset.repo_id=danaaubakirova/koch_test ``` Example of using the pi0 pretrained model outside LeRobot training framework: ```python policy = PI0FASTPolicy.from_pretrained("lerobot/pi0fast_base") ``` """ from collections import deque from functools import partial import numpy as np import torch import torch.nn.functional as F # noqa: N812 from PIL import Image from scipy.fft import idct from torch import Tensor, nn from transformers import AutoProcessor, AutoTokenizer, PaliGemmaForConditionalGeneration from transformers.cache_utils import HybridCache, StaticCache from transformers.models.auto import CONFIG_MAPPING from lerobot.constants import ACTION, OBS_STATE from 
lerobot.policies.normalize import Normalize, Unnormalize from lerobot.policies.pi0fast.configuration_pi0fast import PI0FASTConfig from lerobot.policies.pretrained import PreTrainedPolicy PRECISION = { "float16": torch.float16, "float32": torch.float32, "bfloat16": torch.bfloat16, } def normalize(x, min_val, max_val): return (x - min_val) / (max_val - min_val) def unnormalize(x, min_val, max_val): return x * (max_val - min_val) + min_val def safe_arcsin(value): # This ensures that the input stays within # [−1,1] to avoid invalid values for arcsin return torch.arcsin(torch.clamp(value, -1.0, 1.0)) def aloha_gripper_to_angular(value): # Aloha transforms the gripper positions into a linear space. The following code # reverses this transformation to be consistent with pi0 which is pretrained in # angular space. # # These values are coming from the Aloha code: # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED value = unnormalize(value, min_val=0.01844, max_val=0.05800) # This is the inverse of the angular to linear transformation inside the Interbotix code. def linear_to_radian(linear_position, arm_length, horn_radius): value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position) return safe_arcsin(value) # The constants are taken from the Interbotix code. value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022) # Normalize to [0, 1]. # The values 0.4 and 1.5 were measured on an actual Trossen robot. return normalize(value, min_val=0.4, max_val=1.5) def aloha_gripper_from_angular(value): # Convert from the gripper position used by pi0 to the gripper position that is used by Aloha. # Note that the units are still angular but the range is different. # The values 0.4 and 1.5 were measured on an actual Trossen robot. 
value = unnormalize(value, min_val=0.4, max_val=1.5) # These values are coming from the Aloha code: # PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE return normalize(value, min_val=-0.6213, max_val=1.4910) def aloha_gripper_from_angular_inv(value): # Directly inverts the gripper_from_angular function. value = unnormalize(value, min_val=-0.6213, max_val=1.4910) return normalize(value, min_val=0.4, max_val=1.5) class PI0FASTPolicy(PreTrainedPolicy): """Wrapper class around PI0FAST tokenizer and model to train and run inference within LeRobot.""" config_class = PI0FASTConfig name = "pi0fast" def __init__( self, config: PI0FASTConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): """ Args: config: Policy configuration class instance or None, in which case the default instantiation of the configuration class is used. dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. """ super().__init__(config) config.validate_features() self.config = config self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) self.normalize_targets = Normalize( config.output_features, config.normalization_mapping, dataset_stats ) self.unnormalize_outputs = Unnormalize( config.output_features, config.normalization_mapping, dataset_stats ) self.language_tokenizer = AutoProcessor.from_pretrained("google/paligemma-3b-pt-224") self.model = PI0FAST(config) self.reset() def reset(self): """This should be called whenever the environment is reset.""" self._action_queue = deque([], maxlen=self.config.n_action_steps) @classmethod def from_pretrained(cls, *args, **kwargs): """Override the from_pretrained method to display important disclaimer.""" print( "⚠️ DISCLAIMER: The PI0FAST model is ported from JAX by the Hugging Face team. \n" " It is not expected to perform as well as the original implementation. 
\n" " Original implementation: https://github.com/Physical-Intelligence/openpi" ) return super().from_pretrained(*args, **kwargs) def get_optim_params(self) -> dict: return self.parameters() def _pi_aloha_decode_state(self, state): # Flip the joints. for motor_idx in [1, 2, 8, 9]: state[:, motor_idx] *= -1 # Reverse the gripper transformation that is being applied by the Aloha runtime. for motor_idx in [6, 13]: state[:, motor_idx] = aloha_gripper_to_angular(state[:, motor_idx]) return state def _pi_aloha_encode_actions(self, actions): # Flip the joints. for motor_idx in [1, 2, 8, 9]: actions[:, :, motor_idx] *= -1 # Reverse the gripper transformation that is being applied by the Aloha runtime. for motor_idx in [6, 13]: actions[:, :, motor_idx] = aloha_gripper_from_angular(actions[:, :, motor_idx]) return actions def _pi_aloha_encode_actions_inv(self, actions): # Flip the joints again. for motor_idx in [1, 2, 8, 9]: actions[:, :, motor_idx] *= -1 # Reverse the gripper transformation that is being applied by the Aloha runtime. for motor_idx in [6, 13]: actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx]) return actions @torch.no_grad() def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations.""" raise NotImplementedError("Currently not implemented for PI0FAST") @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations. This method wraps `select_actions` in order to return one action at a time for execution in the environment. It works by managing the actions in a queue and only calling `select_actions` when the queue is empty. """ self.eval() if self.config.adapt_to_pi_aloha: batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) batch = self.normalize_inputs(batch) # Action queue logic for n_action_steps > 1. 
When the action_queue is depleted, populate it by # querying the policy. if len(self._action_queue) == 0: actions = self.model.generate_actions(batch) actions = actions[:, : self.config.n_action_steps] original_action_dim = self.config.action_feature.shape[ 0 ] # self.config.max_action_dim # self.config.action_feature.shape[0] actions = actions[:, :, :original_action_dim] actions = self.unnormalize_outputs({"action": actions})["action"] if self.config.adapt_to_pi_aloha: actions = self._pi_aloha_encode_actions(actions) # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue # effectively has shape (n_action_steps, batch_size, *), hence the transpose. self._action_queue.extend(actions.transpose(0, 1)) return self._action_queue.popleft() def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: if self.config.adapt_to_pi_aloha: batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) batch[ACTION] = self._pi_aloha_encode_actions_inv(batch[ACTION]) batch = self.normalize_inputs(batch) batch = self.normalize_targets(batch) loss_dict = self.model.forward(batch) return loss_dict["loss"], loss_dict def block_causal_update_causal_mask( attention_mask, token_type_ids=None, past_key_values=None, cache_position=None, input_tensor=None, attn_implementation: str = "eager", dtype: torch.dtype = "float32", ): """ Update the causal mask during training and generation. It can be customized to different attention masks. 
""" if attn_implementation == "flash_attention_2": if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None using_static_cache = isinstance(past_key_values, StaticCache) min_dtype = torch.finfo(dtype).min if input_tensor is None: input_tensor = attention_mask inputs_lead_dim, sequence_length = input_tensor.shape[:2] if using_static_cache or isinstance(past_key_values, HybridCache): target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else cache_position[0] + sequence_length + 1 ) # Handle precomputed attention masks if attention_mask is not None and attention_mask.dim() == 4: return attention_mask # Causal mask initialization causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) # Standard causal masking (triu ensures tokens can only attend to past) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) # Apply block causal mask if token_type_ids is not None: token_type_ids = token_type_ids.to(causal_mask.device).bool() cumsum = torch.cumsum(token_type_ids, dim=1) block_causal_mask = cumsum[:, None, :] <= cumsum[:, :, None] # Combine causal_mask with block-wise attention mask causal_mask = torch.where(block_causal_mask, 0.0, causal_mask) causal_mask = causal_mask[:, None, :, :] else: # Apply past cache position constraint causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape( -1, 1 ) causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1) else: # Apply past cache position constraint causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape( -1, 1 ) causal_mask = causal_mask[None, None, :, :].expand(inputs_lead_dim, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # Copy to contiguous memory for in-place edits 
mask_length = attention_mask.shape[-1] # Apply padding mask padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask def prepare_inputs_for_generation( # self, input_ids, past_key_values=None, inputs_embeds=None, cache_position=None, position_ids=None, pixel_values=None, attention_mask=None, token_type_ids=None, use_cache=True, num_logits_to_keep=None, labels=None, self=None, **kwargs, ): # create block causal attention if cache_position[0] > 0 and input_ids.shape[1] > 0: input_tensor = input_ids[:, -1:] new_positions = ( torch.ones( (position_ids.shape[0], input_ids.shape[1]), dtype=position_ids.dtype, device=position_ids.device, ).cumsum(-1) + position_ids[:, -1:] ) position_ids = torch.cat([position_ids, new_positions], dim=-1) else: input_tensor = inputs_embeds attention_mask = block_causal_update_causal_mask( attention_mask=attention_mask, past_key_values=past_key_values, cache_position=cache_position, input_tensor=input_tensor, token_type_ids=token_type_ids, dtype=self.dtype, attn_implementation=self.config.text_config._attn_implementation, ) # Overwritten -- custom `position_ids` and `pixel_values` handling model_inputs = self.language_model.prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, cache_position=cache_position, use_cache=use_cache, num_logits_to_keep=num_logits_to_keep, token_type_ids=token_type_ids, **kwargs, ) # Position_ids in Paligemma are 1-indexed if model_inputs.get("position_ids") is not None: model_inputs["position_ids"] += 1 # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model. 
NOTE: use_cache=False needs pixel_values always if cache_position[0] == 0: model_inputs["pixel_values"] = pixel_values is_training = token_type_ids is not None and labels is not None if cache_position[0] == 0 and isinstance(past_key_values, HybridCache): input_tensor = inputs_embeds if inputs_embeds is not None else input_ids causal_mask = self._update_causal_mask( attention_mask, token_type_ids, past_key_values, cache_position, input_tensor, is_training ) model_inputs["attention_mask"] = causal_mask return model_inputs class PI0FAST(nn.Module): def __init__(self, config: PI0FASTConfig): super().__init__() self.config = config # TODO: move tokenizers in Policy fast_tokenizer_path = "physical-intelligence/fast" pi0_paligemma_path = "google/paligemma-3b-pt-224" self.paligemma_tokenizer = AutoTokenizer.from_pretrained(pi0_paligemma_path) self.processor = AutoProcessor.from_pretrained(pi0_paligemma_path) self.fast_tokenizer = AutoProcessor.from_pretrained(fast_tokenizer_path, trust_remote_code=True) self.fast_skip_tokens = self.config.fast_skip_tokens self.max_input_seq_len = self.config.max_input_seq_len self.action_horizon = self.config.chunk_size self.action_dim = self.config.action_feature.shape[ 0 ] # self.config.max_action_dim # self.config.action_feature.shape[0] precision = config.precision torch_precision = PRECISION.get(precision, torch.float32) self.pad_token_id = ( self.paligemma_tokenizer.pad_token_id if hasattr(self.paligemma_tokenizer, "pad_token_id") else self.paligemma_tokenizer.eos_token_id ) paligemma_config = CONFIG_MAPPING["paligemma"]( transformers_version="4.48.1", _vocab_size=257152, bos_token_id=2, eos_token_id=1, hidden_size=2048, image_token_index=257152, model_type="paligemma", pad_token_id=0, projection_dim=2048, text_config={ "hidden_activation": "gelu_pytorch_tanh", "hidden_size": 2048, "intermediate_size": 16384, "model_type": "gemma", "num_attention_heads": 8, "num_hidden_layers": 18, "num_image_tokens": 256, "num_key_value_heads": 1, 
"torch_dtype": precision, "vocab_size": 257152, "_attn_implementation": "eager", }, vision_config={ "hidden_size": 1152, "intermediate_size": 4304, "model_type": "siglip_vision_model", "num_attention_heads": 16, "num_hidden_layers": 27, "num_image_tokens": 256, "patch_size": 14, "projection_dim": 2048, "projector_hidden_act": "gelu_pytorch_tanh", "torch_dtype": precision, "vision_use_head": False, }, ) self.pi0_paligemma = PaliGemmaForConditionalGeneration(config=paligemma_config) self.pi0_paligemma.prepare_inputs_for_generation = partial( prepare_inputs_for_generation, self=self.pi0_paligemma ) # change important stuff in bf16 params_to_change_dtype = [ "language_model", "vision_tower", "multi_modal", ] for name, param in self.pi0_paligemma.named_parameters(): if any(selector in name for selector in params_to_change_dtype): param.data = param.data.to(dtype=torch_precision) self.set_requires_grad() self.image_keys = self.config.image_features.keys() # TODO: Remove this once we bump transformers to >4.52.0 because the attribute will be removed # AttributeError: 'PaliGemmaConfig' object has no attribute 'ignore_index' self.ignore_index = self.pi0_paligemma.config.ignore_index self.padding_side = self.config.padding_side def set_requires_grad(self): if self.config.freeze_vision_encoder: self.pi0_paligemma.vision_tower.eval() for params in self.pi0_paligemma.vision_tower.parameters(): params.requires_grad = False # To avoid unused params issue with distributed training if self.config.freeze_lm_head: for name, params in self.pi0_paligemma.named_parameters(): if "embed_tokens" in name: # lm heads and embedding layer are tied params.requires_grad = False def embed_tokens(self, tokens: torch.Tensor): return self.pi0_paligemma.language_model.model.embed_tokens(tokens) def prepare_inputs_for_generation(self, *args, **kwargs): return self.pi0_paligemma.prepare_inputs_for_generation(*args, **kwargs) def prepare_images(self, batch): """Preprocess LeRobot batch into Pi0 
inputs""" images = [] img_masks = [] present_img_keys = [key for key in self.image_keys if key in batch] if len(present_img_keys) == 0: raise ValueError( f"All image features are missing from the batch. At least one expected. (batch: {batch.keys()}) (image_features:{self.config.image_features})" ) # Preprocess image features present in the batch num_empty_cameras = 0 for key in self.image_keys: if key in present_img_keys: img = batch[key] if self.config.resize_imgs_with_padding is not None: img = resize_with_pad( img, *self.config.resize_imgs_with_padding, pad_value=0, interpolate_like_pi=self.config.interpolate_like_pi, ) # Normalize from range [0,1] to [-1,1] as expected by siglip img = img * 2.0 - 1.0 bsize = img.shape[0] device = img.device mask = torch.ones(bsize, dtype=torch.bool, device=device) else: if num_empty_cameras >= self.config.empty_cameras: continue img = torch.ones_like(img) * -1 bsize = img.shape[0] device = img.device mask = torch.ones(bsize, dtype=torch.bool, device=device) num_empty_cameras += 1 images.append(img) img_masks.append(mask) return images, img_masks def normalize_actions(self, actions: torch.Tensor) -> torch.Tensor: mins = actions.amin(dim=(1, 2), keepdim=True) # [0] maxs = actions.amax(dim=(1, 2), keepdim=True) # [0] return 2 * (actions - mins) / (maxs - mins + 1e-8) - 1 def _act_tokens_to_paligemma_tokens(self, tokens: torch.Tensor) -> torch.Tensor: out = self.paligemma_tokenizer.vocab_size - 1 - self.fast_skip_tokens - tokens return out def fast_tokenizer_wrapper(self, actions_norm): """ A wrapper for self.fast_tokenizer that ensures batch processing, conversion to PyTorch tensors, and returns a dictionary without padding. 
""" batch_tokens = self.fast_tokenizer(actions_norm) fast_out = self.processor.tokenizer.pad({"input_ids": batch_tokens}, return_tensors="pt") return fast_out def create_token_type_ids(self, padded_mask: torch.Tensor, prefix_len: int) -> torch.Tensor: token_type_ids = torch.zeros_like(padded_mask, dtype=torch.bool) # Compute cumulative sum mask cumsum_mask = (padded_mask != 0).cumsum(dim=1) # Suffix block (everything after prefix_len) suffix_mask = cumsum_mask > prefix_len token_type_ids = suffix_mask return token_type_ids def create_input_tokens(self, state, lang_text, actions=None): bsize = state.shape[0] device = state.device bins = torch.linspace(-1, 1, 256 + 1, device=device)[:-1] discretized = torch.bucketize(state, bins) - 1 discretized = discretized[:, :32] prefix_texts = [] state_text = [] for txt, disc in zip(lang_text, discretized, strict=False): cleaned = txt.lower().strip().replace("_", " ") state_str = " ".join(str(val.item()) for val in disc) prefix_texts.append(f"Task: {cleaned}, State: {state_str};\n") state_text.append(f"State: {state_str};\n") prefix_out = self.paligemma_tokenizer( prefix_texts, add_special_tokens=True, return_tensors="pt", padding="longest", truncation=False ) prefix_ids = prefix_out["input_ids"].to(device) prefix_mask = prefix_out["attention_mask"].to(device) prefix_lens = prefix_mask.sum(dim=1)[:, None].cpu() if actions is not None: actions_norm = self.normalize_actions(actions) actions_pad = F.pad( actions_norm, (0, max(0, self.config.max_action_dim - actions_norm.shape[2])), value=0 )[:, :, : self.config.max_action_dim] fast_out = self.fast_tokenizer_wrapper( actions_pad.cpu(), ) act_ids = fast_out["input_ids"] act_mask = fast_out["attention_mask"].to(device) act_ids = self._act_tokens_to_paligemma_tokens(act_ids).to(device) # Replace action with 0 to pad tokens act_ids = torch.where( act_ids == self.paligemma_tokenizer.vocab_size - 1 - self.fast_skip_tokens, self.pad_token_id, act_ids, ) eos_token = torch.tensor( 
[self.paligemma_tokenizer.eos_token_id], dtype=torch.long, device=device ).expand(bsize, -1) eos_mask = torch.tensor([1], dtype=torch.long, device=device).expand(bsize, -1) bos = self.paligemma_tokenizer("Action: ", add_special_tokens=False, return_tensors="pt") bos_token = bos["input_ids"].expand(act_ids.shape[0], -1).to(device) bos_mask = bos["attention_mask"].expand(act_ids.shape[0], -1).to(device) act_ids = torch.cat([bos_token, act_ids, eos_token], dim=1) act_mask = torch.cat([bos_mask, act_mask, eos_mask], dim=1) act_mask = act_mask.to(device) else: act_ids = torch.empty(bsize, self.pad_token_id, dtype=torch.long, device=device) act_mask = torch.empty(bsize, 0, dtype=torch.long, device=device) final_ids = torch.cat([prefix_ids, act_ids], dim=1) final_mask = torch.cat([prefix_mask, act_mask], dim=1) batch_inputs = {"input_ids": final_ids.tolist(), "attention_mask": final_mask.tolist()} # Use tokenizer pad function padded_output = self.paligemma_tokenizer.pad( batch_inputs, padding="longest", max_length=180, return_tensors="pt" ) padded_mask = padded_output["attention_mask"] # define tensor of padding lengths att_mask = (padded_mask != 0).cumsum(dim=1) > prefix_lens token_type_ids = self.create_token_type_ids(padded_mask=padded_mask, prefix_len=prefix_lens) padded_output["padded_mask"] = padded_output.pop("attention_mask") padded_output["attention_mask"] = att_mask # loss is computed not on prefix, and not on padding padded_output["loss_mask"] = att_mask & padded_output["padded_mask"] padded_output["token_type_ids"] = token_type_ids return padded_output def shift_padding_side( self, tokens: torch.Tensor, ar_mask: torch.Tensor, padding_mask: torch.Tensor, loss_mask: torch.Tensor, targets: torch.Tensor, token_type_ids: torch.Tensor, padding_side: str = "right", ) -> tuple[torch.Tensor]: if padding_side not in ["right", "left"]: return tokens, ar_mask, padding_mask, loss_mask, targets, token_type_ids new_tokens = torch.empty_like(tokens) new_ar_masks = 
torch.empty_like(ar_mask) new_padding_mask = torch.empty_like(padding_mask) new_loss_mask = torch.empty_like(loss_mask) new_targets = torch.empty_like(targets) new_token_type_ids = torch.empty_like(token_type_ids) batch_size = tokens.shape[0] for i in range(batch_size): padding_indices = torch.where(padding_mask[i] == 0)[0] non_padding_indices = torch.where(padding_mask[i] == 1)[0] if padding_side == "left": new_indices = torch.cat((padding_indices, non_padding_indices), dim=0) else: new_indices = torch.cat((non_padding_indices, padding_indices), dim=0) new_tokens[i] = tokens[i].index_select(0, new_indices) new_ar_masks[i] = ar_mask[i].index_select(0, new_indices) new_padding_mask[i] = padding_mask[i].index_select(0, new_indices) new_loss_mask[i] = loss_mask[i].index_select(0, new_indices) new_targets[i] = targets[i].index_select(0, new_indices) new_token_type_ids[i] = token_type_ids[i].index_select(0, new_indices) return new_tokens, new_ar_masks, new_padding_mask, new_loss_mask, new_targets, new_token_type_ids def forward(self, batch: dict[str, Tensor]): device = batch[OBS_STATE].device # TODO: keep like this or move to the policy .forward images, img_masks = self.prepare_images(batch) padded_outs = self.create_input_tokens( state=batch[OBS_STATE], lang_text=batch["task"], actions=batch[ACTION], ) embs, pad_masks, _, targets, loss_mask, token_type_ids = self.embed_inputs( images, img_masks, padded_outs["input_ids"], padded_outs["padded_mask"], padded_outs["attention_mask"], padded_outs["loss_mask"], padded_outs["token_type_ids"], padding_side=self.padding_side, ) position_ids = torch.cumsum(pad_masks, dim=1) - 1 token_type_ids = token_type_ids.to(dtype=torch.int64) past_seen_tokens = 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + embs.shape[1], device=embs.device) pad_masks = block_causal_update_causal_mask( attention_mask=pad_masks, past_key_values=None, cache_position=cache_position, input_tensor=embs, token_type_ids=token_type_ids, 
            dtype=self.pi0_paligemma.dtype,
            attn_implementation=self.pi0_paligemma.config.text_config._attn_implementation,
        )

        outputs = self.pi0_paligemma.forward(
            input_ids=None,
            token_type_ids=None,
            attention_mask=pad_masks,
            position_ids=position_ids,
            past_key_values=None,
            inputs_embeds=embs,
            use_cache=False,
            labels=None,
        )

        logits = outputs.logits

        # Per-token CE with no reduction: the mean is taken manually below so that
        # prefix/padding tokens can be excluded via loss_mask.
        loss_fct = nn.CrossEntropyLoss(reduction="none")

        # Shift left for next-step prediction
        logits = logits[:, :-1, :]
        targets = targets[:, 1:].to(device)  # Shift targets
        loss_mask = loss_mask[:, 1:].to(device)  # Ensure correct shape

        # Compute per-token loss
        token_loss = loss_fct(logits.reshape(-1, logits.shape[-1]), targets.reshape(-1))

        # Apply loss mask
        token_loss = token_loss * loss_mask.reshape(-1)

        # Compute final loss
        # clamp(min=1) guards against division by zero when the mask is all zeros.
        loss = token_loss.sum() / torch.clamp(loss_mask.sum(), min=1)

        # Return loss dictionary
        # "ce_loss" is detached via .item() for logging; "loss" keeps the autograd graph.
        loss_dict = {"ce_loss": loss.item(), "loss": loss}
        return loss_dict

    def decode_actions_with_fast(
        self,
        tokens: list[list[int]],
        *,
        time_horizon: int | None = None,
        action_dim: int | None = None,
        relaxed_decoding: bool = True,
    ) -> np.ndarray:
        """
        Adapt original decoding in FAST to always return actions instead of zeros.

        Args:
            tokens: batch of FAST BPE token-id sequences to decode.
            time_horizon: number of action timesteps; falls back to the tokenizer's
                configured value, then to the value cached from the last call.
            action_dim: per-step action dimensionality; same fallback chain.
            relaxed_decoding: pad/truncate malformed sequences instead of failing.
        """
        self.time_horizon = (
            time_horizon or self.fast_tokenizer.time_horizon or self.fast_tokenizer.called_time_horizon
        )
        self.action_dim = (
            action_dim or self.fast_tokenizer.action_dim or self.fast_tokenizer.called_action_dim
        )

        # Cache the time horizon and action dimension for the next call
        self.called_time_horizon = self.time_horizon
        self.called_action_dim = self.action_dim

        assert self.time_horizon is not None and self.action_dim is not None, (
            "Tokenizer not initialized, call encode() once or pass in time_horizon and action_dim."
) decoded_actions = [] for token in tokens: try: decoded_tokens = self.fast_tokenizer.bpe_tokenizer.decode(token) decoded_dct_coeff = np.array(list(map(ord, decoded_tokens))) + self.fast_tokenizer.min_token if relaxed_decoding: # Expected sequence length expected_seq_len = self.time_horizon * self.action_dim diff = expected_seq_len - decoded_dct_coeff.shape[0] # Apply truncation if too long if diff < 0: decoded_dct_coeff = decoded_dct_coeff[:expected_seq_len] # Truncate on the right # Apply padding if too short elif diff > 0: decoded_dct_coeff = np.pad( decoded_dct_coeff, (0, diff), mode="constant", constant_values=0 ) decoded_dct_coeff = decoded_dct_coeff.reshape(-1, self.action_dim) assert decoded_dct_coeff.shape == ( self.time_horizon, self.action_dim, ), ( f"Decoded DCT coefficients have shape {decoded_dct_coeff.shape}, expected ({self.time_horizon}, {self.action_dim})" ) except Exception as e: print(f"Error decoding tokens: {e}") print(f"Tokens: {token}") decoded_dct_coeff = np.zeros((self.time_horizon, self.action_dim)) decoded_actions.append(idct(decoded_dct_coeff / self.fast_tokenizer.scale, axis=0, norm="ortho")) return np.stack(decoded_actions) def extract_actions(self, tokens: torch.Tensor, action_horizon: int, action_dim: int) -> torch.Tensor: """ Extracts actions from predicted output tokens using the FAST model. Args: tokens (torch.Tensor): The input tensor of tokenized outputs. action_horizon (int): The number of timesteps for actions. action_dim (int): The dimensionality of each action. Returns: torch.Tensor: The extracted actions as a tensor of shape (action_horizon, action_dim). 
""" # Decode predicted output tokens decoded_tokens = self.paligemma_tokenizer.batch_decode(tokens, skip_special_tokens=True) cleaned_tokens = [ tokens_sequence.replace("Action:", "").replace(":", "").strip().split("|")[0].strip() for tokens_sequence in decoded_tokens ] raw_action_tokens = [ self.processor.tokenizer.encode(sample_tokens, return_tensors="pt", padding=False) for sample_tokens in cleaned_tokens ] # something like this should be robust #looks good action_tokens = [ self._act_tokens_to_paligemma_tokens(raw_action_token) for raw_action_token in raw_action_tokens ] # returns the tensor of decoded actions per sample in a list decoded_actions = [ torch.tensor( self.decode_actions_with_fast( tok.tolist(), time_horizon=action_horizon, action_dim=action_dim, relaxed_decoding=self.config.relaxed_action_decoding, ), device=tokens.device, ).squeeze(0) for tok in action_tokens ] return torch.stack( decoded_actions, dim=0, ) def generate_actions(self, batch: dict[str, Tensor]): # TODO: keep like this or move to the policy .forward images, img_masks = self.prepare_images(batch) padded_outs = self.create_input_tokens(state=batch[OBS_STATE], lang_text=batch["task"], actions=None) embs, pad_masks, att_masks2, targets, loss_mask, token_type_ids = self.embed_inputs( images, img_masks, padded_outs["input_ids"], padded_outs["padded_mask"], padded_outs["attention_mask"], padded_outs["loss_mask"], padded_outs["token_type_ids"], padding_side="left", ) token_type_ids = token_type_ids.to(dtype=torch.int64) prefix_position_ids = torch.cumsum(pad_masks, dim=1) - 1 output_tokens = self.pi0_paligemma.generate( input_ids=None, attention_mask=pad_masks, position_ids=prefix_position_ids, past_key_values=None, inputs_embeds=embs, use_cache=self.config.use_cache, max_new_tokens=self.config.max_decoding_steps, do_sample=False, num_beams=1, token_type_ids=token_type_ids, ) actions = self.extract_actions(output_tokens, self.action_horizon, self.action_dim) return actions def 
embed_image(self, image: torch.Tensor): # Handle different transformers versions if hasattr(self.pi0_paligemma, "get_image_features"): return self.pi0_paligemma.get_image_features(image) else: return self.pi0_paligemma.model.get_image_features(image) def embed_inputs( self, images, img_masks, tokens, pad_mask, ar_mask, loss_mask, token_type_ids, padding_side: str = "right", ): # TODO: avoid list in python and torch.cat ; prefer pre-allocation with torch.empty # images are a list of same size # vectorizing everything! device = images[0].device image_embedding_dim = images[0].shape[-1] # TODO should be from self.config all_images = torch.stack(images, dim=1).to(device) b, n, c, h, w = all_images.shape all_images = all_images.view(b * n, c, h, w) embedded = self.embed_image(all_images).to(device) b_n, p, image_embedding_dim = embedded.shape # Extract current dimensions m = b_n // b # Compute the number of images per sample dynamically # Reshape dynamically embedded = embedded.view(b, m, p, image_embedding_dim) tokens_embs = self.embed_tokens(tokens.to(device)) img_masks = torch.stack(img_masks, dim=1).unsqueeze(-1).to(device) num_img_emb = embedded.shape[2] img_pad_masks = img_masks.repeat(1, 1, num_img_emb).view(b, -1) img_att_masks = torch.zeros((b, n, num_img_emb), dtype=torch.long, device=device).reshape(b, -1) image_target_tokens = ( torch.ones((b, n, num_img_emb), dtype=torch.long, device=device) * self.pad_token_id ).reshape(b, -1) image_loss_mask = torch.zeros((b, n, num_img_emb), dtype=torch.long, device=device).reshape(b, -1) embedded = embedded.reshape(b, n * num_img_emb, image_embedding_dim) # Shape: (B, N*P, D) embs = torch.cat([embedded, tokens_embs], dim=1).to(device) pad_masks = torch.cat([img_pad_masks, pad_mask.to(device)], dim=1) att_masks = torch.cat([img_att_masks, ar_mask.to(device)], dim=1) loss_masks = torch.cat([image_loss_mask, loss_mask.to(device)], dim=1) targets = torch.cat([image_target_tokens, tokens.to(device)], dim=1) token_type_ids = 
torch.cat([img_att_masks, token_type_ids.to(device)], dim=1) # Shift pad tokens to the left (.generate()) or right (.train()) embs, att_masks, pad_masks, loss_masks, targets, token_type_ids = self.shift_padding_side( embs, att_masks, pad_masks, loss_masks, targets, token_type_ids, padding_side=padding_side ) targets = torch.where(targets == self.pad_token_id, self.ignore_index, targets) return embs, pad_masks, att_masks, targets, loss_masks, token_type_ids def resize_with_pad(img, width, height, pad_value=0, interpolate_like_pi=True): # assume no-op when width height fits already if img.ndim != 4: raise ValueError(f"(b,c,h,w) expected, but {img.shape}") cur_height, cur_width = img.shape[2:] ratio = max(cur_width / width, cur_height / height) resized_height = int(cur_height / ratio) resized_width = int(cur_width / ratio) if interpolate_like_pi: img = (img * 255.0).to(dtype=torch.uint8) img = img.permute(0, 2, 3, 1) original_device = img.device img = img.to(device="cpu").numpy() imgs = [] for sub_img in img: sub_img = Image.fromarray(sub_img) resized_img = sub_img.resize((resized_width, resized_height), resample=2) resized_img = torch.from_numpy(np.array(resized_img)) imgs.append(resized_img) img = torch.stack(imgs, dim=0) img = img.permute(0, 3, 1, 2) resized_img = img.to(device=original_device, dtype=torch.float32) / 255.0 else: resized_img = F.interpolate( img, size=(resized_height, resized_width), mode="bilinear", align_corners=False ) pad_height = max(0, int(height - resized_height)) pad_width = max(0, int(width - resized_width)) # pad on left and top of image padded_img = F.pad(resized_img, (pad_width, 0, pad_height, 0), value=pad_value) return padded_img
# --- extraction metadata (was raw, non-Python text between two concatenated files) ---
# file_path: lerobot/src/lerobot/policies/pi0fast/modeling_pi0fast.py
# repo_id: lerobot, token_count: 18710, record: 219
# --- next file: VQ-BeT policy module (modeling_vqbet.py) ---
#!/usr/bin/env python # Copyright 2024 Seungjae Lee and Yibin Wang and Haritheja Etukuru # and H. Jin Kim and Nur Muhammad Mahi Shafiullah and Lerrel Pinto # and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from collections import deque from collections.abc import Callable import einops import numpy as np import torch import torch.nn.functional as F # noqa: N812 import torchvision from torch import Tensor, nn from lerobot.constants import ACTION, OBS_IMAGES, OBS_STATE from lerobot.policies.normalize import Normalize, Unnormalize from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.utils import get_device_from_parameters, get_output_shape, populate_queues from lerobot.policies.vqbet.configuration_vqbet import VQBeTConfig from lerobot.policies.vqbet.vqbet_utils import GPT, ResidualVQ # ruff: noqa: N806 class VQBeTPolicy(PreTrainedPolicy): """ VQ-BeT Policy as per "Behavior Generation with Latent Actions" """ config_class = VQBeTConfig name = "vqbet" def __init__( self, config: VQBeTConfig | None = None, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): """ Args: config: Policy configuration class instance or None, in which case the default instantiation of the configuration class is used. dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. 
""" super().__init__(config) config.validate_features() self.config = config self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) self.normalize_targets = Normalize( config.output_features, config.normalization_mapping, dataset_stats ) self.unnormalize_outputs = Unnormalize( config.output_features, config.normalization_mapping, dataset_stats ) self.vqbet = VQBeTModel(config) self.reset() def get_optim_params(self) -> dict: vqvae_params = ( list(self.vqbet.action_head.vqvae_model.encoder.parameters()) + list(self.vqbet.action_head.vqvae_model.decoder.parameters()) + list(self.vqbet.action_head.vqvae_model.vq_layer.parameters()) ) decay_params, no_decay_params = self.vqbet.policy.configure_parameters() decay_params = ( decay_params + list(self.vqbet.rgb_encoder.parameters()) + list(self.vqbet.state_projector.parameters()) + list(self.vqbet.rgb_feature_projector.parameters()) + [self.vqbet.action_token] + list(self.vqbet.action_head.map_to_cbet_preds_offset.parameters()) ) if self.config.sequentially_select: decay_params = ( decay_params + list(self.vqbet.action_head.map_to_cbet_preds_primary_bin.parameters()) + list(self.vqbet.action_head.map_to_cbet_preds_secondary_bin.parameters()) ) else: decay_params = decay_params + list(self.vqbet.action_head.map_to_cbet_preds_bin.parameters()) return [ { "params": decay_params, }, { "params": vqvae_params, "weight_decay": self.config.optimizer_vqvae_weight_decay, "lr": self.config.optimizer_vqvae_lr, }, { "params": no_decay_params, "weight_decay": 0.0, }, ] def reset(self): """ Clear observation and action queues. 
Should be called on `env.reset()` queues are populated during rollout of the policy, they contain the n latest observations and actions """ self._queues = { OBS_IMAGES: deque(maxlen=self.config.n_obs_steps), OBS_STATE: deque(maxlen=self.config.n_obs_steps), ACTION: deque(maxlen=self.config.action_chunk_size), } @torch.no_grad() def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} actions = self.vqbet(batch, rollout=True)[:, : self.config.action_chunk_size] actions = self.unnormalize_outputs({ACTION: actions})[ACTION] return actions @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations. This method wraps `select_actions` in order to return one action at a time for execution in the environment. It works by managing the actions in a queue and only calling `select_actions` when the queue is empty. """ # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out if ACTION in batch: batch.pop(ACTION) batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original # NOTE: It's important that this happens after stacking the images into a single key. 
batch["observation.images"] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) self._queues = populate_queues(self._queues, batch) if not self.vqbet.action_head.vqvae_model.discretized.item(): warnings.warn( "To evaluate in the environment, your VQ-BeT model should contain a pretrained Residual VQ.", stacklevel=1, ) if len(self._queues[ACTION]) == 0: actions = self.predict_action_chunk(batch) # since the data in the action queue's dimension is (action_chunk_size, batch_size, action_dim), we transpose the action and fill the queue self._queues[ACTION].extend(actions.transpose(0, 1)) action = self._queues[ACTION].popleft() return action def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) batch = self.normalize_targets(batch) # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://huggingface.co/papers/2403.03181) if not self.vqbet.action_head.vqvae_model.discretized.item(): # loss: total loss of training RVQ # n_different_codes: how many of the total possible VQ codes are being used in single batch (how many of them have at least one encoder embedding as a nearest neighbor). This can be at most `vqvae_n_embed * number of layers of RVQ (=2)`. # n_different_combinations: how many different code combinations are being used out of all possible combinations in single batch. This can be at most `vqvae_n_embed ^ number of layers of RVQ (=2)` (hint consider the RVQ as a decision tree). 
loss, n_different_codes, n_different_combinations, recon_l1_error = ( self.vqbet.action_head.discretize(self.config.n_vqvae_training_steps, batch[ACTION]) ) return loss, { "n_different_codes": n_different_codes, "n_different_combinations": n_different_combinations, "recon_l1_error": recon_l1_error, } # if Residual VQ is already trained, VQ-BeT trains its GPT and bin prediction head / offset prediction head parts. _, loss_dict = self.vqbet(batch, rollout=False) loss = loss_dict.pop("loss") return loss, loss_dict class SpatialSoftmax(nn.Module): """ Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al. (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation. At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass" of activations of each channel, i.e., keypoints in the image space for the policy to focus on. Example: take feature maps of size (512x10x12). We generate a grid of normalized coordinates (10x12x2): ----------------------------------------------------- | (-1., -1.) | (-0.82, -1.) | ... | (1., -1.) | | (-1., -0.78) | (-0.82, -0.78) | ... | (1., -0.78) | | ... | ... | ... | ... | | (-1., 1.) | (-0.82, 1.) | ... | (1., 1.) | ----------------------------------------------------- This is achieved by applying channel-wise softmax over the activations (512x120) and computing the dot product with the coordinates (120x2) to get expected points of maximal activation (512x2). The example above results in 512 keypoints (corresponding to the 512 input channels). We can optionally provide num_kp != None to control the number of keypoints. This is achieved by a first applying a learnable linear mapping (in_channels, H, W) -> (num_kp, H, W). """ def __init__(self, input_shape, num_kp=None): """ Args: input_shape (list): (C, H, W) input feature map shape. num_kp (int): number of keypoints in output. 
If None, output will have the same number of channels as input. """ super().__init__() assert len(input_shape) == 3 self._in_c, self._in_h, self._in_w = input_shape if num_kp is not None: self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) self._out_c = num_kp else: self.nets = None self._out_c = self._in_c # we could use torch.linspace directly but that seems to behave slightly differently than numpy # and causes a small degradation in pc_success of pre-trained models. pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h)) pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float() pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float() # register as buffer so it's moved to the correct device. self.register_buffer("pos_grid", torch.cat([pos_x, pos_y], dim=1)) def forward(self, features: Tensor) -> Tensor: """ Args: features: (B, C, H, W) input feature maps. Returns: (B, K, 2) image-space coordinates of keypoints. """ if self.nets is not None: features = self.nets(features) # [B, K, H, W] -> [B * K, H * W] where K is number of keypoints features = features.reshape(-1, self._in_h * self._in_w) # 2d softmax normalization attention = F.softmax(features, dim=-1) # [B * K, H * W] x [H * W, 2] -> [B * K, 2] for spatial coordinate mean in x and y dimensions expected_xy = attention @ self.pos_grid # reshape to [B, K, 2] feature_keypoints = expected_xy.view(-1, self._out_c, 2) return feature_keypoints class VQBeTModel(nn.Module): """VQ-BeT: The underlying neural network for VQ-BeT Note: In this code we use the terms `rgb_encoder`, 'policy', `action_head`. The meanings are as follows. - The `rgb_encoder` process rgb-style image observations to one-dimensional embedding vectors - A `policy` is a minGPT architecture, that takes observation sequences and action query tokens to generate `features`. 
- These `features` pass through the action head, which passes through the code prediction, offset prediction head, and finally generates a prediction for the action chunks. -------------------------------** legend **------------------------------- │ n = n_obs_steps, p = n_action_pred_token, c = action_chunk_size) │ │ o_{t} : visual observation at timestep {t} │ │ s_{t} : state observation at timestep {t} │ │ a_{t} : action at timestep {t} │ │ A_Q : action_query_token │ -------------------------------------------------------------------------- Training Phase 1. Discretize action using Residual VQ (for config.n_vqvae_training_steps steps) ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │ RVQ encoder │ ─► │ Residual │ ─► │ RVQ Decoder │ │ (a_{t}~a_{t+p}) │ │ Code Quantizer │ │ │ │ │ │ │ │ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ Training Phase 2. timestep {t-n+1} timestep {t-n+2} timestep {t} ┌─────┴─────┐ ┌─────┴─────┐ ┌─────┴─────┐ o_{t-n+1} o_{t-n+2} ... o_{t} │ │ │ │ s_{t-n+1} │ s_{t-n+2} ... │ s_{t} p │ │ │ │ │ │ ┌───────┴───────┐ │ │ A_Q │ │ A_Q ... │ │ A_Q ... A_Q │ │ │ │ │ │ │ │ │ │ ┌───▼─────▼─────▼─────▼─────▼─────▼─────────────────▼─────▼─────▼───────────────▼───┐ │ │ │ GPT │ => policy │ │ └───────────────▼─────────────────▼─────────────────────────────▼───────────────▼───┘ │ │ │ │ ┌───┴───┐ ┌───┴───┐ ┌───┴───┐ ┌───┴───┐ code offset code offset code offset code offset ▼ │ ▼ │ ▼ │ ▼ │ => action_head RVQ Decoder │ RVQ Decoder │ RVQ Decoder │ RVQ Decoder │ └── + ──┘ └── + ──┘ └── + ──┘ └── + ──┘ ▼ ▼ ▼ ▼ action chunk action chunk action chunk action chunk a_{t-n+1} ~ a_{t-n+2} ~ a_{t} ~ ... a_{t+p-1} ~ a_{t-n+c} a_{t-n+c+1} a_{t+c-1} a_{t+p+c-1} ▼ ONLY this chunk is used in rollout! 
""" def __init__(self, config: VQBeTConfig): super().__init__() self.config = config self.rgb_encoder = VQBeTRgbEncoder(config) self.num_images = len(self.config.image_features) # This action query token is used as a prompt for querying action chunks. Please refer to "A_Q" in the image above. # Note: During the forward pass, this token is repeated as many times as needed. The authors also experimented with initializing the necessary number of tokens independently and observed inferior results. self.action_token = nn.Parameter(torch.randn(1, 1, self.config.gpt_input_dim)) # To input state and observation features into GPT layers, we first project the features to fit the shape of input size of GPT. self.state_projector = MLP( config.robot_state_feature.shape[0], hidden_channels=[self.config.gpt_input_dim] ) self.rgb_feature_projector = MLP( self.rgb_encoder.feature_dim, hidden_channels=[self.config.gpt_input_dim] ) # GPT part of VQ-BeT self.policy = GPT(config) # bin prediction head / offset prediction head part of VQ-BeT self.action_head = VQBeTHead(config) # Action tokens for: each observation step, the current action token, and all future action tokens. num_tokens = self.config.n_action_pred_token + self.config.n_obs_steps - 1 self.register_buffer( "select_target_actions_indices", torch.row_stack([torch.arange(i, i + self.config.action_chunk_size) for i in range(num_tokens)]), ) def forward(self, batch: dict[str, Tensor], rollout: bool) -> tuple[dict, dict]: # Input validation. assert set(batch).issuperset({"observation.state", "observation.images"}) batch_size, n_obs_steps = batch["observation.state"].shape[:2] assert n_obs_steps == self.config.n_obs_steps # Extract image feature (first combine batch and sequence dims). img_features = self.rgb_encoder( einops.rearrange(batch["observation.images"], "b s n ... -> (b s n) ...") ) # Separate batch and sequence dims. img_features = einops.rearrange( img_features, "(b s n) ... 
-> b s n ...", b=batch_size, s=n_obs_steps, n=self.num_images ) # Arrange prior and current observation step tokens as shown in the class docstring. # First project features to token dimension. rgb_tokens = self.rgb_feature_projector( img_features ) # (batch, obs_step, number of different cameras, projection dims) input_tokens = [rgb_tokens[:, :, i] for i in range(rgb_tokens.size(2))] input_tokens.append( self.state_projector(batch["observation.state"]) ) # (batch, obs_step, projection dims) input_tokens.append(einops.repeat(self.action_token, "1 1 d -> b n d", b=batch_size, n=n_obs_steps)) # Interleave tokens by stacking and rearranging. input_tokens = torch.stack(input_tokens, dim=2) input_tokens = einops.rearrange(input_tokens, "b n t d -> b (n t) d") len_additional_action_token = self.config.n_action_pred_token - 1 future_action_tokens = self.action_token.repeat(batch_size, len_additional_action_token, 1) # add additional action query tokens for predicting future action chunks input_tokens = torch.cat([input_tokens, future_action_tokens], dim=1) # get action features (pass through GPT) features = self.policy(input_tokens) # len(self.config.input_features) is the number of different observation modes. # this line gets the index of action prompt tokens. historical_act_pred_index = np.arange(0, n_obs_steps) * (len(self.config.input_features) + 1) + len( self.config.input_features ) # only extract the output tokens at the position of action query: # Behavior Transformer (BeT), and VQ-BeT are both sequence-to-sequence prediction models, # mapping sequential observation to sequential action (please refer to section 2.2 in BeT paper https://huggingface.co/papers/2206.11251). # Thus, it predicts a historical action sequence, in addition to current and future actions (predicting future actions : optional). 
if len_additional_action_token > 0: features = torch.cat( [features[:, historical_act_pred_index], features[:, -len_additional_action_token:]], dim=1 ) else: features = features[:, historical_act_pred_index] # pass through action head action_head_output = self.action_head(features) # if rollout, VQ-BeT don't calculate loss if rollout: return action_head_output["predicted_action"][:, n_obs_steps - 1, :].reshape( batch_size, self.config.action_chunk_size, -1 ) # else, it calculate overall loss (bin prediction loss, and offset loss) else: output = batch[ACTION][:, self.select_target_actions_indices] loss = self.action_head.loss_fn(action_head_output, output, reduction="mean") return action_head_output, loss class VQBeTHead(nn.Module): def __init__(self, config: VQBeTConfig): """ VQBeTHead takes output of GPT layers, and pass the feature through bin prediction head (`self.map_to_cbet_preds_bin`), and offset prediction head (`self.map_to_cbet_preds_offset`) self.map_to_cbet_preds_bin: outputs probability of each code (for each layer). The input dimension of `self.map_to_cbet_preds_bin` is same with the output of GPT, and the output dimension of `self.map_to_cbet_preds_bin` is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed`. if the agent select the code sequentially, we use self.map_to_cbet_preds_primary_bin and self.map_to_cbet_preds_secondary_bin instead of self._map_to_cbet_preds_bin. self.map_to_cbet_preds_offset: output the predicted offsets for all the codes in all the layers. The input dimension of ` self.map_to_cbet_preds_offset` is same with the output of GPT, and the output dimension of ` self.map_to_cbet_preds_offset` is `self.vqvae_model.vqvae_num_layers (=fixed as 2) * self.config.vqvae_n_embed * config.action_chunk_size * config.action_feature.shape[0]`. 
""" super().__init__() self.config = config # init vqvae self.vqvae_model = VqVae(config) if config.sequentially_select: self.map_to_cbet_preds_primary_bin = MLP( in_channels=config.gpt_output_dim, hidden_channels=[self.config.vqvae_n_embed], ) self.map_to_cbet_preds_secondary_bin = MLP( in_channels=config.gpt_output_dim + self.config.vqvae_n_embed, hidden_channels=[self.config.vqvae_n_embed], ) else: self.map_to_cbet_preds_bin = MLP( in_channels=config.gpt_output_dim, hidden_channels=[self.vqvae_model.vqvae_num_layers * self.config.vqvae_n_embed], ) self.map_to_cbet_preds_offset = MLP( in_channels=config.gpt_output_dim, hidden_channels=[ self.vqvae_model.vqvae_num_layers * self.config.vqvae_n_embed * config.action_chunk_size * config.action_feature.shape[0], ], ) # loss self._focal_loss_fn = FocalLoss(gamma=2.0) def discretize(self, n_vqvae_training_steps, actions): # Resize the action sequence data to fit the action chunk size using a sliding window approach. actions = torch.cat( [ actions[:, j : j + self.config.action_chunk_size, :] for j in range(actions.shape[1] + 1 - self.config.action_chunk_size) ], dim=0, ) # `actions` is a tensor of shape (new_batch, action_chunk_size, action_dim) where new_batch is the number of possible chunks created from the original sequences using the sliding window. loss, metric = self.vqvae_model.vqvae_forward(actions) n_different_codes = sum( [len(torch.unique(metric[2][:, i])) for i in range(self.vqvae_model.vqvae_num_layers)] ) n_different_combinations = len(torch.unique(metric[2], dim=0)) recon_l1_error = metric[0].detach().cpu().item() self.vqvae_model.optimized_steps += 1 # if we updated RVQ more than `n_vqvae_training_steps` steps, we freeze the RVQ part. 
if self.vqvae_model.optimized_steps >= n_vqvae_training_steps: self.vqvae_model.discretized = torch.tensor(True) self.vqvae_model.vq_layer.freeze_codebook = torch.tensor(True) print("Finished discretizing action data!") self.vqvae_model.eval() for param in self.vqvae_model.vq_layer.parameters(): param.requires_grad = False return loss, n_different_codes, n_different_combinations, recon_l1_error def forward(self, x, **kwargs) -> dict: # N is the batch size, and T is number of action query tokens, which are process through same GPT N, T, _ = x.shape # we calculate N and T side parallelly. Thus, the dimensions would be # (batch size * number of action query tokens, action chunk size, action dimension) x = einops.rearrange(x, "N T WA -> (N T) WA") # sample offsets cbet_offsets = self.map_to_cbet_preds_offset(x) cbet_offsets = einops.rearrange( cbet_offsets, "(NT) (G C WA) -> (NT) G C WA", G=self.vqvae_model.vqvae_num_layers, C=self.config.vqvae_n_embed, ) # if self.config.sequentially_select is True, bin prediction head first sample the primary code, and then sample secondary code if self.config.sequentially_select: cbet_primary_logits = self.map_to_cbet_preds_primary_bin(x) # select primary bin first cbet_primary_probs = torch.softmax( cbet_primary_logits / self.config.bet_softmax_temperature, dim=-1 ) NT, choices = cbet_primary_probs.shape sampled_primary_centers = einops.rearrange( torch.multinomial(cbet_primary_probs.view(-1, choices), num_samples=1), "(NT) 1 -> NT", NT=NT, ) cbet_secondary_logits = self.map_to_cbet_preds_secondary_bin( torch.cat( (x, F.one_hot(sampled_primary_centers, num_classes=self.config.vqvae_n_embed)), axis=1, ) ) cbet_secondary_probs = torch.softmax( cbet_secondary_logits / self.config.bet_softmax_temperature, dim=-1 ) sampled_secondary_centers = einops.rearrange( torch.multinomial(cbet_secondary_probs.view(-1, choices), num_samples=1), "(NT) 1 -> NT", NT=NT, ) sampled_centers = torch.stack((sampled_primary_centers, 
sampled_secondary_centers), axis=1) cbet_logits = torch.stack([cbet_primary_logits, cbet_secondary_logits], dim=1) # if self.config.sequentially_select is False, bin prediction head samples primary and secondary code at once. else: cbet_logits = self.map_to_cbet_preds_bin(x) cbet_logits = einops.rearrange( cbet_logits, "(NT) (G C) -> (NT) G C", G=self.vqvae_model.vqvae_num_layers ) cbet_probs = torch.softmax(cbet_logits / self.config.bet_softmax_temperature, dim=-1) NT, G, choices = cbet_probs.shape sampled_centers = einops.rearrange( torch.multinomial(cbet_probs.view(-1, choices), num_samples=1), "(NT G) 1 -> NT G", NT=NT, ) device = get_device_from_parameters(self) indices = ( torch.arange(NT, device=device).unsqueeze(1), torch.arange(self.vqvae_model.vqvae_num_layers, device=device).unsqueeze(0), sampled_centers, ) # Use advanced indexing to sample the values (Extract the only offsets corresponding to the sampled codes.) sampled_offsets = cbet_offsets[indices] # Then, sum the offsets over the RVQ layers to get a net offset for the bin prediction sampled_offsets = sampled_offsets.sum(dim=1) with torch.no_grad(): # Get the centroids (= vectors corresponding to the codes) of each layer to pass it through RVQ decoder return_decoder_input = self.vqvae_model.get_embeddings_from_code(sampled_centers).clone().detach() # pass the centroids through decoder to get actions. 
decoded_action = self.vqvae_model.get_action_from_latent(return_decoder_input).clone().detach() # reshaped extracted offset to match with decoded centroids sampled_offsets = einops.rearrange( sampled_offsets, "NT (W A) -> NT W A", W=self.config.action_chunk_size ) # add offset and decoded centroids predicted_action = decoded_action + sampled_offsets predicted_action = einops.rearrange( predicted_action, "(N T) W A -> N T (W A)", N=N, T=T, W=self.config.action_chunk_size, ) return { "cbet_logits": cbet_logits, "predicted_action": predicted_action, "sampled_centers": sampled_centers, "decoded_action": decoded_action, } def loss_fn(self, pred, target, **kwargs): """ for given ground truth action values (target), and prediction (pred) this function calculates the overall loss. predicted_action: predicted action chunk (offset + decoded centroids) sampled_centers: sampled centroids (code of RVQ) decoded_action: decoded action, which is produced by passing sampled_centers through RVQ decoder NT: batch size * T T: number of action query tokens, which are process through same GPT cbet_logits: probability of all codes in each layer """ action_seq = target predicted_action = pred["predicted_action"] sampled_centers = pred["sampled_centers"] decoded_action = pred["decoded_action"] NT = predicted_action.shape[0] * predicted_action.shape[1] cbet_logits = pred["cbet_logits"] predicted_action = einops.rearrange( predicted_action, "N T (W A) -> (N T) W A", W=self.config.action_chunk_size ) action_seq = einops.rearrange(action_seq, "N T W A -> (N T) W A") # Figure out the loss for the actions. # First, we need to find the closest cluster center for each ground truth action. with torch.no_grad(): state_vq, action_bins = self.vqvae_model.get_code(action_seq) # action_bins: NT, G # Now we can compute the loss. 
# offset loss is L1 distance between the predicted action and ground truth action offset_loss = F.l1_loss(action_seq, predicted_action) # calculate primary code prediction loss cbet_loss1 = self._focal_loss_fn( cbet_logits[:, 0, :], action_bins[:, 0], ) # calculate secondary code prediction loss cbet_loss2 = self._focal_loss_fn( cbet_logits[:, 1, :], action_bins[:, 1], ) # add all the prediction loss cbet_loss = ( cbet_loss1 * self.config.primary_code_loss_weight + cbet_loss2 * self.config.secondary_code_loss_weight ) equal_primary_code_rate = torch.sum((action_bins[:, 0] == sampled_centers[:, 0]).int()) / (NT) equal_secondary_code_rate = torch.sum((action_bins[:, 1] == sampled_centers[:, 1]).int()) / (NT) action_mse_error = torch.mean((action_seq - predicted_action) ** 2) vq_action_error = torch.mean(torch.abs(action_seq - decoded_action)) offset_action_error = torch.mean(torch.abs(action_seq - predicted_action)) action_error_max = torch.max(torch.abs(action_seq - predicted_action)) loss = cbet_loss + self.config.offset_loss_weight * offset_loss loss_dict = { "loss": loss, "classification_loss": cbet_loss.detach().cpu().item(), "offset_loss": offset_loss.detach().cpu().item(), "equal_primary_code_rate": equal_primary_code_rate.detach().cpu().item(), "equal_secondary_code_rate": equal_secondary_code_rate.detach().cpu().item(), "vq_action_error": vq_action_error.detach().cpu().item(), "offset_action_error": offset_action_error.detach().cpu().item(), "action_error_max": action_error_max.detach().cpu().item(), "action_mse_error": action_mse_error.detach().cpu().item(), } return loss_dict class VQBeTRgbEncoder(nn.Module): """Encode an RGB image into a 1D feature vector. Includes the ability to normalize and crop the image first. Same with DiffusionRgbEncoder from modeling_diffusion.py """ def __init__(self, config: VQBeTConfig): super().__init__() # Set up optional preprocessing. 
if config.crop_shape is not None: self.do_crop = True # Always use center crop for eval self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape) if config.crop_is_random: self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape) else: self.maybe_random_crop = self.center_crop else: self.do_crop = False # Set up backbone. backbone_model = getattr(torchvision.models, config.vision_backbone)( weights=config.pretrained_backbone_weights ) # Note: This assumes that the layer4 feature map is children()[-3] # TODO(alexander-soare): Use a safer alternative. self.backbone = nn.Sequential(*(list(backbone_model.children())[:-2])) if config.use_group_norm: if config.pretrained_backbone_weights: raise ValueError( "You can't replace BatchNorm in a pretrained model without ruining the weights!" ) self.backbone = _replace_submodules( root_module=self.backbone, predicate=lambda x: isinstance(x, nn.BatchNorm2d), func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features), ) # Set up pooling and final layers. # Use a dry run to get the feature map shape. # The dummy input should take the number of image channels from `config.image_features` and it should # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the # height and width from `config.image_features`. 
images_shape = next(iter(config.image_features.values())).shape dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:] dummy_shape = (1, images_shape[0], *dummy_shape_h_w) feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:] self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) self.feature_dim = config.spatial_softmax_num_keypoints * 2 self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: """ Args: x: (B, C, H, W) image tensor with pixel values in [0, 1]. Returns: (B, D) image feature. """ # Preprocess: maybe crop (if it was set up in the __init__). if self.do_crop: if self.training: # noqa: SIM108 x = self.maybe_random_crop(x) else: # Always use center crop for eval. x = self.center_crop(x) # Extract backbone feature. x = torch.flatten(self.pool(self.backbone(x)), start_dim=1) # Final linear layer with non-linearity. x = self.relu(self.out(x)) return x def _replace_submodules( root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module] ) -> nn.Module: """ Args: root_module: The module for which the submodules need to be replaced predicate: Takes a module as an argument and must return True if the that module is to be replaced. func: Takes a module as an argument and returns a new module to replace it with. Returns: The root module with its submodules replaced. 
""" if predicate(root_module): return func(root_module) replace_list = [k.split(".") for k, m in root_module.named_modules(remove_duplicate=True) if predicate(m)] for *parents, k in replace_list: parent_module = root_module if len(parents) > 0: parent_module = root_module.get_submodule(".".join(parents)) if isinstance(parent_module, nn.Sequential): src_module = parent_module[int(k)] else: src_module = getattr(parent_module, k) tgt_module = func(src_module) if isinstance(parent_module, nn.Sequential): parent_module[int(k)] = tgt_module else: setattr(parent_module, k, tgt_module) # verify that all BN are replaced assert not any(predicate(m) for _, m in root_module.named_modules(remove_duplicate=True)) return root_module class VqVae(nn.Module): def __init__( self, config: VQBeTConfig, ): """ VQ-VAE is composed of three parts: encoder, vq_layer, and decoder. Encoder and decoder are MLPs consisting of an input, output layer, and hidden layer, respectively. The vq_layer uses residual VQs. This class contains functions for training the encoder and decoder along with the residual VQ layer (for training phase 1), as well as functions to help BeT training part in training phase 2. """ super().__init__() self.config = config # 'discretized' indicates whether the Residual VQ part is trained or not. (After finishing the training, we set discretized=True) self.register_buffer("discretized", torch.tensor(False)) self.optimized_steps = 0 # we use the fixed number of layers for Residual VQ across all environments. 
self.vqvae_num_layers = 2 self.vq_layer = ResidualVQ( dim=config.vqvae_embedding_dim, num_quantizers=self.vqvae_num_layers, codebook_size=config.vqvae_n_embed, ) self.encoder = MLP( in_channels=self.config.action_feature.shape[0] * self.config.action_chunk_size, hidden_channels=[ config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, config.vqvae_embedding_dim, ], ) self.decoder = MLP( in_channels=config.vqvae_embedding_dim, hidden_channels=[ config.vqvae_enc_hidden_dim, config.vqvae_enc_hidden_dim, self.config.action_feature.shape[0] * self.config.action_chunk_size, ], ) def get_embeddings_from_code(self, encoding_indices): # This function gets code indices as inputs, and outputs embedding vectors corresponding to the code indices. with torch.no_grad(): z_embed = self.vq_layer.get_codebook_vector_from_indices(encoding_indices) # since the RVQ has multiple layers, it adds the vectors in the axis of layers to provide a vector for that code combination. z_embed = z_embed.sum(dim=0) return z_embed def get_action_from_latent(self, latent): # given latent vector, this function outputs the decoded action. output = self.decoder(latent) if self.config.action_chunk_size == 1: return einops.rearrange(output, "N (T A) -> N T A", A=self.config.action_feature.shape[0]) else: return einops.rearrange(output, "N (T A) -> N T A", A=self.config.action_feature.shape[0]) def get_code(self, state): # in phase 2 of VQ-BeT training, we need a `ground truth labels of action data` to calculate the Focal loss for code prediction head. (please refer to section 3.3 in the paper https://huggingface.co/papers/2403.03181) # this function outputs the `GT code` of given action using frozen encoder and quantization layers. (please refer to Figure 2. 
in the paper https://huggingface.co/papers/2403.03181) state = einops.rearrange(state, "N T A -> N (T A)") with torch.no_grad(): state_rep = self.encoder(state) state_rep_shape = state_rep.shape[:-1] state_rep_flat = state_rep.view(state_rep.size(0), -1, state_rep.size(1)) state_rep_flat, vq_code, vq_loss_state = self.vq_layer(state_rep_flat) state_vq = state_rep_flat.view(*state_rep_shape, -1) vq_code = vq_code.view(*state_rep_shape, -1) vq_loss_state = torch.sum(vq_loss_state) return state_vq, vq_code def vqvae_forward(self, state): # This function passes the given data through Residual VQ with Encoder and Decoder. Please refer to section 3.2 in the paper https://huggingface.co/papers/2403.03181). state = einops.rearrange(state, "N T A -> N (T A)") # We start with passing action (or action chunk) at:t+n through the encoder ϕ. state_rep = self.encoder(state) state_rep_shape = state_rep.shape[:-1] state_rep_flat = state_rep.view(state_rep.size(0), -1, state_rep.size(1)) # The resulting latent embedding vector x = ϕ(at:t+n) is then mapped to an embedding vector in the codebook of the RVQ layers by the nearest neighbor look-up. state_rep_flat, vq_code, vq_loss_state = self.vq_layer(state_rep_flat) state_vq = state_rep_flat.view(*state_rep_shape, -1) vq_code = vq_code.view(*state_rep_shape, -1) # since the RVQ has multiple layers, it adds the vectors in the axis of layers to provide a vector for that code combination. vq_loss_state = torch.sum(vq_loss_state) # Then, the discretized vector zq(x) is reconstructed as ψ(zq(x)) by passing through the decoder ψ. 
dec_out = self.decoder(state_vq) # Calculate L1 reconstruction loss encoder_loss = (state - dec_out).abs().mean() # add encoder reconstruction loss and commitment loss rep_loss = encoder_loss + vq_loss_state * 5 metric = ( encoder_loss.clone().detach(), vq_loss_state.clone().detach(), vq_code, rep_loss.item(), ) return rep_loss, metric class FocalLoss(nn.Module): """ From https://github.com/notmahi/miniBET/blob/main/behavior_transformer/bet.py """ def __init__(self, gamma: float = 0, size_average: bool = True): super().__init__() self.gamma = gamma self.size_average = size_average def forward(self, input, target): if len(input.shape) == 3: N, T, _ = input.shape logpt = F.log_softmax(input, dim=-1) logpt = logpt.gather(-1, target.view(N, T, 1)).view(N, T) elif len(input.shape) == 2: logpt = F.log_softmax(input, dim=-1) logpt = logpt.gather(-1, target.view(-1, 1)).view(-1) pt = logpt.exp() loss = -1 * (1 - pt) ** self.gamma * logpt if self.size_average: return loss.mean() else: return loss.sum() class MLP(torch.nn.Sequential): def __init__( self, in_channels: int, hidden_channels: list[int], ): layers = [] in_dim = in_channels for hidden_dim in hidden_channels[:-1]: layers.append(torch.nn.Linear(in_dim, hidden_dim)) layers.append(torch.nn.ReLU()) in_dim = hidden_dim layers.append(torch.nn.Linear(in_dim, hidden_channels[-1])) super().__init__(*layers)
lerobot/src/lerobot/policies/vqbet/modeling_vqbet.py/0
{ "file_path": "lerobot/src/lerobot/policies/vqbet/modeling_vqbet.py", "repo_id": "lerobot", "token_count": 20389 }
220
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

from lerobot.cameras import CameraConfig

from ..config import RobotConfig


@RobotConfig.register_subclass("hope_jr_hand")
@dataclass
class HopeJrHandConfig(RobotConfig):
    """Configuration for a HopeJr hand.

    Raises:
        ValueError: if `side` is not "left" or "right".
    """

    # Port to connect to the hand
    port: str
    # Which hand this config drives: "left" or "right"
    side: str

    # Disable motor torque when the connection is closed (safe default).
    disable_torque_on_disconnect: bool = True

    cameras: dict[str, CameraConfig] = field(default_factory=dict)

    def __post_init__(self):
        super().__post_init__()
        if self.side not in ["right", "left"]:
            # Same exception type as before, but with an actionable message
            # (previously the bare invalid value was the whole message).
            raise ValueError(f"`side` must be 'left' or 'right', got {self.side!r}")


@RobotConfig.register_subclass("hope_jr_arm")
@dataclass
class HopeJrArmConfig(RobotConfig):
    """Configuration for a HopeJr arm."""

    # Port to connect to the arm
    port: str

    # Disable motor torque when the connection is closed (safe default).
    disable_torque_on_disconnect: bool = True

    # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
    # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
    # the number of motors in your follower arms.
    max_relative_target: int | None = None

    cameras: dict[str, CameraConfig] = field(default_factory=dict)
lerobot/src/lerobot/robots/hope_jr/config_hope_jr.py/0
{ "file_path": "lerobot/src/lerobot/robots/hope_jr/config_hope_jr.py", "repo_id": "lerobot", "token_count": 554 }
221
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.cameras import CameraConfig from ..config import RobotConfig @RobotConfig.register_subclass("so100_follower") @dataclass class SO100FollowerConfig(RobotConfig): # Port to connect to the arm port: str disable_torque_on_disconnect: bool = True # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. 
max_relative_target: int | None = None # cameras cameras: dict[str, CameraConfig] = field(default_factory=dict) # Set to `True` for backward compatibility with previous policies/dataset use_degrees: bool = False @RobotConfig.register_subclass("so100_follower_end_effector") @dataclass class SO100FollowerEndEffectorConfig(SO100FollowerConfig): """Configuration for the SO100FollowerEndEffector robot.""" # Path to URDF file for kinematics # NOTE: It is highly recommended to use the urdf in the SO-ARM100 repo: # https://github.com/TheRobotStudio/SO-ARM100/blob/main/Simulation/SO101/so101_new_calib.urdf urdf_path: str | None = None # End-effector frame name in the URDF target_frame_name: str = "gripper_frame_link" # Default bounds for the end-effector position (in meters) end_effector_bounds: dict[str, list[float]] = field( default_factory=lambda: { "min": [-1.0, -1.0, -1.0], # min x, y, z "max": [1.0, 1.0, 1.0], # max x, y, z } ) max_gripper_pos: float = 50 end_effector_step_sizes: dict[str, float] = field( default_factory=lambda: { "x": 0.02, "y": 0.02, "z": 0.02, } )
lerobot/src/lerobot/robots/so100_follower/config_so100_follower.py/0
{ "file_path": "lerobot/src/lerobot/robots/so100_follower/config_so100_follower.py", "repo_id": "lerobot", "token_count": 882 }
222
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import time
from functools import cached_property
from typing import Any

from lerobot.cameras.utils import make_cameras_from_configs
from lerobot.constants import OBS_STATE
from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.motors import Motor, MotorCalibration, MotorNormMode
from lerobot.motors.dynamixel import (
    DynamixelMotorsBus,
    OperatingMode,
)

from ..robot import Robot
from ..utils import ensure_safe_goal_position
from .config_viperx import ViperXConfig

logger = logging.getLogger(__name__)


class ViperX(Robot):
    """
    [ViperX](https://www.trossenrobotics.com/viperx-300) developed by Trossen Robotics
    """

    config_class = ViperXConfig
    name = "viperx"

    def __init__(
        self,
        config: ViperXConfig,
    ):
        # This robot is not supported yet; the code below is kept as a starting point.
        raise NotImplementedError
        super().__init__(config)
        self.config = config
        self.bus = DynamixelMotorsBus(
            port=self.config.port,
            motors={
                "waist": Motor(1, "xm540-w270", MotorNormMode.RANGE_M100_100),
                "shoulder": Motor(2, "xm540-w270", MotorNormMode.RANGE_M100_100),
                "shoulder_shadow": Motor(3, "xm540-w270", MotorNormMode.RANGE_M100_100),
                "elbow": Motor(4, "xm540-w270", MotorNormMode.RANGE_M100_100),
                "elbow_shadow": Motor(5, "xm540-w270", MotorNormMode.RANGE_M100_100),
                "forearm_roll": Motor(6, "xm540-w270", MotorNormMode.RANGE_M100_100),
                "wrist_angle": Motor(7, "xm540-w270", MotorNormMode.RANGE_M100_100),
                "wrist_rotate": Motor(8, "xm430-w350", MotorNormMode.RANGE_M100_100),
                "gripper": Motor(9, "xm430-w350", MotorNormMode.RANGE_0_100),
            },
        )
        self.cameras = make_cameras_from_configs(config.cameras)

    @property
    def _motors_ft(self) -> dict[str, type]:
        # One float feature per motor, keyed "{motor}.pos".
        return {f"{motor}.pos": float for motor in self.bus.motors}

    @property
    def _cameras_ft(self) -> dict[str, tuple]:
        # One (height, width, channels) image feature per camera.
        return {
            cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras
        }

    @cached_property
    def observation_features(self) -> dict[str, type | tuple]:
        return {**self._motors_ft, **self._cameras_ft}

    @cached_property
    def action_features(self) -> dict[str, type]:
        return self._motors_ft

    @property
    def is_connected(self) -> bool:
        return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())

    def connect(self, calibrate: bool = True) -> None:
        """
        We assume that at connection time, arm is in a rest position,
        and torque can be safely disabled to run calibration.
        """
        if self.is_connected:
            raise DeviceAlreadyConnectedError(f"{self} already connected")

        self.bus.connect()
        if not self.is_calibrated and calibrate:
            self.calibrate()

        for cam in self.cameras.values():
            cam.connect()

        self.configure()
        logger.info(f"{self} connected.")

    @property
    def is_calibrated(self) -> bool:
        return self.bus.is_calibrated

    def calibrate(self) -> None:
        raise NotImplementedError  # TODO(aliberts): adapt code below (copied from koch)
        # NOTE(review): the code below still references koch joint names
        # ("shoulder_pan", "wrist_roll") that do not exist on this bus — it must be
        # adapted before removing the NotImplementedError above.
        logger.info(f"\nRunning calibration of {self}")
        self.bus.disable_torque()
        for motor in self.bus.motors:
            self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)

        input("Move robot to the middle of its range of motion and press ENTER....")
        homing_offsets = self.bus.set_half_turn_homings()

        full_turn_motors = ["shoulder_pan", "wrist_roll"]
        unknown_range_motors = [motor for motor in self.bus.motors if motor not in full_turn_motors]
        print(
            f"Move all joints except {full_turn_motors} sequentially through their entire "
            "ranges of motion.\nRecording positions. Press ENTER to stop..."
        )
        range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors)
        for motor in full_turn_motors:
            range_mins[motor] = 0
            range_maxes[motor] = 4095

        self.calibration = {}
        for motor, m in self.bus.motors.items():
            self.calibration[motor] = MotorCalibration(
                id=m.id,
                drive_mode=0,
                homing_offset=homing_offsets[motor],
                range_min=range_mins[motor],
                range_max=range_maxes[motor],
            )

        self.bus.write_calibration(self.calibration)
        self._save_calibration()
        logger.info(f"Calibration saved to {self.calibration_fpath}")

    def configure(self) -> None:
        with self.bus.torque_disabled():
            self.bus.configure_motors()

            # Set secondary/shadow ID for shoulder and elbow. These joints have two motors.
            # As a result, if only one of them is required to move to a certain position,
            # the other will follow. This is to avoid breaking the motors.
            self.bus.write("Secondary_ID", "shoulder_shadow", 2)
            self.bus.write("Secondary_ID", "elbow_shadow", 4)

            # Set a velocity limit of 131 as advised by Trossen Robotics
            # TODO(aliberts): remove as it's actually useless in position control
            self.bus.write("Velocity_Limit", 131)

            # Use 'extended position mode' for all motors except gripper, because in joint mode the servos
            # can't rotate more than 360 degrees (from 0 to 4095) And some mistake can happen while assembling
            # the arm, you could end up with a servo with a position 0 or 4095 at a crucial point.
            # See: https://emanual.robotis.com/docs/en/dxl/x/x_series/#operating-mode11
            for motor in self.bus.motors:
                if motor != "gripper":
                    self.bus.write("Operating_Mode", motor, OperatingMode.EXTENDED_POSITION.value)

            # Use 'position control current based' for follower gripper to be limited by the limit of the
            # current. It can grasp an object without forcing too much even tho, it's goal position is a
            # complete grasp (both gripper fingers are ordered to join and reach a touch).
            self.bus.write("Operating_Mode", "gripper", OperatingMode.CURRENT_POSITION.value)

    def get_observation(self) -> dict[str, Any]:
        """The returned observations do not have a batch dimension."""
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        obs_dict = {}

        # Read arm position
        start = time.perf_counter()
        # Fix: read motor positions into a local dict and key the observation per motor.
        # The previous code stored the whole positions dict under OBS_STATE and then
        # re-keyed over that single entry, which produced {f"{OBS_STATE}.pos": {...}}
        # instead of the per-motor "{motor}.pos" keys declared in `observation_features`.
        positions = self.bus.sync_read("Present_Position")
        obs_dict = {f"{motor}.pos": val for motor, val in positions.items()}
        dt_ms = (time.perf_counter() - start) * 1e3
        logger.debug(f"{self} read state: {dt_ms:.1f}ms")

        # Capture images from cameras
        for cam_key, cam in self.cameras.items():
            start = time.perf_counter()
            obs_dict[cam_key] = cam.async_read()
            dt_ms = (time.perf_counter() - start) * 1e3
            logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms")

        return obs_dict

    def send_action(self, action: dict[str, float]) -> dict[str, float]:
        """Command arm to move to a target joint configuration.

        The relative action magnitude may be clipped depending on the configuration parameter
        `max_relative_target`. In this case, the action sent differs from original action.
        Thus, this function always returns the action actually sent.

        Args:
            action (dict[str, float]): The goal positions for the motors.

        Returns:
            dict[str, float]: The action sent to the motors, potentially clipped.
        """
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")}

        # Cap goal position when too far away from present position.
        # /!\ Slower fps expected due to reading from the follower.
        if self.config.max_relative_target is not None:
            present_pos = self.bus.sync_read("Present_Position")
            goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()}
            goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target)

        # Send goal position to the arm
        self.bus.sync_write("Goal_Position", goal_pos)
        return {f"{motor}.pos": val for motor, val in goal_pos.items()}

    def disconnect(self):
        if not self.is_connected:
            raise DeviceNotConnectedError(f"{self} is not connected.")

        self.bus.disconnect(self.config.disable_torque_on_disconnect)
        for cam in self.cameras.values():
            cam.disconnect()

        logger.info(f"{self} disconnected.")
lerobot/src/lerobot/robots/viperx/viperx.py/0
{ "file_path": "lerobot/src/lerobot/robots/viperx/viperx.py", "repo_id": "lerobot", "token_count": 4038 }
223
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Visualize data of **all** frames of any episode of a dataset of type LeRobotDataset.

Note: The last frame of the episode doesn't always correspond to a final state.
That's because our datasets are composed of transition from state to state up to
the antepenultimate state associated to the ultimate action to arrive in the final state.
However, there might not be a transition from a final state to another state.

Note: This script aims to visualize the data used to train the neural networks.
~What you see is what you get~. When visualizing image modality, it is often expected to observe
lossy compression artifacts since these images have been decoded from compressed mp4 videos to
save disk space. The compression factor applied has been tuned to not affect success rate.

Examples:

- Visualize data stored on a local machine:
```
local$ python -m lerobot.scripts.visualize_dataset \
    --repo-id lerobot/pusht \
    --episode-index 0
```

- Visualize data stored on a distant machine with a local viewer:
```
distant$ python -m lerobot.scripts.visualize_dataset \
    --repo-id lerobot/pusht \
    --episode-index 0 \
    --save 1 \
    --output-dir path/to/directory

local$ scp distant:path/to/directory/lerobot_pusht_episode_0.rrd .
local$ rerun lerobot_pusht_episode_0.rrd
```

- Visualize data stored on a distant machine through streaming:
(You need to forward the websocket port to the distant machine, with
`ssh -L 9087:localhost:9087 username@remote-host`)
```
distant$ python -m lerobot.scripts.visualize_dataset \
    --repo-id lerobot/pusht \
    --episode-index 0 \
    --mode distant \
    --ws-port 9087

local$ rerun ws://localhost:9087
```

"""

import argparse
import gc
import logging
import time
from collections.abc import Iterator
from pathlib import Path

import numpy as np
import rerun as rr
import torch
import torch.utils.data
import tqdm

from lerobot.datasets.lerobot_dataset import LeRobotDataset


class EpisodeSampler(torch.utils.data.Sampler):
    """Sampler yielding the global frame indices of a single episode, in order."""

    def __init__(self, dataset: LeRobotDataset, episode_index: int):
        from_idx = dataset.episode_data_index["from"][episode_index].item()
        to_idx = dataset.episode_data_index["to"][episode_index].item()
        self.frame_ids = range(from_idx, to_idx)

    def __iter__(self) -> Iterator:
        return iter(self.frame_ids)

    def __len__(self) -> int:
        return len(self.frame_ids)


def to_hwc_uint8_numpy(chw_float32_torch: torch.Tensor) -> np.ndarray:
    """Convert a float32 CHW tensor in [0, 1] to a uint8 HWC numpy array."""
    assert chw_float32_torch.dtype == torch.float32
    assert chw_float32_torch.ndim == 3
    c, h, w = chw_float32_torch.shape
    assert c < h and c < w, f"expect channel first images, but instead {chw_float32_torch.shape}"
    hwc_uint8_numpy = (chw_float32_torch * 255).type(torch.uint8).permute(1, 2, 0).numpy()
    return hwc_uint8_numpy


def visualize_dataset(
    dataset: LeRobotDataset,
    episode_index: int,
    batch_size: int = 32,
    num_workers: int = 0,
    mode: str = "local",
    web_port: int = 9090,
    ws_port: int = 9087,
    save: bool = False,
    output_dir: Path | None = None,
) -> Path | None:
    """Stream one episode of `dataset` to a Rerun viewer.

    Returns the path to the saved `.rrd` file when `save` is set, otherwise None.
    """
    if save:
        assert output_dir is not None, (
            "Set an output directory where to write .rrd files with `--output-dir path/to/directory`."
        )

    repo_id = dataset.repo_id

    logging.info("Loading dataloader")
    episode_sampler = EpisodeSampler(dataset, episode_index)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        num_workers=num_workers,
        batch_size=batch_size,
        sampler=episode_sampler,
    )

    logging.info("Starting Rerun")

    if mode not in ["local", "distant"]:
        raise ValueError(mode)

    spawn_local_viewer = mode == "local" and not save
    rr.init(f"{repo_id}/episode_{episode_index}", spawn=spawn_local_viewer)

    # Manually call python garbage collector after `rr.init` to avoid hanging in a blocking flush
    # when iterating on a dataloader with `num_workers` > 0
    # TODO(rcadene): remove `gc.collect` when rerun version 0.16 is out, which includes a fix
    gc.collect()

    if mode == "distant":
        rr.serve(open_browser=False, web_port=web_port, ws_port=ws_port)

    logging.info("Logging to Rerun")

    for batch in tqdm.tqdm(dataloader, total=len(dataloader)):
        # iterate over the batch
        for i in range(len(batch["index"])):
            rr.set_time_sequence("frame_index", batch["frame_index"][i].item())
            rr.set_time_seconds("timestamp", batch["timestamp"][i].item())

            # display each camera image
            for key in dataset.meta.camera_keys:
                # TODO(rcadene): add `.compress()`? is it lossless?
                rr.log(key, rr.Image(to_hwc_uint8_numpy(batch[key][i])))

            # display each dimension of action space (e.g. actuators command)
            if "action" in batch:
                for dim_idx, val in enumerate(batch["action"][i]):
                    rr.log(f"action/{dim_idx}", rr.Scalar(val.item()))

            # display each dimension of observed state space (e.g. agent position in joint space)
            if "observation.state" in batch:
                for dim_idx, val in enumerate(batch["observation.state"][i]):
                    rr.log(f"state/{dim_idx}", rr.Scalar(val.item()))

            if "next.done" in batch:
                rr.log("next.done", rr.Scalar(batch["next.done"][i].item()))

            if "next.reward" in batch:
                rr.log("next.reward", rr.Scalar(batch["next.reward"][i].item()))

            if "next.success" in batch:
                rr.log("next.success", rr.Scalar(batch["next.success"][i].item()))

    if mode == "local" and save:
        # save .rrd locally
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
        repo_id_str = repo_id.replace("/", "_")
        rrd_path = output_dir / f"{repo_id_str}_episode_{episode_index}.rrd"
        rr.save(rrd_path)
        return rrd_path

    elif mode == "distant":
        # stop the process from exiting since it is serving the websocket connection
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print("Ctrl-C received. Exiting.")


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo-id",
        type=str,
        required=True,
        help="Name of hugging face repository containing a LeRobotDataset dataset (e.g. `lerobot/pusht`).",
    )
    parser.add_argument(
        "--episode-index",
        type=int,
        required=True,
        help="Episode to visualize.",
    )
    parser.add_argument(
        "--root",
        type=Path,
        default=None,
        help="Root directory for the dataset stored locally (e.g. `--root data`). By default, the dataset will be loaded from hugging face cache folder, or downloaded from the hub if available.",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=None,
        help="Directory path to write a .rrd file when `--save 1` is set.",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=32,
        help="Batch size loaded by DataLoader.",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=4,
        help="Number of processes of Dataloader for loading the data.",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="local",
        help=(
            "Mode of viewing between 'local' or 'distant'. "
            "'local' requires data to be on a local machine. It spawns a viewer to visualize the data locally. "
            "'distant' creates a server on the distant machine where the data is stored. "
            "Visualize the data by connecting to the server with `rerun ws://localhost:PORT` on the local machine."
        ),
    )
    parser.add_argument(
        "--web-port",
        type=int,
        default=9090,
        help="Web port for rerun.io when `--mode distant` is set.",
    )
    parser.add_argument(
        "--ws-port",
        type=int,
        default=9087,
        help="Web socket port for rerun.io when `--mode distant` is set.",
    )
    parser.add_argument(
        "--save",
        type=int,
        default=0,
        help=(
            "Save a .rrd file in the directory provided by `--output-dir`. "
            "It also deactivates the spawning of a viewer. "
            "Visualize the data by running `rerun path/to/file.rrd` on your local machine."
        ),
    )
    parser.add_argument(
        "--tolerance-s",
        type=float,
        default=1e-4,
        help=(
            "Tolerance in seconds used to ensure data timestamps respect the dataset fps value"
            "This is argument passed to the constructor of LeRobotDataset and maps to its tolerance_s constructor argument"
            "If not given, defaults to 1e-4."
        ),
    )

    args = parser.parse_args()
    kwargs = vars(args)
    repo_id = kwargs.pop("repo_id")
    root = kwargs.pop("root")
    tolerance_s = kwargs.pop("tolerance_s")

    logging.info("Loading dataset")
    dataset = LeRobotDataset(repo_id, root=root, tolerance_s=tolerance_s)

    # Fix: pass the already-popped `kwargs` explicitly. The previous `**vars(args)` only
    # worked because `vars(args)` aliases the same dict the pops mutated — an implicit
    # coupling that would silently break if `kwargs` were ever copied.
    visualize_dataset(dataset, **kwargs)


if __name__ == "__main__":
    main()
lerobot/src/lerobot/scripts/visualize_dataset.py/0
{ "file_path": "lerobot/src/lerobot/scripts/visualize_dataset.py", "repo_id": "lerobot", "token_count": 4084 }
224
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import threading
from collections import deque
from pprint import pformat
from typing import Deque

import serial

from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError
from lerobot.motors.motors_bus import MotorCalibration, MotorNormMode
from lerobot.utils.utils import enter_pressed, move_cursor_up

from ..teleoperator import Teleoperator
from .config_homunculus import HomunculusArmConfig

logger = logging.getLogger(__name__)


class HomunculusArm(Teleoperator):
    """
    Homunculus Arm designed by Hugging Face.
    """

    config_class = HomunculusArmConfig
    name = "homunculus_arm"

    def __init__(self, config: HomunculusArmConfig):
        super().__init__(config)
        self.config = config
        self.serial = serial.Serial(config.port, config.baud_rate, timeout=1)
        self.serial_lock = threading.Lock()

        self.joints = {
            "shoulder_pitch": MotorNormMode.RANGE_M100_100,
            "shoulder_yaw": MotorNormMode.RANGE_M100_100,
            "shoulder_roll": MotorNormMode.RANGE_M100_100,
            "elbow_flex": MotorNormMode.RANGE_M100_100,
            "wrist_roll": MotorNormMode.RANGE_M100_100,
            "wrist_yaw": MotorNormMode.RANGE_M100_100,
            "wrist_pitch": MotorNormMode.RANGE_M100_100,
        }

        n = 50
        # EMA parameters ---------------------------------------------------
        self.n: int = n
        self.alpha: float = 2 / (n + 1)
        # one deque *per joint* so we can inspect raw history if needed.
        # Keyed off `self.joints` to keep the joint list defined in one place.
        self._buffers: dict[str, Deque[int]] = {joint: deque(maxlen=n) for joint in self.joints}
        # running EMA value per joint – lazily initialised on first read
        self._ema: dict[str, float | None] = dict.fromkeys(self._buffers)

        self._state: dict[str, float] | None = None
        self.new_state_event = threading.Event()
        self.stop_event = threading.Event()
        self.thread = threading.Thread(target=self._read_loop, daemon=True, name=f"{self} _read_loop")
        self.state_lock = threading.Lock()

    @property
    def action_features(self) -> dict:
        return {f"{joint}.pos": float for joint in self.joints}

    @property
    def feedback_features(self) -> dict:
        return {}

    @property
    def is_connected(self) -> bool:
        with self.serial_lock:
            return self.serial.is_open and self.thread.is_alive()

    def connect(self, calibrate: bool = True) -> None:
        if self.is_connected:
            raise DeviceAlreadyConnectedError(f"{self} already connected")

        if not self.serial.is_open:
            self.serial.open()
        self.thread.start()

        # wait for the thread to ramp up & 1st state to be ready
        if not self.new_state_event.wait(timeout=2):
            raise TimeoutError(f"{self}: Timed out waiting for state after 2s.")

        if not self.is_calibrated and calibrate:
            self.calibrate()

        logger.info(f"{self} connected.")

    @property
    def is_calibrated(self) -> bool:
        return self.calibration_fpath.is_file()

    def calibrate(self) -> None:
        print(
            "\nMove all joints through their entire range of motion."
            "\nRecording positions. Press ENTER to stop..."
        )
        range_mins, range_maxes = self._record_ranges_of_motion()

        self.calibration = {}
        for id_, joint in enumerate(self.joints):
            self.calibration[joint] = MotorCalibration(
                id=id_,
                drive_mode=0,
                homing_offset=0,
                range_min=range_mins[joint],
                range_max=range_maxes[joint],
            )

        self._save_calibration()
        print("Calibration saved to", self.calibration_fpath)

    # TODO(Steven): This function is copy/paste from the `HomunculusGlove` class. Consider moving it to an utility to reduce duplicated code.
    def _record_ranges_of_motion(
        self, joints: list[str] | None = None, display_values: bool = True
    ) -> tuple[dict[str, int], dict[str, int]]:
        """Interactively record the min/max encoder values of each joint.

        Move the joints while the method streams live positions. Press :kbd:`Enter` to finish.

        Args:
            joints (list[str] | None, optional): Joints to record. Defaults to every joint (`None`).
            display_values (bool, optional): When `True` (default) a live table is printed to the console.

        Raises:
            TypeError: `joints` is not `None` or a list.
            ValueError: any joint's recorded min and max are the same.

        Returns:
            tuple[dict[str, int], dict[str, int]]: Two dictionaries *mins* and *maxes* with the
            extreme values observed for each joint.
        """
        if joints is None:
            joints = list(self.joints)
        elif not isinstance(joints, list):
            raise TypeError(joints)

        display_len = max(len(key) for key in joints)

        start_positions = self._read(joints, normalize=False)
        mins = start_positions.copy()
        maxes = start_positions.copy()

        user_pressed_enter = False
        while not user_pressed_enter:
            positions = self._read(joints, normalize=False)
            mins = {joint: int(min(positions[joint], min_)) for joint, min_ in mins.items()}
            maxes = {joint: int(max(positions[joint], max_)) for joint, max_ in maxes.items()}

            if display_values:
                print("\n-------------------------------------------")
                print(f"{'NAME':<{display_len}} | {'MIN':>6} | {'POS':>6} | {'MAX':>6}")
                for joint in joints:
                    print(
                        f"{joint:<{display_len}} | {mins[joint]:>6} | {positions[joint]:>6} | {maxes[joint]:>6}"
                    )

            if enter_pressed():
                user_pressed_enter = True

            if display_values and not user_pressed_enter:
                # Move cursor up to overwrite the previous output
                move_cursor_up(len(joints) + 3)

        same_min_max = [joint for joint in joints if mins[joint] == maxes[joint]]
        if same_min_max:
            raise ValueError(f"Some joints have the same min and max values:\n{pformat(same_min_max)}")

        return mins, maxes

    def configure(self) -> None:
        pass

    # TODO(Steven): This function is copy/paste from the `HomunculusGlove` class. Consider moving it to an utility to reduce duplicated code.
    def _normalize(self, values: dict[str, int]) -> dict[str, float]:
        if not self.calibration:
            raise RuntimeError(f"{self} has no calibration registered.")

        normalized_values = {}
        for joint, val in values.items():
            min_ = self.calibration[joint].range_min
            max_ = self.calibration[joint].range_max
            drive_mode = self.calibration[joint].drive_mode
            # Clamp the raw value inside the calibrated range before scaling.
            bounded_val = min(max_, max(min_, val))

            if self.joints[joint] is MotorNormMode.RANGE_M100_100:
                norm = (((bounded_val - min_) / (max_ - min_)) * 200) - 100
                normalized_values[joint] = -norm if drive_mode else norm
            elif self.joints[joint] is MotorNormMode.RANGE_0_100:
                norm = ((bounded_val - min_) / (max_ - min_)) * 100
                normalized_values[joint] = 100 - norm if drive_mode else norm

        return normalized_values

    def _apply_ema(self, raw: dict[str, int]) -> dict[str, float]:
        """Update buffers & running EMA values; return smoothed dict."""
        smoothed: dict[str, float] = {}
        for joint, value in raw.items():
            # maintain raw history
            self._buffers[joint].append(value)

            # initialise on first run
            if self._ema[joint] is None:
                self._ema[joint] = float(value)
            else:
                self._ema[joint] = self.alpha * value + (1 - self.alpha) * self._ema[joint]

            smoothed[joint] = self._ema[joint]

        return smoothed

    def _read(
        self, joints: list[str] | None = None, normalize: bool = True, timeout: float = 1
    ) -> dict[str, int | float]:
        """
        Return the most recent (single) values from self.last_d, optionally applying calibration.
        """
        if not self.new_state_event.wait(timeout=timeout):
            raise TimeoutError(f"{self}: Timed out waiting for state after {timeout}s.")

        with self.state_lock:
            state = self._state
            self.new_state_event.clear()

        if state is None:
            raise RuntimeError(f"{self} Internal error: Event set but no state available.")

        if joints is not None:
            state = {k: v for k, v in state.items() if k in joints}

        if normalize:
            state = self._normalize(state)
            state = self._apply_ema(state)

        return state

    def _read_loop(self):
        """
        Continuously read from the serial buffer in its own thread and sends values to the main thread through
        a queue.
        """
        while not self.stop_event.is_set():
            try:
                raw_values = None
                with self.serial_lock:
                    if self.serial.in_waiting > 0:
                        self.serial.flush()
                        raw_values = self.serial.readline().decode("utf-8").strip().split(" ")

                if raw_values is None or len(raw_values) != 21:  # 16 raw + 5 angle values
                    continue

                joint_angles = {
                    "shoulder_pitch": int(raw_values[19]),
                    "shoulder_yaw": int(raw_values[18]),
                    "shoulder_roll": int(raw_values[20]),
                    "elbow_flex": int(raw_values[17]),
                    "wrist_roll": int(raw_values[16]),
                    "wrist_yaw": int(raw_values[1]),
                    "wrist_pitch": int(raw_values[0]),
                }

                with self.state_lock:
                    self._state = joint_angles
                    self.new_state_event.set()

            except Exception as e:
                logger.debug(f"Error reading frame in background thread for {self}: {e}")

    def get_action(self) -> dict[str, float]:
        joint_positions = self._read()
        return {f"{joint}.pos": pos for joint, pos in joint_positions.items()}

    def send_feedback(self, feedback: dict[str, float]) -> None:
        raise NotImplementedError

    def disconnect(self) -> None:
        if not self.is_connected:
            # Fix: the exception was previously instantiated but never raised, so
            # disconnect() silently proceeded on an already-disconnected device.
            raise DeviceNotConnectedError(f"{self} is not connected.")

        self.stop_event.set()
        self.thread.join(timeout=1)
        self.serial.close()
        logger.info(f"{self} disconnected.")
lerobot/src/lerobot/teleoperators/homunculus/homunculus_arm.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/homunculus/homunculus_arm.py", "repo_id": "lerobot", "token_count": 5226 }
225
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################
# Utilities
########################################################################################
import logging
import traceback
from contextlib import nullcontext
from copy import copy
from functools import cache

import numpy as np
import torch
from deepdiff import DeepDiff
from termcolor import colored

from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.datasets.utils import DEFAULT_FEATURES
from lerobot.policies.pretrained import PreTrainedPolicy
from lerobot.robots import Robot


def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, fps=None):
    """Log one control-loop timing line (total dt plus per-device read/write timings)."""
    log_items = []
    if episode_index is not None:
        log_items.append(f"ep:{episode_index}")
    if frame_index is not None:
        log_items.append(f"frame:{frame_index}")

    def log_dt(shortname, dt_val_s):
        nonlocal log_items, fps
        info_str = f"{shortname}:{dt_val_s * 1000:5.2f} ({1 / dt_val_s:3.1f}hz)"
        if fps is not None:
            actual_fps = 1 / dt_val_s
            # Highlight in yellow when the loop runs noticeably slower than the target fps.
            if actual_fps < fps - 1:
                info_str = colored(info_str, "yellow")
        log_items.append(info_str)

    # total step time displayed in milliseconds and its frequency
    log_dt("dt", dt_s)

    # TODO(aliberts): move robot-specific logs logic in robot.print_logs()
    if not robot.robot_type.startswith("stretch"):
        for name in robot.leader_arms:
            key = f"read_leader_{name}_pos_dt_s"
            if key in robot.logs:
                log_dt("dtRlead", robot.logs[key])

        for name in robot.follower_arms:
            key = f"write_follower_{name}_goal_pos_dt_s"
            if key in robot.logs:
                log_dt("dtWfoll", robot.logs[key])

            key = f"read_follower_{name}_pos_dt_s"
            if key in robot.logs:
                log_dt("dtRfoll", robot.logs[key])

        for name in robot.cameras:
            key = f"read_camera_{name}_dt_s"
            if key in robot.logs:
                log_dt(f"dtR{name}", robot.logs[key])

    info_str = " ".join(log_items)
    logging.info(info_str)


@cache
def is_headless():
    """Detects if python is running without a monitor."""
    try:
        import pynput  # noqa

        return False
    except Exception:
        print(
            "Error trying to import pynput. Switching to headless mode. "
            "As a result, the video stream from the cameras won't be shown, "
            "and you won't be able to change the control flow with keyboards. "
            "For more info, see traceback below.\n"
        )
        traceback.print_exc()
        print()
        return True


def predict_action(
    observation: dict[str, np.ndarray],
    policy: PreTrainedPolicy,
    device: torch.device,
    use_amp: bool,
    task: str | None = None,
    robot_type: str | None = None,
):
    """Run one inference step of `policy` on a single (unbatched) observation.

    Returns the selected action as a CPU tensor with the batch dimension removed.
    """
    observation = copy(observation)
    with (
        torch.inference_mode(),
        torch.autocast(device_type=device.type) if device.type == "cuda" and use_amp else nullcontext(),
    ):
        # Convert to pytorch format: channel first and float32 in [0,1] with batch dimension
        for name in observation:
            observation[name] = torch.from_numpy(observation[name])
            if "image" in name:
                observation[name] = observation[name].type(torch.float32) / 255
                observation[name] = observation[name].permute(2, 0, 1).contiguous()
            observation[name] = observation[name].unsqueeze(0)
            observation[name] = observation[name].to(device)

        observation["task"] = task if task else ""
        observation["robot_type"] = robot_type if robot_type else ""

        # Compute the next action with the policy
        # based on the current observation
        action = policy.select_action(observation)

        # Remove batch dimension
        action = action.squeeze(0)

        # Move to cpu, if not already the case
        action = action.to("cpu")

    return action


def init_keyboard_listener():
    """Install keyboard hooks to control recording (exit early, re-record, stop).

    Returns a `(listener, events)` pair; `listener` is None in headless environments.
    """
    # Allow to exit early while recording an episode or resetting the environment,
    # by tapping the right arrow key '->'. This might require a sudo permission
    # to allow your terminal to monitor keyboard events.
    events = {}
    events["exit_early"] = False
    events["rerecord_episode"] = False
    events["stop_recording"] = False

    if is_headless():
        logging.warning(
            "Headless environment detected. On-screen cameras display and keyboard inputs will not be available."
        )
        listener = None
        return listener, events

    # Only import pynput if not in a headless environment
    from pynput import keyboard

    def on_press(key):
        try:
            if key == keyboard.Key.right:
                print("Right arrow key pressed. Exiting loop...")
                events["exit_early"] = True
            elif key == keyboard.Key.left:
                print("Left arrow key pressed. Exiting loop and rerecord the last episode...")
                events["rerecord_episode"] = True
                events["exit_early"] = True
            elif key == keyboard.Key.esc:
                print("Escape key pressed. Stopping data recording...")
                events["stop_recording"] = True
                events["exit_early"] = True
        except Exception as e:
            print(f"Error handling key press: {e}")

    listener = keyboard.Listener(on_press=on_press)
    listener.start()

    return listener, events


def sanity_check_dataset_name(repo_id, policy_cfg):
    """Enforce the convention that evaluation datasets (and only those) are named `eval_*`.

    Raises:
        ValueError: the `eval_` prefix and the presence of a policy config disagree.
    """
    _, dataset_name = repo_id.split("/")
    # either repo_id doesnt start with "eval_" and there is no policy
    # or repo_id starts with "eval_" and there is a policy

    # Check if dataset_name starts with "eval_" but policy is missing
    if dataset_name.startswith("eval_") and policy_cfg is None:
        # Fix: the previous message formatted `policy_cfg.type` while `policy_cfg` is
        # None in this branch, which raised AttributeError instead of ValueError.
        raise ValueError(
            f"Your dataset name begins with 'eval_' ({dataset_name}), but no policy is provided."
        )

    # Check if dataset_name does not start with "eval_" but policy is provided
    if not dataset_name.startswith("eval_") and policy_cfg is not None:
        raise ValueError(
            f"Your dataset name does not begin with 'eval_' ({dataset_name}), but a policy is provided ({policy_cfg.type})."
        )


def sanity_check_dataset_robot_compatibility(
    dataset: LeRobotDataset, robot: Robot, fps: int, features: dict
) -> None:
    """Check that an existing dataset's metadata matches the current robot/fps/features setup."""
    fields = [
        ("robot_type", dataset.meta.robot_type, robot.robot_type),
        ("fps", dataset.fps, fps),
        ("features", dataset.features, {**features, **DEFAULT_FEATURES}),
    ]

    mismatches = []
    for field, dataset_value, present_value in fields:
        # The 'info' sub-tree is allowed to differ between runs.
        diff = DeepDiff(dataset_value, present_value, exclude_regex_paths=[r".*\['info'\]$"])
        if diff:
            mismatches.append(f"{field}: expected {present_value}, got {dataset_value}")

    if mismatches:
        raise ValueError(
            "Dataset metadata compatibility check failed with mismatches:\n" + "\n".join(mismatches)
        )
lerobot/src/lerobot/utils/control_utils.py/0
{ "file_path": "lerobot/src/lerobot/utils/control_utils.py", "repo_id": "lerobot", "token_count": 3060 }
226
# Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """End-to-end test of the asynchronous inference stack (client ↔ server). This test spins up a lightweight gRPC `PolicyServer` instance with a stubbed policy network and launches a `RobotClient` that uses a `MockRobot`. The goal is to exercise the full communication loop: 1. Client sends policy specification → Server 2. Client streams observations → Server 3. Server streams action chunks → Client 4. Client executes received actions The test succeeds if at least one action is executed and the server records at least one predicted timestep - demonstrating that the gRPC round-trip works end-to-end using real (but lightweight) protocol messages. 
""" from __future__ import annotations import threading from concurrent import futures import pytest import torch # Skip entire module if grpc is not available pytest.importorskip("grpc") # ----------------------------------------------------------------------------- # End-to-end test # ----------------------------------------------------------------------------- def test_async_inference_e2e(monkeypatch): """Tests the full asynchronous inference pipeline.""" # Import grpc-dependent modules inside the test function import grpc from lerobot.robots.utils import make_robot_from_config from lerobot.scripts.server.configs import PolicyServerConfig, RobotClientConfig from lerobot.scripts.server.helpers import map_robot_keys_to_lerobot_features from lerobot.scripts.server.policy_server import PolicyServer from lerobot.scripts.server.robot_client import RobotClient from lerobot.transport import ( services_pb2, # type: ignore services_pb2_grpc, # type: ignore ) from tests.mocks.mock_robot import MockRobotConfig # Create a stub policy similar to test_policy_server.py class MockPolicy: """A minimal mock for an actual policy, returning zeros.""" class _Config: robot_type = "dummy_robot" @property def image_features(self): """Empty image features since this test doesn't use images.""" return {} def __init__(self): self.config = self._Config() def to(self, *args, **kwargs): return self def model(self, batch): # Return a chunk of 20 dummy actions. batch_size = len(batch["robot_type"]) return torch.zeros(batch_size, 20, 6) # ------------------------------------------------------------------ # 1. Create PolicyServer instance with mock policy # ------------------------------------------------------------------ policy_server_config = PolicyServerConfig(host="localhost", port=9999) policy_server = PolicyServer(policy_server_config) # Replace the real policy with our fast, deterministic stub. 
policy_server.policy = MockPolicy() policy_server.actions_per_chunk = 20 policy_server.device = "cpu" # Set up robot config and features robot_config = MockRobotConfig() mock_robot = make_robot_from_config(robot_config) lerobot_features = map_robot_keys_to_lerobot_features(mock_robot) policy_server.lerobot_features = lerobot_features # Force server to produce deterministic action chunks in test mode policy_server.policy_type = "act" def _fake_get_action_chunk(_self, _obs, _type="test"): action_dim = 6 batch_size = 1 actions_per_chunk = policy_server.actions_per_chunk return torch.zeros(batch_size, actions_per_chunk, action_dim) monkeypatch.setattr(PolicyServer, "_get_action_chunk", _fake_get_action_chunk, raising=True) # Bypass potentially heavy model loading inside SendPolicyInstructions def _fake_send_policy_instructions(self, request, context): # noqa: N802 return services_pb2.Empty() monkeypatch.setattr(PolicyServer, "SendPolicyInstructions", _fake_send_policy_instructions, raising=True) # Build gRPC server running a PolicyServer server = grpc.server(futures.ThreadPoolExecutor(max_workers=1, thread_name_prefix="policy_server")) services_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server) # Use the host/port specified in the fixture's config server_address = f"{policy_server.config.host}:{policy_server.config.port}" server.add_insecure_port(server_address) server.start() # ------------------------------------------------------------------ # 2. 
Create a RobotClient around the MockRobot # ------------------------------------------------------------------ client_config = RobotClientConfig( server_address=server_address, robot=robot_config, chunk_size_threshold=0.0, policy_type="test", pretrained_name_or_path="test", actions_per_chunk=20, verify_robot_cameras=False, ) client = RobotClient(client_config) assert client.start(), "Client failed initial handshake with the server" # Track action chunks received without modifying RobotClient action_chunks_received = {"count": 0} original_aggregate = client._aggregate_action_queues def counting_aggregate(*args, **kwargs): action_chunks_received["count"] += 1 return original_aggregate(*args, **kwargs) monkeypatch.setattr(client, "_aggregate_action_queues", counting_aggregate) # Start client threads action_thread = threading.Thread(target=client.receive_actions, daemon=True) control_thread = threading.Thread(target=client.control_loop, args=({"task": ""}), daemon=True) action_thread.start() control_thread.start() # ------------------------------------------------------------------ # 3. System exchanges a few messages # ------------------------------------------------------------------ # Wait for 5 seconds server.wait_for_termination(timeout=5) assert action_chunks_received["count"] > 0, "Client did not receive any action chunks" assert len(policy_server._predicted_timesteps) > 0, "Server did not record any predicted timesteps" # ------------------------------------------------------------------ # 4. Stop the system # ------------------------------------------------------------------ client.stop() action_thread.join() control_thread.join() policy_server.stop() server.stop(grace=None)
lerobot/tests/async_inference/test_e2e.py/0
{ "file_path": "lerobot/tests/async_inference/test_e2e.py", "repo_id": "lerobot", "token_count": 2189 }
227
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest

from lerobot.scripts.visualize_dataset import visualize_dataset


@pytest.mark.skip("TODO: add dummy videos")
def test_visualize_local_dataset(tmp_path, lerobot_dataset_factory):
    """Visualize episode 0 of a factory-built local dataset and check the saved .rrd exists."""
    dataset_root = tmp_path / "dataset"
    out_dir = tmp_path / "outputs"
    dataset = lerobot_dataset_factory(root=dataset_root)

    saved_rrd = visualize_dataset(
        dataset,
        episode_index=0,
        batch_size=32,
        save=True,
        output_dir=out_dir,
    )

    assert saved_rrd.exists()
lerobot/tests/datasets/test_visualize_dataset.py/0
{ "file_path": "lerobot/tests/datasets/test_visualize_dataset.py", "repo_id": "lerobot", "token_count": 393 }
228
#!/usr/bin/env python

# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unit tests for the MotorsBus control-table helpers and the read/write/sync_read/
# sync_write front-end methods, using MockMotorsBus with the low-level transport
# methods patched out so only argument plumbing and normalization hooks are checked.

import re
from unittest.mock import patch

import pytest

from lerobot.motors.motors_bus import (
    Motor,
    MotorNormMode,
    assert_same_address,
    get_address,
    get_ctrl_table,
)
from tests.mocks.mock_motors_bus import (
    DUMMY_CTRL_TABLE_1,
    DUMMY_CTRL_TABLE_2,
    DUMMY_MODEL_CTRL_TABLE,
    MockMotorsBus,
)


@pytest.fixture
def dummy_motors() -> dict[str, Motor]:
    # Three motors spanning two models and two normalization modes.
    return {
        "dummy_1": Motor(1, "model_2", MotorNormMode.RANGE_M100_100),
        "dummy_2": Motor(2, "model_3", MotorNormMode.RANGE_M100_100),
        "dummy_3": Motor(3, "model_2", MotorNormMode.RANGE_0_100),
    }


def test_get_ctrl_table():
    # Known model resolves to its control table.
    model = "model_1"
    ctrl_table = get_ctrl_table(DUMMY_MODEL_CTRL_TABLE, model)
    assert ctrl_table == DUMMY_CTRL_TABLE_1


def test_get_ctrl_table_error():
    # Unknown model raises KeyError with the model name in the message.
    model = "model_99"
    with pytest.raises(KeyError, match=f"Control table for {model=} not found."):
        get_ctrl_table(DUMMY_MODEL_CTRL_TABLE, model)


def test_get_address():
    # get_address returns the (address, byte-length) pair for a data name.
    addr, n_bytes = get_address(DUMMY_MODEL_CTRL_TABLE, "model_1", "Firmware_Version")
    assert addr == 0
    assert n_bytes == 1


def test_get_address_error():
    # Data name missing from the model's control table raises KeyError.
    model = "model_1"
    data_name = "Lock"
    with pytest.raises(KeyError, match=f"Address for '{data_name}' not found in {model} control table."):
        get_address(DUMMY_MODEL_CTRL_TABLE, "model_1", data_name)


def test_assert_same_address():
    # Does not raise when all models agree on address and byte length.
    models = ["model_1", "model_2"]
    assert_same_address(DUMMY_MODEL_CTRL_TABLE, models, "Present_Position")


def test_assert_same_length_different_addresses():
    # Same byte length but differing addresses across models is unsupported.
    models = ["model_1", "model_2"]
    with pytest.raises(
        NotImplementedError,
        match=re.escape("At least two motor models use a different address"),
    ):
        assert_same_address(DUMMY_MODEL_CTRL_TABLE, models, "Model_Number")


def test_assert_same_address_different_length():
    # Same address but differing byte lengths across models is unsupported.
    models = ["model_1", "model_2"]
    with pytest.raises(
        NotImplementedError,
        match=re.escape("At least two motor models use a different bytes representation"),
    ):
        assert_same_address(DUMMY_MODEL_CTRL_TABLE, models, "Goal_Position")


def test__serialize_data_invalid_length():
    # Only supported byte lengths can be serialized (3 is not one of them).
    bus = MockMotorsBus("", {})
    with pytest.raises(NotImplementedError):
        bus._serialize_data(100, 3)


def test__serialize_data_negative_numbers():
    # Negative values are rejected (serialization is unsigned).
    bus = MockMotorsBus("", {})
    with pytest.raises(ValueError):
        bus._serialize_data(-1, 1)


def test__serialize_data_large_number():
    # Values beyond the byte-length capacity are rejected.
    bus = MockMotorsBus("", {})
    with pytest.raises(ValueError):
        bus._serialize_data(2**32, 4)  # 4-byte max is 0xFFFFFFFF


@pytest.mark.parametrize(
    "data_name, id_, value",
    [
        ("Firmware_Version", 1, 14),
        ("Model_Number", 1, 5678),
        ("Present_Position", 2, 1337),
        ("Present_Velocity", 3, 42),
    ],
)
def test_read(data_name, id_, value, dummy_motors):
    # read() must forward address/length/id to _read, then decode sign and
    # (for normalized data names only) normalize the raw value.
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    with (
        patch.object(MockMotorsBus, "_read", return_value=(value, 0, 0)) as mock__read,
        patch.object(MockMotorsBus, "_decode_sign", return_value={id_: value}) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value={id_: value}) as mock__normalize,
    ):
        returned_value = bus.read(data_name, f"dummy_{id_}")

    assert returned_value == value
    mock__read.assert_called_once_with(
        addr,
        length,
        id_,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to read '{data_name}' on {id_=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, {id_: value})
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with({id_: value})


@pytest.mark.parametrize(
    "data_name, id_, value",
    [
        ("Goal_Position", 1, 1337),
        ("Goal_Velocity", 2, 3682),
        ("Lock", 3, 1),
    ],
)
def test_write(data_name, id_, value, dummy_motors):
    # write() mirrors read(): encode sign and (if applicable) unnormalize,
    # then forward everything to _write.
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    with (
        patch.object(MockMotorsBus, "_write", return_value=(0, 0)) as mock__write,
        patch.object(MockMotorsBus, "_encode_sign", return_value={id_: value}) as mock__encode_sign,
        patch.object(MockMotorsBus, "_unnormalize", return_value={id_: value}) as mock__unnormalize,
    ):
        bus.write(data_name, f"dummy_{id_}", value)

    mock__write.assert_called_once_with(
        addr,
        length,
        id_,
        value,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to write '{data_name}' on {id_=} with '{value}' after 1 tries.",
    )
    mock__encode_sign.assert_called_once_with(data_name, {id_: value})
    if data_name in bus.normalized_data:
        mock__unnormalize.assert_called_once_with({id_: value})


@pytest.mark.parametrize(
    "data_name, id_, value",
    [
        ("Firmware_Version", 1, 14),
        ("Model_Number", 1, 5678),
        ("Present_Position", 2, 1337),
        ("Present_Velocity", 3, 42),
    ],
)
def test_sync_read_by_str(data_name, id_, value, dummy_motors):
    # sync_read with a single motor name returns a {name: value} dict.
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids = [id_]
    expected_value = {f"dummy_{id_}": value}
    with (
        patch.object(MockMotorsBus, "_sync_read", return_value=({id_: value}, 0)) as mock__sync_read,
        patch.object(MockMotorsBus, "_decode_sign", return_value={id_: value}) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value={id_: value}) as mock__normalize,
    ):
        returned_dict = bus.sync_read(data_name, f"dummy_{id_}")

    assert returned_dict == expected_value
    mock__sync_read.assert_called_once_with(
        addr,
        length,
        ids,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync read '{data_name}' on {ids=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, {id_: value})
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with({id_: value})


@pytest.mark.parametrize(
    "data_name, ids_values",
    [
        ("Model_Number", {1: 5678}),
        ("Present_Position", {1: 1337, 2: 42}),
        ("Present_Velocity", {1: 1337, 2: 42, 3: 4016}),
    ],
    ids=["1 motor", "2 motors", "3 motors"],
)
def test_sync_read_by_list(data_name, ids_values, dummy_motors):
    # sync_read with an explicit list of motor names.
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids = list(ids_values)
    expected_values = {f"dummy_{id_}": val for id_, val in ids_values.items()}
    with (
        patch.object(MockMotorsBus, "_sync_read", return_value=(ids_values, 0)) as mock__sync_read,
        patch.object(MockMotorsBus, "_decode_sign", return_value=ids_values) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value=ids_values) as mock__normalize,
    ):
        returned_dict = bus.sync_read(data_name, [f"dummy_{id_}" for id_ in ids])

    assert returned_dict == expected_values
    mock__sync_read.assert_called_once_with(
        addr,
        length,
        ids,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync read '{data_name}' on {ids=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with(ids_values)


@pytest.mark.parametrize(
    "data_name, ids_values",
    [
        ("Model_Number", {1: 5678, 2: 5799, 3: 5678}),
        ("Present_Position", {1: 1337, 2: 42, 3: 4016}),
        ("Goal_Position", {1: 4008, 2: 199, 3: 3446}),
    ],
    ids=["Model_Number", "Present_Position", "Goal_Position"],
)
def test_sync_read_by_none(data_name, ids_values, dummy_motors):
    # sync_read with no motor argument defaults to all registered motors.
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids = list(ids_values)
    expected_values = {f"dummy_{id_}": val for id_, val in ids_values.items()}
    with (
        patch.object(MockMotorsBus, "_sync_read", return_value=(ids_values, 0)) as mock__sync_read,
        patch.object(MockMotorsBus, "_decode_sign", return_value=ids_values) as mock__decode_sign,
        patch.object(MockMotorsBus, "_normalize", return_value=ids_values) as mock__normalize,
    ):
        returned_dict = bus.sync_read(data_name)

    assert returned_dict == expected_values
    mock__sync_read.assert_called_once_with(
        addr,
        length,
        ids,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync read '{data_name}' on {ids=} after 1 tries.",
    )
    mock__decode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__normalize.assert_called_once_with(ids_values)


@pytest.mark.parametrize(
    "data_name, value",
    [
        ("Goal_Position", 500),
        ("Goal_Velocity", 4010),
        ("Lock", 0),
    ],
)
def test_sync_write_by_single_value(data_name, value, dummy_motors):
    # A scalar value is broadcast to every registered motor id.
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    ids_values = {m.id: value for m in dummy_motors.values()}
    with (
        patch.object(MockMotorsBus, "_sync_write", return_value=(ids_values, 0)) as mock__sync_write,
        patch.object(MockMotorsBus, "_encode_sign", return_value=ids_values) as mock__encode_sign,
        patch.object(MockMotorsBus, "_unnormalize", return_value=ids_values) as mock__unnormalize,
    ):
        bus.sync_write(data_name, value)

    mock__sync_write.assert_called_once_with(
        addr,
        length,
        ids_values,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync write '{data_name}' with {ids_values=} after 1 tries.",
    )
    mock__encode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__unnormalize.assert_called_once_with(ids_values)


@pytest.mark.parametrize(
    "data_name, ids_values",
    [
        ("Goal_Position", {1: 1337, 2: 42, 3: 4016}),
        ("Goal_Velocity", {1: 50, 2: 83, 3: 2777}),
        ("Lock", {1: 0, 2: 0, 3: 1}),
    ],
    ids=["Goal_Position", "Goal_Velocity", "Lock"],
)
def test_sync_write_by_value_dict(data_name, ids_values, dummy_motors):
    # A {motor_name: value} dict is translated to {id: value} before _sync_write.
    bus = MockMotorsBus("/dev/dummy-port", dummy_motors)
    bus.connect(handshake=False)
    addr, length = DUMMY_CTRL_TABLE_2[data_name]
    values = {f"dummy_{id_}": val for id_, val in ids_values.items()}
    with (
        patch.object(MockMotorsBus, "_sync_write", return_value=(ids_values, 0)) as mock__sync_write,
        patch.object(MockMotorsBus, "_encode_sign", return_value=ids_values) as mock__encode_sign,
        patch.object(MockMotorsBus, "_unnormalize", return_value=ids_values) as mock__unnormalize,
    ):
        bus.sync_write(data_name, values)

    mock__sync_write.assert_called_once_with(
        addr,
        length,
        ids_values,
        num_retry=0,
        raise_on_error=True,
        err_msg=f"Failed to sync write '{data_name}' with {ids_values=} after 1 tries.",
    )
    mock__encode_sign.assert_called_once_with(data_name, ids_values)
    if data_name in bus.normalized_data:
        mock__unnormalize.assert_called_once_with(ids_values)
lerobot/tests/motors/test_motors_bus.py/0
{ "file_path": "lerobot/tests/motors/test_motors_bus.py", "repo_id": "lerobot", "token_count": 5342 }
229
#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib

import gymnasium as gym
import pytest

import lerobot
from lerobot.policies.act.modeling_act import ACTPolicy
from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
from lerobot.policies.tdmpc.modeling_tdmpc import TDMPCPolicy
from lerobot.policies.vqbet.modeling_vqbet import VQBeTPolicy
from tests.utils import require_env


@pytest.mark.parametrize("env_name, task_name", lerobot.env_task_pairs)
@require_env
def test_available_env_task(env_name: str, task_name: list):
    """
    This test verifies that all environments listed in `lerobot/__init__.py` can
    be successfully imported — if they're installed — and that their
    `available_tasks_per_env` are valid.
    """
    module_name = f"gym_{env_name}"
    importlib.import_module(module_name)
    handle = f"{module_name}/{task_name}"
    assert handle in gym.envs.registry, handle


def test_available_policies():
    """
    This test verifies that the class attribute `name` for all policies is
    consistent with those listed in `lerobot/__init__.py`.
    """
    policies = [cls.name for cls in (ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy)]
    assert set(policies) == set(lerobot.available_policies), policies


def test_print():
    """Smoke test: every public availability registry is printable."""
    registries = (
        lerobot.available_envs,
        lerobot.available_tasks_per_env,
        lerobot.available_datasets,
        lerobot.available_datasets_per_env,
        lerobot.available_real_world_datasets,
        lerobot.available_policies,
        lerobot.available_policies_per_env,
    )
    for registry in registries:
        print(registry)
lerobot/tests/test_available.py/0
{ "file_path": "lerobot/tests/test_available.py", "repo_id": "lerobot", "token_count": 766 }
230
# coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Benchmark script for the code_reward function with E2B.

This script measures the performance of the code_reward function with varying numbers of samples
and parallelization levels. Each sample is a CodeForces problem with a gold standard solution that
is executed against a set of public test cases.
"""

import time

from datasets import load_dataset
from dotenv import load_dotenv
from tqdm.auto import tqdm

# Load environment variables (e.g. the E2B API key) *before* importing open_r1.rewards,
# which may read them at import time.
load_dotenv()
from open_r1.rewards import code_reward  # noqa: E402


def benchmark_code_reward(example):
    """Score one example's gold solution and record the reward and wall-clock time.

    Intended for use with `datasets.Dataset.map`; adds "test_reward" and
    "reward_time" columns to the example.
    """
    start_time = time.time()
    test_completions = [[{"content": example["gold_standard_solution"]}]]
    reward_kwargs = {"verification_info": [example["verification_info"]]}
    rewards = code_reward(test_completions, **reward_kwargs)
    end_time = time.time()
    example["test_reward"] = rewards[0]
    example["reward_time"] = end_time - start_time
    return example


if __name__ == "__main__":
    # Parallelization levels to benchmark for each sample size.
    parallel_dict = {
        16: [1, 4, 16],
        64: [4, 16, 64],
        256: [16, 64, 96],  # cap at 96 as PRO account is limited to 100
    }

    # Load the dataset once: the original reloaded it for every parallelization
    # level, which is loop-invariant work. The seeded shuffle is deterministic,
    # so the selected examples are identical to before.
    train_split = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated")["train"]

    # Store results for table formatting
    results = []

    for num_samples in tqdm([16, 64, 256], desc="Benchmarking samples"):
        code_dataset = train_split.shuffle(seed=42).select(range(num_samples))
        test_completions = [
            [{"content": example["gold_standard_solution"]}] for example in code_dataset
        ]
        reward_kwargs = {
            "verification_info": [example["verification_info"] for example in code_dataset]
        }

        for num_parallel in parallel_dict[num_samples]:
            start_time = time.time()
            rewards = code_reward(test_completions, num_parallel=num_parallel, **reward_kwargs)
            execution_time = time.time() - start_time

            # Calculate some statistics about rewards
            mean_reward = sum(rewards) / len(rewards)
            min_reward = min(rewards)
            max_reward = max(rewards)

            # Store results
            results.append(
                {
                    "num_samples": num_samples,
                    "num_parallel": num_parallel,
                    "execution_time": execution_time,
                    "mean_reward": mean_reward,
                    "min_reward": min_reward,
                    "max_reward": max_reward,
                }
            )

    # Emit a markdown table summarizing the benchmark.
    print("\n## Benchmark Results\n")
    print("| Sample Size | Parallelization | Execution Time (s) | Mean Reward | Min Reward | Max Reward |")
    print("|:-----------:|:---------------:|------------------:|:-----------:|:-----------:|:-----------:|")

    for result in results:
        print(
            f"| {result['num_samples']:^11} | {result['num_parallel']:^15} | {result['execution_time']:17.2f} | {result['mean_reward']:^11.4f} | {result['min_reward']:^11.4f} | {result['max_reward']:^11.4f} |"
        )
open-r1/scripts/benchmark_e2b.py/0
{ "file_path": "open-r1/scripts/benchmark_e2b.py", "repo_id": "open-r1", "token_count": 1377 }
231
#!/bin/bash
#SBATCH --ntasks-per-node=1
#SBATCH --gres=gpu:8
#SBATCH --partition=hopper-prod
#SBATCH --output=./logs/%x-%j.out
#SBATCH --error=./logs/%x-%j.err
#SBATCH --requeue
#SBATCH --time=1-00:00:00

# Specific configuration optimized for the Hugging Face Compute Cluster
# Be ye warned this may not work on other clusters!
module load cuda/12.4

# Refresh Weka on h4 cache
echo "Refreshing Weka filesystem..."
find -L /fsx/h4/ -type f | xargs -d '\n' -r -n512 -P64 weka fs tier fetch

# Needed for vLLM
export VLLM_WORKER_MULTIPROC_METHOD=spawn

set -x -e

source ~/.bashrc
source openr1/bin/activate

TASK_NAME=$1
TASKS=$2
MODEL_ID=$3
MODEL_REVISION=$4
# Optional args
# Bug fix: "[ -z "$5"]" was missing the space before "]", making the test malformed
# whenever the argument was non-empty; it only worked by falling through "||".
[ -z "$5" ] && TENSOR_PARALLEL=False || TENSOR_PARALLEL=$5
[ -z "$6" ] && TRUST_REMOTE_CODE=False || TRUST_REMOTE_CODE=$6
# $7 is reserved for system_prompt, see line 51
NUM_GPUS=$(nvidia-smi -L | wc -l)

# Use TP to shard model across GPUs
if [ "$TENSOR_PARALLEL" = "True" ]; then
    MODEL_ARGS="model_name=$MODEL_ID,revision=$MODEL_REVISION,trust_remote_code=$TRUST_REMOTE_CODE,dtype=bfloat16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
else
    MODEL_ARGS="model_name=$MODEL_ID,revision=$MODEL_REVISION,trust_remote_code=$TRUST_REMOTE_CODE,dtype=bfloat16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
fi

LM_EVAL_REPO_ID="open-r1/open-r1-eval-leaderboard"
MODEL_NAME=$(echo $MODEL_ID | sed 's/\//_/g') # replaces / with _
DETAILS_REPO_ID="open-r1/details-$MODEL_NAME"
OUTPUT_DIR="eval_results/$MODEL_ID/$MODEL_REVISION/$TASK_NAME"

# We need this flag since we run this script from training jobs that use DeepSpeed and the env vars get progated which causes errors during evaluation
# Bug fix: without "export" this was a shell-local variable and never reached the
# lighteval child process, so the override had no effect.
export ACCELERATE_USE_DEEPSPEED=false

echo "Running lighteval script ..."
echo "Eval results will be saved to $OUTPUT_DIR"
lighteval vllm "$MODEL_ARGS" $TASKS \
    --use-chat-template \
    --output-dir $OUTPUT_DIR \
    --save-details \
    ${7:+--system-prompt "$(echo "$7" | base64 --decode)"}

OUTPUT_FILEPATHS=$(find $OUTPUT_DIR/results/ -type f \( -name "*.json" \))
for filepath in $OUTPUT_FILEPATHS; do
    echo "Uploading $filepath to Hugging Face Hub..."
    filename=$(basename -- "$filepath")
    for attempt in {1..20}; do
        if huggingface-cli upload --repo-type space --private $LM_EVAL_REPO_ID $filepath $OUTPUT_DIR/$filename; then
            echo "Upload succeeded for $filepath"
            break
        else
            echo "Upload failed for $filepath. Attempt $attempt of 20. Retrying in 5 seconds..."
            sleep 5
        fi
    done
done

echo "Uploading details to Hugging Face Hub..."
DETAILS_FILEPATHS=$(find $OUTPUT_DIR/details/ -type f \( -name "*.parquet" \))
echo "DETAILS_FILEPATHS: $DETAILS_FILEPATHS"
TIMESTAMP=$(date +"%Y-%m-%dT%H-%M-%S")
python scripts/upload_details.py --data_files $DETAILS_FILEPATHS --hub_repo_id $DETAILS_REPO_ID --config_name $MODEL_REVISION.$TASK_NAME.$TIMESTAMP

echo "Cleaning up ..."
rm -rf $OUTPUT_DIR

echo "Done!"
open-r1/slurm/evaluate.slurm/0
{ "file_path": "open-r1/slurm/evaluate.slurm", "repo_id": "open-r1", "token_count": 1321 }
232
from .data import get_dataset from .import_utils import is_e2b_available, is_morph_available from .model_utils import get_model, get_tokenizer __all__ = ["get_tokenizer", "is_e2b_available", "is_morph_available", "get_model", "get_dataset"]
open-r1/src/open_r1/utils/__init__.py/0
{ "file_path": "open-r1/src/open_r1/utils/__init__.py", "repo_id": "open-r1", "token_count": 85 }
233
# coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional

import requests


class RoutedMorphSandbox:
    """
    Client for the MorphCloud router service that mimics the API of MorphCloud's Sandbox.

    This class provides a simple interface to execute code via a central MorphCloud router, which manages sandbox
    creation and cleanup. It allows batch processing of multiple scripts in a single request for improved efficiency.

    Attributes:
        router_url (str): The URL (host:port) of the MorphCloud router service.
        timeout (int): Default execution timeout in seconds.
        request_timeout (int): Default HTTP request timeout in seconds.
    """

    def __init__(self, router_url: str, timeout: int = 300, request_timeout: int = 60):
        """
        Initialize the routed MorphCloud sandbox client.

        Args:
            router_url: The URL of the MorphCloud router, including host and port.
            timeout: Default execution timeout in seconds.
            request_timeout: Default HTTP request timeout in seconds.
        """
        self.router_url = router_url
        self.timeout = timeout
        self.request_timeout = request_timeout

    @staticmethod
    def _make_result(text: Optional[str], exception_str: Optional[str]):
        # Mirrors the ad-hoc result objects of MorphCloud's Sandbox API: a lightweight
        # object exposing `text` and `exception_str` attributes. Kept as a dynamic type
        # (rather than e.g. SimpleNamespace) to preserve the original object semantics.
        return type("obj", (object,), {"text": text, "exception_str": exception_str})

    @classmethod
    def _error_results(cls, error: str, count: int) -> List:
        # Log the error and produce one error result per submitted script, so the
        # returned list always aligns 1:1 with the caller's `scripts` input.
        print(error)
        return [cls._make_result(None, error) for _ in range(count)]

    def run_code(
        self,
        scripts: List[str],
        languages: Optional[List[str]] = None,
        timeout: Optional[int] = None,
        request_timeout: Optional[int] = None,
    ) -> List:
        """
        Execute multiple scripts using MorphCloud via the router.

        Args:
            scripts: List of code scripts to execute.
            languages: List of programming languages for each script. If None, defaults to Python for all scripts.
            timeout: Execution timeout in seconds. If None, uses the instance timeout.
            request_timeout: HTTP request timeout in seconds. If None, uses the instance request_timeout.

        Returns:
            List of execution results with `text` and `exception_str` properties, one per script. On any
            router/transport failure a list of error results (text=None) of the same length is returned
            instead of raising.
        """
        actual_timeout = timeout if timeout is not None else self.timeout
        actual_request_timeout = request_timeout if request_timeout is not None else self.request_timeout

        # Default to Python for all scripts if languages is not provided
        if languages is None:
            languages = ["python"] * len(scripts)

        payload = {
            "scripts": scripts,
            "languages": languages,
            "timeout": actual_timeout,
            "request_timeout": actual_request_timeout,
        }

        try:
            endpoint = f"http://{self.router_url}/execute_batch"
            response = requests.post(endpoint, json=payload, timeout=actual_request_timeout)

            if response.status_code != 200:
                return self._error_results(
                    f"Request to MorphCloud router failed with status code: {response.status_code}", len(scripts)
                )

            return [self._make_result(item.get("text"), item.get("exception_str")) for item in response.json()]
        except Exception as e:
            return self._error_results(f"Error communicating with MorphCloud router: {str(e)}", len(scripts))
open-r1/src/open_r1/utils/routed_morph.py/0
{ "file_path": "open-r1/src/open_r1/utils/routed_morph.py", "repo_id": "open-r1", "token_count": 1779 }
234
<!---
Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
you can install them with the following command, at the root of the code repository:

```bash
pip install -e ".[docs]"
```

Then you need to install our special tool that builds the documentation:

```bash
pip install git+https://github.com/huggingface/doc-builder
```

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing for instance). You don't have to commit the built documentation.

---

## Building the documentation

Once you have set up the `doc-builder` and additional packages, you can generate the documentation by
typing the following command:

```bash
doc-builder build peft docs/source/ --build_dir ~/tmp/test-build
```

You can adapt the `--build_dir` to set any temporary folder you prefer. This command will create it and generate
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
Markdown editor.
## Previewing the documentation To preview the docs, first install the `watchdog` module with: ```bash pip install watchdog ``` Then run the following command: ```bash doc-builder preview {package_name} {path_to_docs} ``` For example: ```bash doc-builder preview peft docs/source ``` The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives. --- **NOTE** The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again). --- ## Adding a new element to the navigation bar Accepted files are Markdown (.md or .mdx). Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/peft/blob/main/docs/source/_toctree.yml) file. ## Renaming section headers and moving sections It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor. 
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: ``` Sections that were moved: [ <a href="#section-b">Section A</a><a id="section-a"></a> ] ``` and of course, if you moved it to another file, then: ``` Sections that were moved: [ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ] ``` Use the relative style to link to the new file so that the versioned docs continue to work. ## Writing Documentation - Specification The `huggingface/peft` documentation follows the [Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings, although we can write them directly in Markdown. ### Adding a new tutorial Adding a new tutorial or section is done in two steps: - Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md). - Link that file in `./source/_toctree.yml` on the correct toc-tree. Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go into sections two, three, or four. ### Writing source documentation Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`. When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or function to be in the main package. If you want to create a link to some internal class or function, you need to provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with `utils.gather` in the description. 
To get rid of the path and only keep the name of the object you are linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description. The same works for methods so you can either use \[\`XXXClass.method\`\] or \[~\`XXXClass.method\`\]. #### Defining arguments in a method Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description: ``` Args: n_layers (`int`): The number of layers of the model. ``` If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary before writing the description after the argument. Finally, to maintain uniformity if any *one* description is too long to fit on one line, the rest of the parameters should follow suit and have an indention before their description. Here's an example showcasing everything so far: ``` Args: gradient_accumulation_steps (`int`, *optional*, default to 1): The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`. cpu (`bool`, *optional*): Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only. ``` For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the following signature: ``` def my_function(x: str = None, a: float = 1): ``` then its documentation should look like this: ``` Args: x (`str`, *optional*): This argument controls ... and has a description longer than 119 chars. a (`float`, *optional*, defaults to 1): This argument is used to ... and has a description longer than 119 chars. ``` Note that we always omit the "defaults to \`None\`" when None is the default for any argument. 
Also note that even if the first line describing your argument type and its default gets long, you can't break it into several lines. You can however write as many lines as you want in the indented description (see the example above with `gradient_accumulation_steps`).

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:


````
```python
# first line of code
# second line
# etc
```
````

#### Writing a return block

The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return.

Here's an example of a single value return:

```
    Returns:
        `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```

Here's an example of a tuple return, comprising several objects:

```
    Returns:
        `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
        - ** loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
          Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
          Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```

## Styling the docstring

We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library

This script may have some weird failures if you make a syntax mistake or if you uncover a bug.
Therefore, it's recommended to commit your changes before running `make style`, so you can revert the changes done by that script easily.

## Writing documentation examples

The syntax for example docstrings can look as follows:

```
    Example:

    ```python
    >>> import time
    >>> from accelerate import Accelerator
    >>> accelerator = Accelerator()
    >>> if accelerator.is_main_process:
    ...     time.sleep(2)
    ... else:
    ...     print("I'm waiting for the main process to finish its sleep...")
    >>> accelerator.wait_for_everyone()
    >>> # Should print on every process at the same time
    >>> print("Everyone is here")
    ```
```

The docstring should give a minimal, clear example of how the respective function
is to be used in inference and also include the expected (ideally sensible)
output.
Often, readers will try out the example before even going through the function
or class definitions. Therefore, it is of utmost importance that the example
works as expected.
peft/docs/README.md/0
{ "file_path": "peft/docs/README.md", "repo_id": "peft", "token_count": 2889 }
235
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quantization Quantization represents data with fewer bits, making it a useful technique for reducing memory-usage and accelerating inference especially when it comes to large language models (LLMs). There are several ways to quantize a model including: * optimizing which model weights are quantized with the [AWQ](https://hf.co/papers/2306.00978) algorithm * independently quantizing each row of a weight matrix with the [GPTQ](https://hf.co/papers/2210.17323) algorithm * quantizing to 8-bit and 4-bit precision with the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library * quantizing to as low as 2-bit precision with the [AQLM](https://huggingface.co/papers/2401.06118) algorithm However, after a model is quantized it isn't typically further trained for downstream tasks because training can be unstable due to the lower precision of the weights and activations. But since PEFT methods only add *extra* trainable parameters, this allows you to train a quantized model with a PEFT adapter on top! Combining quantization with PEFT can be a good strategy for training even the largest models on a single GPU. 
For example, [QLoRA](https://hf.co/papers/2305.14314) is a method that quantizes a model to 4-bits and then trains it with LoRA. This method allows you to finetune a 65B parameter model on a single 48GB GPU! In this guide, you'll see how to quantize a model to 4-bits and train it with LoRA. ## Quantize a model [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) is a quantization library with a Transformers integration. With this integration, you can quantize a model to 8 or 4-bits and enable many other options by configuring the [`~transformers.BitsAndBytesConfig`] class. For example, you can: * set `load_in_4bit=True` to quantize the model to 4-bits when you load it * set `bnb_4bit_quant_type="nf4"` to use a special 4-bit data type for weights initialized from a normal distribution * set `bnb_4bit_use_double_quant=True` to use a nested quantization scheme to quantize the already quantized weights * set `bnb_4bit_compute_dtype=torch.bfloat16` to use bfloat16 for faster computation ```py import torch from transformers import BitsAndBytesConfig config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16, ) ``` Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config) ``` Next, you should call the [`~peft.utils.prepare_model_for_kbit_training`] function to preprocess the quantized model for training. ```py from peft import prepare_model_for_kbit_training model = prepare_model_for_kbit_training(model) ``` Now that the quantized model is ready, let's set up a configuration. 
## LoraConfig Create a [`LoraConfig`] with the following parameters (or choose your own): ```py from peft import LoraConfig config = LoraConfig( r=16, lora_alpha=8, target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM" ) ``` Then use the [`get_peft_model`] function to create a [`PeftModel`] from the quantized model and configuration. ```py from peft import get_peft_model model = get_peft_model(model, config) ``` You're all set for training with whichever training method you prefer! ### LoftQ initialization [LoftQ](https://hf.co/papers/2310.08659) initializes LoRA weights such that the quantization error is minimized, and it can improve performance when training quantized models. To get started, follow [these instructions](https://github.com/huggingface/peft/tree/main/examples/loftq_finetuning). In general, for LoftQ to work best, it is recommended to target as many layers with LoRA as possible, since those not targeted cannot have LoftQ applied. This means that passing `LoraConfig(..., target_modules="all-linear")` will most likely give the best results. Also, you should use `nf4` as quant type in your quantization config when using 4bit quantization, i.e. `BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")`. ### QLoRA-style training QLoRA adds trainable weights to all the linear layers in the transformer architecture. Since the attribute names for these linear layers can vary across architectures, set `target_modules` to `"all-linear"` to add LoRA to all the linear layers: ```py config = LoraConfig(target_modules="all-linear", ...) ``` ## GPTQ quantization You can learn more about gptq based `[2, 3, 4, 8]` bits quantization at [GPTQModel](https://github.com/ModelCloud/GPTQModel) and the Transformers [GPTQ](https://huggingface.co/docs/transformers/quantization/gptq) doc. 
Post-quant training, PEFT can use both [GPTQModel](https://github.com/ModelCloud/GPTQModel) or [AutoGPTQ](https://github.com/autogptq/autogptq) libraries, but we recommend GPTQModel because AutoGPTQ will be deprecated in a future release. ```bash # gptqmodel install pip install gptqmodel --no-build-isolation ``` ```py from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig model_id = "facebook/opt-125m" tokenizer = AutoTokenizer.from_pretrained(model_id) gptq_config = GPTQConfig(bits=4, group_size=128, dataset="wikitext2", tokenizer=tokenizer) quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config) # save quantized model quantized_model.save_pretrained("./opt-125m-gptq") tokenizer.save_pretrained("./opt-125m-gptq") ``` Once quantized, you can post-train GPTQ models with PEFT APIs. ## AQLM quantization Additive Quantization of Language Models ([AQLM](https://huggingface.co/papers/2401.06118)) is a Large Language Models compression method. It quantizes multiple weights together and takes advantage of interdependencies between them. AQLM represents groups of 8-16 weights as a sum of multiple vector codes. This allows it to compress models down to as low as 2-bit with considerably low accuracy losses. Since the AQLM quantization process is computationally expensive, the use of prequantized models is recommended. A partial list of available models can be found in the official aqlm [repository](https://github.com/Vahe1994/AQLM). The models support LoRA adapter tuning. To tune the quantized model you'll need to install the `aqlm` inference library: `pip install aqlm>=1.0.2`. Finetuned LoRA adapters shall be saved separately, as merging them with AQLM quantized weights is not possible. 
```py quantized_model = AutoModelForCausalLM.from_pretrained( "BlackSamorez/Mixtral-8x7b-AQLM-2Bit-1x16-hf-test-dispatch", torch_dtype="auto", device_map="auto", low_cpu_mem_usage=True, ) peft_config = LoraConfig(...) quantized_model = get_peft_model(quantized_model, peft_config) ``` You can refer to the [Google Colab](https://colab.research.google.com/drive/12GTp1FCj5_0SnnNQH18h_2XFh9vS_guX?usp=sharing) example for an overview of AQLM+LoRA finetuning. ## EETQ quantization You can also perform LoRA fine-tuning on EETQ quantized models. [EETQ](https://github.com/NetEase-FuXi/EETQ) package offers simple and efficient way to perform 8-bit quantization, which is claimed to be faster than the `LLM.int8()` algorithm. First, make sure that you have a transformers version that is compatible with EETQ (e.g. by installing it from latest pypi or from source). ```py import torch from transformers import EetqConfig config = EetqConfig("int8") ``` Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method. ```py from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config) ``` and create a `LoraConfig` and pass it to `get_peft_model`: ```py from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=8, target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM" ) model = get_peft_model(model, config) ``` ## HQQ quantization The models that are quantized using Half-Quadratic Quantization of Large Machine Learning Models ([HQQ](https://mobiusml.github.io/hqq_blog/)) support LoRA adapter tuning. To tune the quantized model, you'll need to install the `hqq` library with: `pip install hqq`. 
```python from hqq.engine.hf import HQQModelForCausalLM device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" quantized_model = HQQModelForCausalLM.from_quantized(save_dir_or_hfhub, device=device) peft_config = LoraConfig(...) quantized_model = get_peft_model(quantized_model, peft_config) ``` Or using transformers version that is compatible with HQQ (e.g. by installing it from latest pypi or from source). ```python from transformers import HqqConfig, AutoModelForCausalLM quant_config = HqqConfig(nbits=4, group_size=64) quantized_model = AutoModelForCausalLM.from_pretrained(save_dir_or_hfhub, device_map=device_map, quantization_config=quant_config) peft_config = LoraConfig(...) quantized_model = get_peft_model(quantized_model, peft_config) ``` ## torchao (PyTorch Architecture Optimization) PEFT supports models quantized with [torchao](https://github.com/pytorch/ao) ("ao") for int8 quantization. ```python from peft import LoraConfig, get_peft_model from transformers import AutoModelForCausalLM, TorchAoConfig model_id = ... quantization_config = TorchAoConfig(quant_type="int8_weight_only") base_model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) peft_config = LoraConfig(...) model = get_peft_model(base_model, peft_config) ``` ### Caveats: - Use the most recent versions of torchao (>= v0.4.0) and transformers (> 4.42). - Only linear layers are currently supported. - `quant_type = "int4_weight_only"` is currently not supported. - `NF4` is not implemented in transformers as of yet and is thus also not supported. - DoRA only works with `quant_type = "int8_weight_only"` at the moment. - There is explicit support for torchao when used with LoRA. However, when torchao quantizes a layer, its class does not change, only the type of the underlying tensor. For this reason, PEFT methods other than LoRA will generally also work with torchao, even if not explicitly supported. 
Be aware, however, that **merging only works correctly with LoRA and with `quant_type = "int8_weight_only"`**. If you use a different PEFT method or dtype, merging will likely result in an error, and even if it doesn't, the results will still be incorrect.

## INC quantization

Intel Neural Compressor ([INC](https://github.com/intel/neural-compressor)) enables model quantization for various devices, including Intel Gaudi accelerators (also known as HPU devices). You can perform LoRA fine-tuning on models that have been quantized using INC. To use INC with PyTorch models, install the library with: `pip install neural-compressor[pt]`.

Quantizing a model to FP8 precision for HPU devices can be done with the following single-step quantization workflow:

```python
import torch
from neural_compressor.torch.quantization import FP8Config, convert, finalize_calibration, prepare

quant_configs = {
  ...
}
config = FP8Config(**quant_configs)
```

Pass the config to the `prepare` method, run inference to gather calibration stats, and call `finalize_calibration` and `convert` methods to quantize model to FP8 precision:

```python
model = prepare(model, config)
# Run inference to collect calibration statistics
...
# Finalize calibration and convert the model to FP8 precision
finalize_calibration(model)
model = convert(model)
# Load PEFT LoRA adapter as usual
...
```

An example demonstrating how to load a PEFT LoRA adapter into an INC-quantized FLUX text-to-image model for HPU devices is provided [here](https://github.com/huggingface/peft/blob/main/examples/stable_diffusion/inc_flux_lora_hpu.py).

### Caveats:

- `merge()` and `unmerge()` methods are currently not supported for INC-quantized models.
- Currently, only **Linear** INC-quantized layers are supported when loading PEFT adapters.
## Other Supported PEFT Methods Besides LoRA, the following PEFT methods also support quantization: - **VeRA** (supports bitsandbytes quantization) - **AdaLoRA** (supports both bitsandbytes and GPTQ quantization) - **(IA)³** (supports bitsandbytes quantization) ## Next steps If you're interested in learning more about quantization, the following may be helpful: * Learn more details about QLoRA and check out some benchmarks on its impact in the [Making LLMs even more accessible with bitsandbytes, 4-bit quantization and QLoRA](https://huggingface.co/blog/4bit-transformers-bitsandbytes) blog post. * Read more about different quantization schemes in the Transformers [Quantization](https://hf.co/docs/transformers/main/quantization) guide.
peft/docs/source/developer_guides/quantization.md/0
{ "file_path": "peft/docs/source/developer_guides/quantization.md", "repo_id": "peft", "token_count": 4220 }
236
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Fine-tuning for controllable generation with BOFT (ControlNet) This guide demonstrates how to use BOFT, an orthogonal fine-tuning method, to fine-tune Stable Diffusion with either `stabilityai/stable-diffusion-2-1` or `runwayml/stable-diffusion-v1-5` model for controllable generation. By using BOFT from 🤗 PEFT, we can significantly reduce the number of trainable parameters while still achieving impressive results in various fine-tuning tasks across different foundation models. BOFT enhances model efficiency by integrating full-rank orthogonal matrices with a butterfly structure into specific model blocks, such as attention blocks, mirroring the approach used in LoRA. During fine-tuning, only these inserted matrices are trained, leaving the original model parameters untouched. During inference, the trainable BOFT parameters can be merged into the original model, eliminating any additional computational costs. As a member of the **orthogonal finetuning** class, BOFT presents a systematic and principled method for fine-tuning. It possesses several unique properties and has demonstrated superior performance compared to LoRA in a variety of scenarios. 
For further details on BOFT, please consult the [PEFT's GitHub repo's concept guide OFT](https://huggingface.co/docs/peft/index), the [original BOFT paper](https://huggingface.co/papers/2311.06243) and the [original OFT paper](https://huggingface.co/papers/2306.07280).

In this guide we provide a controllable generation (ControlNet) fine-tuning script that is available in [PEFT's GitHub repo examples](https://github.com/huggingface/peft/tree/main/examples/boft_controlnet). This implementation is adapted from [diffusers's ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) and [Hecong Wu's ControlLoRA](https://github.com/HighCWu/ControlLoRA). You can try it out and finetune on your custom images.

## Set up your environment

Start by cloning the PEFT repository:

```bash
git clone https://github.com/huggingface/peft
```

Navigate to the directory containing the training scripts for fine-tuning Dreambooth with BOFT:

```bash
cd peft/examples/boft_controlnet
```

Set up your environment: install PEFT, and all the required libraries. At the time of writing this guide we recommend installing PEFT from source.

```bash
conda create --name peft python=3.10
conda activate peft
conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=11.8 -c pytorch -c nvidia
conda install xformers -c xformers
pip install -r requirements.txt
pip install git+https://github.com/huggingface/peft
```

## Data

We use the [control-celeba-hq](https://huggingface.co/datasets/oftverse/control-celeba-hq) dataset for landmark-to-face controllable generation. We also provide evaluation scripts to evaluate the controllable generation performance. This task can be used to quantitatively compare different fine-tuning techniques.
```bash export DATASET_NAME="oftverse/control-celeba-hq" ``` ## Train controllable generation (ControlNet) with BOFT Start with setting some hyperparameters for BOFT: ```bash PEFT_TYPE="boft" BLOCK_NUM=8 BLOCK_SIZE=0 N_BUTTERFLY_FACTOR=0 ``` Here: Navigate to the directory containing the training scripts for fine-tuning Stable Diffusion with BOFT for controllable generation: ```bash ./train_controlnet.sh ``` or ```bash export MODEL_NAME="stabilityai/stable-diffusion-2-1" # export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATASET_NAME="oftverse/control-celeba-hq" export PROJECT_NAME="controlnet_${PEFT_TYPE}" export RUN_NAME="${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}" export CONTROLNET_PATH="" export OUTPUT_DIR="./output/${DATASET_NAME}/${RUN_NAME}" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --resume_from_checkpoint=$RESUME_PATH \ --controlnet_model_name_or_path=$CONTROLNET_PATH \ --output_dir=$OUTPUT_DIR \ --report_to="wandb" \ --dataset_name=$DATASET_NAME \ --resolution=512 \ --learning_rate=1e-5 \ --checkpointing_steps=5000 \ --max_train_steps=50000 \ --validation_steps=2000 \ --num_validation_images=12 \ --train_batch_size=4 \ --dataloader_num_workers=2 \ --seed="0" \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --wandb_project_name=$PROJECT_NAME \ --wandb_run_name=$RUN_NAME \ --enable_xformers_memory_efficient_attention \ --use_boft \ --boft_block_num=$BLOCK_NUM \ --boft_block_size=$BLOCK_SIZE \ --boft_n_butterfly_factor=$N_BUTTERFLY_FACTOR \ --boft_dropout=0.1 \ --boft_bias="boft_only" \ --report_to="wandb" \ ``` Run inference on the saved model to sample new images from the validation set: ```bash ./test_controlnet.sh ``` or ```bash ITER_NUM=50000 export MODEL_NAME="stabilityai/stable-diffusion-2-1" # export MODEL_NAME="runwayml/stable-diffusion-v1-5" export RUN_NAME="${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}" export DATASET_NAME="oftverse/control-celeba-hq" export 
CKPT_NAME="checkpoint-${ITER_NUM}" export OUTPUT_DIR="./output/${DATASET_NAME}/${RUN_NAME}/${CKPT_NAME}" export CONTROLNET_PATH="${OUTPUT_DIR}/controlnet/model.safetensors" export UNET_PATH="${OUTPUT_DIR}/unet/${RUN_NAME}" export RESULTS_PATH="${OUTPUT_DIR}/results" accelerate launch test_controlnet.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --controlnet_path=$CONTROLNET_PATH \ --unet_path=$UNET_PATH \ --adapter_name=$RUN_NAME \ --output_dir=$RESULTS_PATH \ --dataset_name=$DATASET_NAME \ ``` Run evaluation on the sampled images to evaluate the landmark reprojection error: ```bash ./eval.sh ``` or ```bash ITER_NUM=50000 export MODEL_NAME="stabilityai/stable-diffusion-2-1" # export MODEL_NAME="runwayml/stable-diffusion-v1-5" export RUN_NAME="${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}" export DATASET_NAME="oftverse/control-celeba-hq" export CKPT_NAME="checkpoint-${ITER_NUM}" export OUTPUT_DIR="./output/${DATASET_NAME}/${RUN_NAME}/${CKPT_NAME}" export CONTROLNET_PATH="${OUTPUT_DIR}/controlnet/model.safetensors" export UNET_PATH="${OUTPUT_DIR}/unet/${RUN_NAME}" accelerate launch eval.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --controlnet_path=$CONTROLNET_PATH \ --unet_path=$UNET_PATH \ --adapter_name=$RUN_NAME \ --output_dir=$OUTPUT_DIR \ --dataset_name=$DATASET_NAME \ --vis_overlays \ ```
peft/examples/boft_controlnet/boft_controlnet.md/0
{ "file_path": "peft/examples/boft_controlnet/boft_controlnet.md", "repo_id": "peft", "token_count": 2427 }
237
<jupyter_start><jupyter_code>from transformers import AutoModelForCausalLM from peft import get_peft_config, get_peft_model, PrefixTuningConfig, TaskType, PeftType import torch from datasets import load_dataset import os from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" model_name_or_path = "bigscience/bloomz-560m" tokenizer_name_or_path = "bigscience/bloomz-560m" peft_config = PrefixTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=30) dataset_name = "twitter_complaints" checkpoint_name = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}_v1.pt".replace( "/", "_" ) text_column = "Tweet text" label_column = "text_label" max_length = 64 lr = 3e-2 num_epochs = 50 batch_size = 8 dataset = load_dataset( "parquet", data_files={ "train": f"hf://datasets/ought/raft@refs/convert/parquet/{dataset_name}/train/0000.parquet", "test": f"hf://datasets/ought/raft@refs/convert/parquet/{dataset_name}/test/0000.parquet" } ) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] print(classes) dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) print(dataset) dataset["train"][0] # data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) print(target_max_length) def preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets, 
add_special_tokens=False) # don't add bos token because we concatenate with inputs for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id] # print(i, sample_input_ids, label_input_ids) model_inputs["input_ids"][i] = sample_input_ids + label_input_ids labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i]) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["train"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) def test_preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] model_inputs = tokenizer(inputs) # 
print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) return model_inputs test_dataset = dataset["test"].map( test_preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) next(iter(test_dataloader)) next(iter(train_dataloader)) len(test_dataloader) next(iter(test_dataloader)) # creating model model = AutoModelForCausalLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model model.peft_config # model # optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) # training and evaluation model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} # print(batch) # print(batch["input_ids"].shape) outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = 
model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}") model.eval() i = 16 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3 ) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>Hey @nytimes your link to cancel my subscription isn't working and nobody is answering the chat. Please don't play that kind of stupid game. {'input_ids': tensor([[227985, 5484, 915, 54078, 2566, 7782, 24502, 2632, 8989, 427, 36992, 2670, 140711, 21994, 10789, 530, 88399, 632, 183542, 368, 44799, 17, 29901, 5926, 7229, 861, 11596, 461, 78851, 14775, 17, 77658, 915, 210]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])} tensor([[227985, 5484, 915, 54078, 2566, 7782, 24502, 2632, 8989, 427, 36992, 2670, 140711, 21994, 10789, 530, 88399, 632, 183542, 368, 44799, 17, 29901, 5926, 7229, 861, 11596, 461, 78851, 14775, 17, 77658, 915, 210, 16449, 5952, 3]], device='cuda:0') ["Tweet text : Hey @nytimes your [...]<jupyter_text>You can push model to hub or save model locally. 
- Option1: Pushing the model to Hugging Face Hub```pythonmodel.push_to_hub( f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"), token = "hf_...")```token (`bool` or `str`, *optional*): `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. Or you can get your token from https://huggingface.co/settings/token```- Or save model locally```pythonpeft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_")model.save_pretrained(peft_model_id)```<jupyter_code># saving model peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) model.save_pretrained(peft_model_id) ckpt = f"{peft_model_id}/adapter_model.safetensors" !du -h $ckpt from peft import PeftModel, PeftConfig peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id) model.to(device) model.eval() i = 4 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3 ) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>@greateranglia Ok thanks... 
{'input_ids': tensor([[227985, 5484, 915, 2566, 14173, 2960, 29906, 387, 20706, 49337, 1369, 77658, 915, 210]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])} tensor([[227985, 5484, 915, 2566, 14173, 2960, 29906, 387, 20706, 49337, 1369, 77658, 915, 210, 1936, 106863, 3]], device='cuda:0') ['Tweet text : @greateranglia Ok thanks... Label : no complaint']
peft/examples/causal_language_modeling/peft_prefix_tuning_clm.ipynb/0
{ "file_path": "peft/examples/causal_language_modeling/peft_prefix_tuning_clm.ipynb", "repo_id": "peft", "token_count": 4824 }
238
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random

import numpy as np
import torch
from datasets import load_dataset

"""
doc https://huggingface.co/docs/datasets/loading
doc https://huggingface.co/docs/datasets/process
doc https://huggingface.co/blog/llama2#how-to-prompt-llama-2
"""


def set_seed(seed):
    """Seed all RNGs used in this module so sampling is reproducible.

    Fix: the Python `random` module was previously left unseeded even though
    `sample_train_loaders` / `get_calib_data` draw offsets with `random.randint`.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.random.manual_seed(seed)


def sample_train_loaders(name, tokenizer, nsamples=128, seed=0, seqlen=2048):
    """Sample `nsamples` random token windows of length `seqlen` from a corpus.

    Args:
        name: corpus identifier; any string containing "wikitext2" or "c4".
        tokenizer: a Hugging Face tokenizer (called with `return_tensors="pt"`).
        nsamples: number of windows to draw.
        seed: RNG seed for reproducible window offsets.
        seqlen: number of tokens kept per window.

    Returns:
        List of `input_ids` tensors of shape (1, <= seqlen).

    Raises:
        NotImplementedError: for unknown corpus names.
    """
    set_seed(seed)
    if "wikitext2" in name:
        traindata = load_dataset(
            "wikitext",
            "wikitext-2-raw-v1",
            split="train",
        )
        traindata = "\n\n".join(traindata["text"])
    elif "c4" in name:
        traindata = load_dataset(
            "allenai/c4",
            "allenai--c4",
            data_files={"train": "en/c4-train.00000-of-01024.json.gz"},
            split="train",
        )
        traindata = "\n\n".join(traindata["text"])
    else:
        raise NotImplementedError

    trainloader = []
    for _ in range(nsamples):
        # Sample a character window twice as long as needed so that, after
        # tokenization, at least `seqlen` tokens are available to truncate to.
        i = random.randint(0, len(traindata) - seqlen * 2 - 1)
        j = i + seqlen * 2
        trainenc = tokenizer(traindata[i:j], return_tensors="pt")
        inp = trainenc.input_ids[:, :seqlen]
        trainloader.append(inp)
    return trainloader


def get_redpajama_train(tokenizer, percent=10, seed=3, batch_size=128, max_length=2048):
    """Load a `percent`-sized slice of the RedPajama-1T sample and tokenize it.

    Note: `seed` is accepted for interface compatibility but the slice is
    deterministic (a prefix of the train split), so it is unused.
    """

    def tokenization(example):
        return tokenizer(example["text"], truncation=True, max_length=max_length)

    if percent != 100:
        # The sample split has ~850k rows; take a proportional prefix.
        split = f"train[:{int(850000 * percent / 100)}]"
    else:
        split = "train"
    dataset = load_dataset("togethercomputer/RedPajama-Data-1T-Sample", split=split)
    processed_dataset = dataset.map(tokenization, batched=True, batch_size=batch_size, num_proc=os.cpu_count())
    return processed_dataset


def get_english_quote(dataset_name, tokenizer):
    """Load a quotes dataset and tokenize its "quote" column; return the train split."""
    data = load_dataset(dataset_name)
    data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
    return data["train"]


def get_qat_dataset(name, tokenizer, data_percent):
    """Return a shuffled, tokenized dataset for quantization-aware training.

    Raises:
        NotImplementedError: for dataset names other than "red_pajama" and
            "Abirate/english_quotes".
    """
    if name == "red_pajama":
        data = get_redpajama_train(tokenizer, data_percent)
    elif name == "Abirate/english_quotes":
        data = get_english_quote(name, tokenizer)
    else:
        raise NotImplementedError
    data = data.shuffle()
    return data


# Llama-2 chat prompt template, see https://huggingface.co/blog/llama2#how-to-prompt-llama-2
llama_chat_format = """<s>[INST] <<SYS>>
"Below is an instruction that describes a task. Write a response that appropriately completes the request."
<</SYS>>

{instruction} [/INST] {response} </s>
"""


def _build_chat_samples(selected_data_dict, tokenizer, seqlen, get_instruction, get_response):
    """Format chat examples with `llama_chat_format`, tokenize and truncate them.

    Only examples without an extra "input" field are kept (instruction-only
    samples), mirroring the Alpaca convention. Returns a list of dicts with
    "input_ids" and "attention_mask" tensors of shape (1, <= seqlen).
    """
    traindataset = []
    for example in selected_data_dict:
        if example.get("input", "") == "":
            s = llama_chat_format.format(instruction=get_instruction(example), response=get_response(example))
            trainenc = tokenizer(s, return_tensors="pt")
            inp = trainenc.input_ids[:, :seqlen]
            attention_mask = torch.ones_like(inp)
            traindataset.append({"input_ids": inp, "attention_mask": attention_mask})
            print("example instruction:", s)
    return traindataset


# name -> (hub repo, instruction extractor, response extractor) for the chat-style sources.
_CHAT_CALIB_SOURCES = {
    "alpaca": ("iboing/alpaca_data", lambda e: e["instruction"], lambda e: e["output"]),
    "MetaMATH": ("iboing/MetaMathQA-395K", lambda e: e["query"], lambda e: e["response"]),
    "codefeedback": ("iboing/CodeFeedback-Filtered-Instruction", lambda e: e["query"], lambda e: e["answer"]),
    "WizLMinstruct": (
        "iboing/WizardLM_evol_instruct_V2_143k",
        lambda e: e["conversation"][0]["human"],
        lambda e: e["conversation"][0]["assistant"],
    ),
}


def get_calib_data(name, tokenizer, model_id, nsamples, seqlen=2048, seed=3):
    """Build (and disk-cache) a calibration dataset of `nsamples` tokenized samples.

    Supports plain-text corpora ("c4", "wikitext2", "ptb", "trivia_qa", "nqopen")
    sampled as random `seqlen`-token windows, and chat-style instruction datasets
    ("alpaca", "MetaMATH", "codefeedback", "WizLMinstruct") formatted with the
    Llama-2 chat template. Results are cached under `cache/` keyed by all args.

    Raises:
        NotImplementedError: for unknown dataset names.
    """
    print(f" get_data_from: {name}, nsamples={nsamples}, seqlen={seqlen}, {seed}")
    cache_file = f"cache/{name}_{model_id.replace('/', '_')}_{nsamples}_{seqlen}_{seed}.pt"
    traindataset = []
    if not os.path.exists("cache"):
        os.makedirs("cache")
    if os.path.exists(cache_file):
        print(f"found data file: {cache_file}")
        traindataset = torch.load(cache_file)
        print("loaded ...")
        return traindataset

    if name in _CHAT_CALIB_SOURCES:
        repo, get_instruction, get_response = _CHAT_CALIB_SOURCES[name]
        selected_data_dict = load_dataset(repo, split="train").shuffle(seed=seed).take(nsamples)
        traindataset = _build_chat_samples(selected_data_dict, tokenizer, seqlen, get_instruction, get_response)
        torch.save(traindataset, cache_file)
        return traindataset

    if name == "c4":
        traindata = load_dataset(
            "allenai/c4",
            "allenai--c4",
            data_files={"train": "en/c4-train.00000-of-01024.json.gz"},
            split="train",
        )
        tot_text = "\n\n".join(traindata["text"])
    elif name == "wikitext2":
        traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
        tot_text = "\n\n".join(traindata["text"])
    elif name == "ptb":
        traindata = load_dataset(
            "ptb_text_only",
            "penn_treebank",
            split="train",
        )
        tot_text = "\n\n".join(traindata["sentence"])
    elif name in ("traivia_qa", "trivia_qa"):
        # "traivia_qa" kept for backward compatibility with existing callers (typo).
        traindata = load_dataset("trivia_qa", "rc", split="train")
        tot_text = "\n\n".join(traindata["question"])
    elif name == "nqopen":
        traindata = load_dataset("nq_open", split="train")
        tot_text = "\n\n".join(traindata["question"])
    else:
        raise NotImplementedError

    print(f"tot_text={len(tot_text)}")
    # Fix: seed the offset sampling — the cache filename embeds `seed`, so the
    # cached contents must actually be deterministic in it.
    random.seed(seed)
    for _ in range(nsamples):
        i = random.randint(0, len(tot_text) - seqlen - 1)
        # Over-fetch characters, then truncate to `seqlen` tokens after tokenizing.
        j = i + seqlen * 10
        trainenc = tokenizer(tot_text[i:j], return_tensors="pt")
        inp = trainenc.input_ids[:, :seqlen]
        attention_mask = torch.ones_like(inp)
        traindataset.append({"input_ids": inp, "attention_mask": attention_mask})
    torch.save(traindataset, cache_file)
    return traindataset


def get_eval_loaders(name, tokenizer):
    """Tokenize an evaluation corpus ("wikitext2", "ptb" or "c4") as one long encoding.

    Raises:
        NotImplementedError: for unknown corpus names.
    """
    if "wikitext2" in name:
        testdata = load_dataset(
            "wikitext",
            "wikitext-2-raw-v1",
            split="test",
        )
        testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt")
        return testenc
    if "ptb" in name:
        valdata = load_dataset(
            "ptb_text_only",
            "penn_treebank",
            split="validation",
        )
        testenc = tokenizer("\n\n".join(valdata["sentence"]), return_tensors="pt")
        return testenc
    if "c4" in name:
        testdata = load_dataset(
            "allenai/c4",
            "allenai--c4",
            data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
            split="validation",
        )
        testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt")
        return testenc
    raise NotImplementedError
peft/examples/corda_finetuning/datautils.py/0
{ "file_path": "peft/examples/corda_finetuning/datautils.py", "repo_id": "peft", "token_count": 4106 }
239
<jupyter_start><jupyter_text>Fine-tune large models using 🤗 `peft` adapters, `transformers` & `bitsandbytes`In this tutorial we will cover how we can fine-tune large language models using the very recent `peft` library and `bitsandbytes` for loading large models in 8-bit.The fine-tuning method will rely on a recent method called "Low Rank Adapters" (LoRA), instead of fine-tuning the entire model you just have to fine-tune these adapters and load them properly inside the model. After fine-tuning the model you can also share your adapters on the 🤗 Hub and load them very easily. Let's get started! Install requirementsFirst, run the cells below to install the requirements:<jupyter_code>!pip install -q datasets==3.6.0 accelerate !pip install -q git+https://github.com/bitsandbytes-foundation/bitsandbytes.git !pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git<jupyter_output> ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 76.3/76.3 MB 10.3 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 462.8/462.8 KB 25.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 199.7/199.7 KB 25.5 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 190.3/190.3 KB 23.1 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 213.0/213.0 KB 26.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 132.0/132.0 KB 18.5 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 140.6/140.6 KB 20.2 MB/s eta 0:00:00 [?25h Installing build dependencies ... [?25l[?25hdone Getting requirements to build wheel ... [?25l[?25hdone Preparing metadata (pyproject.tom[...]<jupyter_text>Model loadingHere let's load the `opt-6.7b` model, its weights in half-precision (float16) are about 13GB on the Hub! 
If we load them in 8-bit we would require around 7GB of memory instead.<jupyter_code>import os import torch import torch.nn as nn import bitsandbytes as bnb from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", quantization_config=BitsAndBytesConfig(load_in_8bit=True)) tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b")<jupyter_output><empty_output><jupyter_text>Prepare model for trainingSome pre-processing needs to be done before training such an int8 model using `peft`, therefore let's import an utiliy function `prepare_model_for_kbit_training` that will: - Casts all the non `int8` modules to full precision (`fp32`) for stability- Add a `forward_hook` to the input embedding layer to enable gradient computation of the input hidden states- Enable gradient checkpointing for more memory-efficient training<jupyter_code>from peft import prepare_model_for_kbit_training model = prepare_model_for_kbit_training(model)<jupyter_output><empty_output><jupyter_text>Apply LoRAHere comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`.<jupyter_code>def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM" ) model = get_peft_model(model, config) print_trainable_parameters(model)<jupyter_output>trainable params: 8388608 || all params: 6666862592 || trainable%: 0.12582542214183376<jupyter_text>Training<jupyter_code>import transformers from datasets import load_dataset data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = transformers.Trainer( model=model, train_dataset=data["train"], args=transformers.TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=100, max_steps=200, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir="outputs", ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! 
trainer.train()<jupyter_output><empty_output><jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>from huggingface_hub import notebook_login notebook_login() model.push_to_hub("ybelkada/opt-6.7b-lora", use_auth_token=True)<jupyter_output>Uploading the following files to ybelkada/opt-6.7b-lora: adapter_config.json,adapter_model.bin<jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig peft_model_id = "ybelkada/opt-6.7b-lora" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained( config.base_model_name_or_path, return_dict=True, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the Lora model model = PeftModel.from_pretrained(model, peft_model_id)<jupyter_output><empty_output><jupyter_text>InferenceYou can then directly use the trained model or the model that you have loaded from the 🤗 Hub for inference as you would do it usually in `transformers`.<jupyter_code>batch = tokenizer("Two things are infinite: ", return_tensors="pt").to(model.device) device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" with torch.amp.autocast(device_type=device_type): output_tokens = model.generate(**batch, max_new_tokens=50) print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True))<jupyter_output>/home/marc/anaconda3/envs/accelerate/lib/python3.10/site-packages/transformers/generation/utils.py:1448: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. 
Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`. warnings.warn(
peft/examples/int8_training/Finetune_opt_bnb_peft.ipynb/0
{ "file_path": "peft/examples/int8_training/Finetune_opt_bnb_peft.ipynb", "repo_id": "peft", "token_count": 2830 }
240
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Initialize a PiSSA adapter on a base model and save both the adapter and the residual model."""

import argparse
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from peft import LoraConfig, get_peft_model

parser = argparse.ArgumentParser(description="Merge Adapter to Base Model")
parser.add_argument(
    "--base_model_name_or_path",
    help="The name or path of the fp32/16 base model.",
)
parser.add_argument("--output_dir", type=str, help="The directory to save the PiSSA model.")
parser.add_argument("--bits", type=str, default="bf16", choices=["bf16", "fp16", "fp32"])
parser.add_argument(
    "--init_lora_weights", type=str, default="pissa", help="(`['pissa', 'pissa_niter_[number of iters]']`)"
)
parser.add_argument("--lora_r", type=int, default=128)
parser.add_argument("--lora_alpha", type=int, default=128)
# Fix: dropout is a probability in [0, 1]; with the previous `type=int`,
# argparse rejected any fractional value such as `--lora_dropout 0.1`.
parser.add_argument("--lora_dropout", type=float, default=0.0)
script_args = parser.parse_args()
print(script_args)

# Load the base model in the requested precision.
model = AutoModelForCausalLM.from_pretrained(
    script_args.base_model_name_or_path,
    torch_dtype=(
        torch.float16
        if script_args.bits == "fp16"
        else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
    ),
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
tokenizer.pad_token_id = tokenizer.eos_token_id
lora_config = LoraConfig(
    r=script_args.lora_r,
    lora_alpha=script_args.lora_alpha,
    init_lora_weights=script_args.init_lora_weights,
    lora_dropout=script_args.lora_dropout,
    target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)
peft_model = get_peft_model(model, lora_config)

# Save PiSSA modules (mark init as done so the saved adapter reloads without re-running SVD):
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(os.path.join(script_args.output_dir, "pissa_init"))
# Save residual model (base weights with the principal components subtracted):
peft_model = peft_model.unload()
peft_model.save_pretrained(script_args.output_dir)
# Save the tokenizer:
tokenizer.save_pretrained(script_args.output_dir)
peft/examples/pissa_finetuning/preprocess.py/0
{ "file_path": "peft/examples/pissa_finetuning/preprocess.py", "repo_id": "peft", "token_count": 938 }
241
<jupyter_start><jupyter_code>import argparse import os import torch from torch.optim import AdamW from torch.utils.data import DataLoader from peft import ( get_peft_config, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict, LoraConfig, PeftType, PrefixTuningConfig, PromptEncoderConfig, ) import evaluate from datasets import load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from tqdm import tqdm batch_size = 32 model_name_or_path = "roberta-large" task = "mrpc" peft_type = PeftType.LORA device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" num_epochs = 20 peft_config = LoraConfig(task_type="SEQ_CLS", inference_mode=False, r=8, lora_alpha=16, lora_dropout=0.1) lr = 3e-4 if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")): padding_side = "left" else: padding_side = "right" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side) if getattr(tokenizer, "pad_token_id") is None: tokenizer.pad_token_id = tokenizer.eos_token_id datasets = load_dataset("glue", task) metric = evaluate.load("glue", task) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. 
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size ) model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs), num_training_steps=(len(train_dataloader) * num_epochs), ) model.to(device) for epoch in range(num_epochs): model.train() for step, batch in enumerate(tqdm(train_dataloader)): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/115 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 
100%|████████████████████████████████████████████████████████████████████████████████████████| 115/115 [00:28<00:00, 4.08it/s] 100%|██████████████████████████████████████████████████████████████████████████████████████████| 13/13 [00:01<00:00, 8.68it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>model.push_to_hub("smangrul/roberta-large-peft-lora", use_auth_token=True)<jupyter_output><empty_output><jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer peft_model_id = "smangrul/roberta-large-peft-lora" config = PeftConfig.from_pretrained(peft_model_id) inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the Lora model inference_model = PeftModel.from_pretrained(inference_model, peft_model_id) inference_model.to(device) inference_model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = inference_model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(eval_metric)<jupyter_output>Some weights of the model checkpoint at roberta-large were not used when initializing RobertaForSequenceClassification: ['lm_head.bias', 'roberta.pooler.dense.weight', 'roberta.pooler.dense.bias', 'lm_head.layer_norm.weight', 'lm_head.decoder.weight', 'lm_head.dense.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.bias'] - This IS expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-large and are newly initialized: ['classifier.dense.bias', 'classifie[...]
peft/examples/sequence_classification/LoRA.ipynb/0
{ "file_path": "peft/examples/sequence_classification/LoRA.ipynb", "repo_id": "peft", "token_count": 2317 }
242
# Fine-tune mistralai/Mistral-7B-v0.1 on the smangrul/ultrachat-10k-chatml chat dataset
# using QLoRA: 4-bit (nested/double-quantized) base weights with bf16 compute, LoRA
# adapters on all linear layers, packed 2048-token sequences, gradient checkpointing,
# flash attention, and checkpoints pushed to a private Hub repo after every save.
# Effective train batch size = 8 (per device) x 8 (grad accumulation) = 64 per device.
python train.py \
--seed 100 \
--model_name_or_path "mistralai/Mistral-7B-v0.1" \
--dataset_name "smangrul/ultrachat-10k-chatml" \
--chat_template_format "chatml" \
--add_special_tokens False \
--append_concat_token False \
--splits "train,test" \
--max_seq_len 2048 \
--num_train_epochs 1 \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \
--hub_strategy "every_save" \
--bf16 True \
--packing True \
--learning_rate 1e-4 \
--lr_scheduler_type "cosine" \
--weight_decay 1e-4 \
--warmup_ratio 0.0 \
--max_grad_norm 1.0 \
--output_dir "mistral-sft-lora" \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 8 \
--gradient_accumulation_steps 8 \
--gradient_checkpointing True \
--use_reentrant True \
--dataset_text_field "content" \
--use_peft_lora True \
--lora_r 8 \
--lora_alpha 16 \
--lora_dropout 0.1 \
--lora_target_modules "all-linear" \
--use_4bit_quantization True \
--use_nested_quant True \
--bnb_4bit_compute_dtype "bfloat16" \
--use_flash_attn True
peft/examples/sft/run_peft.sh/0
{ "file_path": "peft/examples/sft/run_peft.sh", "repo_id": "peft", "token_count": 457 }
243
<jupyter_start><jupyter_text>IntroductionIn this notebook, we are going to fine-tune the LayoutLM model by Microsoft Research on the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset, which is a collection of annotated form documents. The goal of our model is to learn the annotations of a number of labels ("question", "answer", "header" and "other") on those forms, such that it can be used to annotate unseen forms in the future.* Original LayoutLM paper: https://huggingface.co/papers/1912.13318* Original FUNSD paper: https://huggingface.co/papers/1905.13538 Install librariesCurrently you have to first install the `unilm` package, and then the `transformers` package (which updates the outdated `transformers` package that is included in the `unilm` package). The reason we also install the `unilm` package is because we need its preprocessing files. I've forked it, and removed some statements which introduced some issues.<jupyter_code>! rm -r unilm ! git clone https://github.com/microsoft/unilm.git<jupyter_output><empty_output><jupyter_text>Getting the dataHere we download the data of the [FUNSD dataset](https://guillaumejaume.github.io/FUNSD/) from the web. This results in a directory called "data" being created, which has 2 subdirectories, one for training and one for testing. Each of those has 2 subdirectories in turn, one containing the images as png files and one containing the annotations in json format.<jupyter_code>! wget https://guillaumejaume.github.io/FUNSD/dataset.zip ! unzip dataset.zip && mv dataset data && rm -rf dataset.zip __MACOSX<jupyter_output><empty_output><jupyter_text>Let's take a look at a training example. 
For this, we are going to use PIL (Python Image Library).<jupyter_code>from PIL import Image, ImageDraw, ImageFont import os base_path = "./data" image = Image.open(os.path.join(base_path, "training_data/images/0000971160.png")) image = image.convert("RGB") image<jupyter_output><empty_output><jupyter_text>Now let's plot its corresponding annotations. Basically, if you type `data['form']`, you get a list of all general annotations. Each general annotation has a label, a bounding box, and one or more words, which in also have their own bounding box. The bounding boxes are in [xleft, ytop, xright, ybottom] format.<jupyter_code>import json with open(os.path.join(base_path, "training_data/annotations/0000971160.json")) as f: data = json.load(f) for annotation in data["form"]: print(annotation)<jupyter_output>{'box': [292, 91, 376, 175], 'text': 'R&D', 'label': 'other', 'words': [{'box': [292, 91, 376, 175], 'text': 'R&D'}], 'linking': [], 'id': 0} {'box': [219, 316, 225, 327], 'text': ':', 'label': 'question', 'words': [{'box': [219, 316, 225, 327], 'text': ':'}], 'linking': [], 'id': 1} {'box': [95, 355, 169, 370], 'text': 'Suggestion:', 'label': 'question', 'words': [{'box': [95, 355, 169, 370], 'text': 'Suggestion:'}], 'linking': [[2, 16]], 'id': 2} {'box': [482, 268, 518, 282], 'text': 'Date:', 'label': 'question', 'words': [{'box': [482, 268, 518, 282], 'text': 'Date:'}], 'linking': [[3, 12]], 'id': 3} {'box': [511, 309, 570, 323], 'text': 'Licensee', 'label': 'answer', 'words': [{'box': [511, 309, 570, 323], 'text': 'Licensee'}], 'linking': [[13, 4]], 'id': 4} {'box': [211, 651, 217, 662], 'text': '', 'label': 'question', 'words': [{'box': [211, 651, 217, 662], 'text': ''}], 'linking': [], 'id': 5} {'box': [461, 605, 483, 619], 'text': 'Yes', 'label': 'question', 'words': [{'box': [4[...]<jupyter_text>The PIL library has a handy ImageDraw module, which -you guessed it- allows to draw things (such as rectangles) on an image:<jupyter_code>draw = 
ImageDraw.Draw(image, "RGBA") font = ImageFont.load_default() label2color = {"question": "blue", "answer": "green", "header": "orange", "other": "violet"} for annotation in data["form"]: label = annotation["label"] general_box = annotation["box"] draw.rectangle(general_box, outline=label2color[label], width=2) draw.text((general_box[0] + 10, general_box[1] - 10), label, fill=label2color[label], font=font) words = annotation["words"] for word in words: box = word["box"] draw.rectangle(box, outline=label2color[label], width=1) image<jupyter_output><empty_output><jupyter_text>Preprocessing the dataNext, we need to turn the document images into individual tokens and corresponding labels (BIOES format, see further). We do this both for the training and test datasets. Make sure to run this from the `/content` directory:<jupyter_code>! python unilm/layoutlm/deprecated/examples/seq_labeling/preprocess.py --data_dir data/training_data/annotations \ --data_split train \ --output_dir data \ --model_name_or_path microsoft/layoutlm-base-uncased \ --max_len 510 ! python unilm/layoutlm/deprecated/examples/seq_labeling/preprocess.py --data_dir data/testing_data/annotations \ --data_split test \ --output_dir data \ --model_name_or_path microsoft/layoutlm-base-uncased \ --max_len 510<jupyter_output><empty_output><jupyter_text>Next, we create a labels.txt file that contains the unique labels of the FUNSD dataset:<jupyter_code>! 
cat data/train.txt | cut -d$'\t' -f 2 | grep -v "^$"| sort | uniq > data/labels.txt<jupyter_output><empty_output><jupyter_text>Define a PyTorch datasetFirst, we create a list containing the unique labels based on `data/labels.txt` (run this from the content directory):<jupyter_code>from torch.nn import CrossEntropyLoss def get_labels(path): with open(path, "r") as f: labels = f.read().splitlines() if "O" not in labels: labels = ["O"] + labels return labels labels = get_labels("data/labels.txt") num_labels = len(labels) label_map = {i: label for i, label in enumerate(labels)} # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later pad_token_label_id = CrossEntropyLoss().ignore_index<jupyter_output><empty_output><jupyter_text>We can see that the dataset uses the so-called BIOES annotation scheme to annotate the tokens. This means that a given token can be either at the beginning (B), inside (I), outside (O), at the end (E) or start (S) of a given entity. 
Entities include ANSWER, QUESTION, HEADER and OTHER:<jupyter_code>print(labels)<jupyter_output>['B-ANSWER', 'B-HEADER', 'B-QUESTION', 'E-ANSWER', 'E-HEADER', 'E-QUESTION', 'I-ANSWER', 'I-HEADER', 'I-QUESTION', 'O', 'S-ANSWER', 'S-HEADER', 'S-QUESTION']<jupyter_text>Next, we can create a PyTorch dataset and corresponding dataloader (both for training and evaluation):<jupyter_code>import logging import os import torch from torch.utils.data import Dataset logger = logging.getLogger(__name__) class FunsdDataset(Dataset): def __init__(self, args, tokenizer, labels, pad_token_label_id, mode): if args.local_rank not in [-1, 0] and mode == "train": torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = read_examples_from_file(args.data_dir, mode) features = convert_examples_to_features( examples, labels, args.max_seq_length, tokenizer, cls_token_at_end=bool(args.model_type in ["xlnet"]), # xlnet has a cls token at the end cls_token=tokenizer.cls_token, cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(args.model_type in ["roberta"]), # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0, pad_token_label_id=pad_token_label_id, ) # if args.local_rank in [-1, 0]: # logger.info("Saving features into cached file %s", cached_features_file) # torch.save(features, cached_features_file) if args.local_rank == 0 and mode == "train": torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache self.features = features # Convert to Tensors and build dataset self.all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) self.all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) self.all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) self.all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long) self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long) def __len__(self): return len(self.features) def __getitem__(self, index): return ( self.all_input_ids[index], self.all_input_mask[index], self.all_segment_ids[index], self.all_label_ids[index], self.all_bboxes[index], ) class InputExample(object): """A single training/test example for token classification.""" def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size): """Constructs a InputExample. Args: guid: Unique id for the example. words: list. The words of the sequence. labels: (Optional) list. The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.words = words self.labels = labels self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size class InputFeatures(object): """A single set of features of data.""" def __init__( self, input_ids, input_mask, segment_ids, label_ids, boxes, actual_bboxes, file_name, page_size, ): assert ( 0 <= all(boxes) <= 1000 ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(boxes) self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_ids = label_ids self.boxes = boxes self.actual_bboxes = actual_bboxes self.file_name = file_name self.page_size = page_size def read_examples_from_file(data_dir, mode): file_path = os.path.join(data_dir, "{}.txt".format(mode)) box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode)) image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode)) guid_index = 1 examples = [] with open(file_path, encoding="utf-8") as f, open(box_file_path, encoding="utf-8") as fb, open( image_file_path, encoding="utf-8" ) as fi: words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] for line, bline, iline in zip(f, fb, fi): if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append( InputExample( guid="{}-{}".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) guid_index += 1 words = [] boxes = [] actual_bboxes = [] file_name = None page_size = None labels = [] else: splits = line.split("\t") bsplits = bline.split("\t") isplits = iline.split("\t") assert len(splits) == 2 assert len(bsplits) == 2 assert len(isplits) == 4 assert splits[0] == bsplits[0] words.append(splits[0]) if len(splits) > 1: labels.append(splits[-1].replace("\n", "")) box = bsplits[-1].replace("\n", "") box = [int(b) for b in box.split()] boxes.append(box) actual_bbox = [int(b) for b 
in isplits[1].split()] actual_bboxes.append(actual_bbox) page_size = [int(i) for i in isplits[2].split()] file_name = isplits[3].strip() else: # Examples could have no label for mode = "test" labels.append("O") if words: examples.append( InputExample( guid="%s-%d".format(mode, guid_index), words=words, labels=labels, boxes=boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return examples def convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_segment_id=0, pad_token_label_id=-1, sequence_a_segment_id=0, mask_padding_with_zero=True, ): """Loads a data file into a list of `InputBatch`s `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in enumerate(examples): file_name = example.file_name page_size = example.page_size width, height = page_size if ex_index % 10000 == 0: logger.info("Writing example %d of %d", ex_index, len(examples)) tokens = [] token_boxes = [] actual_bboxes = [] label_ids = [] for word, label, box, actual_bbox in zip(example.words, example.labels, example.boxes, example.actual_bboxes): word_tokens = tokenizer.tokenize(word) tokens.extend(word_tokens) token_boxes.extend([box] * len(word_tokens)) actual_bboxes.extend([actual_bbox] * len(word_tokens)) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 
1)) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = 3 if sep_token_extra else 2 if len(tokens) > max_seq_length - special_tokens_count: tokens = tokens[: (max_seq_length - special_tokens_count)] token_boxes = token_boxes[: (max_seq_length - special_tokens_count)] actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)] label_ids = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] token_boxes += [sep_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] token_boxes += [cls_token_box] actual_bboxes += [[0, 0, width, height]] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: tokens = [cls_token] + tokens token_boxes = [cls_token_box] + token_boxes actual_bboxes = [[0, 0, width, height]] + actual_bboxes label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids token_boxes = ([pad_token_box] * padding_length) + token_boxes else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length token_boxes += [pad_token_box] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length assert len(token_boxes) == max_seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s", example.guid) logger.info("tokens: %s", " ".join([str(x) for x in tokens])) logger.info("input_ids: %s", " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s", " ".join([str(x) for x in input_mask])) logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) logger.info("label_ids: %s", " ".join([str(x) for x in label_ids])) logger.info("boxes: %s", " ".join([str(x) for x in token_boxes])) logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes])) features.append( InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids, boxes=token_boxes, actual_bboxes=actual_bboxes, file_name=file_name, page_size=page_size, ) ) return features from transformers import LayoutLMTokenizer # from .unilm.layoutlm.data.funsd import FunsdDataset, InputFeatures from torch.utils.data import DataLoader, RandomSampler, SequentialSampler batch_size = 16 args = { "local_rank": -1, "overwrite_cache": True, "data_dir": "data/", "model_name_or_path": 
"microsoft/layoutlm-base-uncased", "max_seq_length": 512, "model_type": "layoutlm", } # class to turn the keys of a dict into attributes (thanks Stackoverflow) class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self args = AttrDict(args) tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") # the LayoutLM authors already defined a specific FunsdDataset, so we are going to use this here train_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="train") train_sampler = RandomSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size) eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="test") eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size) len(train_dataloader) len(eval_dataloader) batch = next(iter(train_dataloader)) input_ids = batch[0][0] tokenizer.decode(input_ids)<jupyter_output><empty_output><jupyter_text>Define and fine-tune the modelAs this is a sequence labeling task, we are going to load `LayoutLMForTokenClassification` (the base sized model) from the hub. 
We are going to fine-tune it on a downstream task, namely FUNSD.<jupyter_code>from peft import get_peft_config, PeftModel, get_peft_model, LoraConfig, TaskType peft_config = LoraConfig( task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all" ) peft_config from transformers import LayoutLMForTokenClassification import torch from transformers import set_seed seed = 100 set_seed(seed) device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=num_labels) model = get_peft_model(model, peft_config) model.to(device) print(model.model.layoutlm.encoder.layer[0].attention.self.query.weight) print(model.model.layoutlm.encoder.layer[0].attention.self.query.lora_A.default.weight) print(model.model.classifier.weight)<jupyter_output><empty_output><jupyter_text>Now we can start training:<jupyter_code>from transformers import get_linear_schedule_with_warmup from tqdm import tqdm num_train_epochs = 100 optimizer = torch.optim.AdamW(model.parameters(), lr=3e-3) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0.06 * (len(train_dataloader) * num_train_epochs), num_training_steps=(len(train_dataloader) * num_train_epochs), ) global_step = 0 t_total = len(train_dataloader) * num_train_epochs # total number of training steps # put the model in training mode model.train() for epoch in range(num_train_epochs): for batch in tqdm(train_dataloader, desc="Training"): input_ids = batch[0].to(device) bbox = batch[4].to(device) attention_mask = batch[1].to(device) token_type_ids = batch[2].to(device) labels = batch[3].to(device) # forward pass outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels ) loss = outputs.loss # print loss every 100 steps if global_step % 10 == 0: print(f"Loss after {global_step} 
steps: {loss.item()}") # backward pass to get the gradients loss.backward() # print("Gradients on classification head:") # print(model.classifier.weight.grad[6,:].sum()) # update optimizer.step() lr_scheduler.step() optimizer.zero_grad() global_step += 1 import numpy as np from seqeval.metrics import ( classification_report, f1_score, precision_score, recall_score, ) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None # put model in evaluation mode model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): with torch.no_grad(): input_ids = batch[0].to(device) bbox = batch[4].to(device) attention_mask = batch[1].to(device) token_type_ids = batch[2].to(device) labels = batch[3].to(device) # forward pass outputs = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels ) # get the loss and logits tmp_eval_loss = outputs.loss logits = outputs.logits eval_loss += tmp_eval_loss.item() nb_eval_steps += 1 # compute the predictions if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = labels.detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0) # compute average evaluation loss eval_loss = eval_loss / nb_eval_steps preds = np.argmax(preds, axis=2) out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] for i in range(out_label_ids.shape[0]): for j in range(out_label_ids.shape[1]): if out_label_ids[i, j] != pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) results = { "loss": eval_loss, "precision": precision_score(out_label_list, preds_list), "recall": recall_score(out_label_list, preds_list), "f1": f1_score(out_label_list, preds_list), } print(results) model.print_trainable_parameters() model.save_pretrained("peft_layoutlm") 
!du -h ./peft_layoutlm/adapter_model.safetensors<jupyter_output>2.8M ./peft_layoutlm/adapter_model.safetensors
peft/examples/token_classification/peft_lora_token_cls.ipynb/0
{ "file_path": "peft/examples/token_classification/peft_lora_token_cls.ipynb", "repo_id": "peft", "token_count": 12369 }
244
{ "optimizer_kwargs": { "lr": 3e-1, "weight_decay": 1e-5 } }
peft/method_comparison/MetaMathQA/experiments/c3a/llama-3.2-3B-default/training_params.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/c3a/llama-3.2-3B-default/training_params.json", "repo_id": "peft", "token_count": 43 }
245
{ "alpha_pattern": {}, "auto_mapping": null, "base_model_name_or_path": null, "bias": "none", "block_share": false, "coft": false, "eps": 6e-05, "exclude_modules": null, "fan_in_fan_out": false, "inference_mode": false, "init_weights": true, "layers_pattern": null, "layers_to_transform": null, "module_dropout": 0.0, "modules_to_save": null, "oft_block_size": 0, "peft_type": "OFT", "r": 32, "rank_pattern": {}, "revision": null, "target_modules": [ "q_proj", "v_proj" ], "task_type": null }
peft/method_comparison/MetaMathQA/experiments/oft/llama-3.2-3B-rank32/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/oft/llama-3.2-3B-rank32/adapter_config.json", "repo_id": "peft", "token_count": 246 }
246
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Main entry point to run the experiments. Contains general setup and the proper training code. """ import argparse import datetime as dt import gc import json import os import random import sys import textwrap import time from contextlib import AbstractContextManager, nullcontext from functools import partial from typing import Any, Callable, Literal, Optional import torch from torch import nn from torch.amp import GradScaler, autocast from tqdm import tqdm from transformers import GenerationConfig, set_seed from utils import ( FILE_NAME_TRAIN_PARAMS, BucketIterator, TrainResult, TrainStatus, get_accuracy, get_base_model_info, get_dataset_info, get_file_size, get_model, get_optimizer_and_scheduler, get_peft_branch, get_tokenizer, get_train_config, init_accelerator, log_results, validate_experiment_path, ) from data import get_train_valid_test_datasets from peft import AdaLoraConfig, PeftConfig from peft.utils import infer_device, CONFIG_NAME # # suppress all warnings # warnings.filterwarnings("ignore") # FIXME? 
dtype_to_bytes_linear = {"float32": 4, "float16": 2, "bfloat16": 2, "int8": 1, "int4": 0.5} # if lr scheduler with warmup is used, the ratio of warmup steps to total steps BUCKET_FACTOR = 20 # number of batches per bucket, increasing this further has diminishing returns def get_generation_config(*, seq_len, generate_kwargs) -> GenerationConfig: # filter out None values so that we don't depend on setting correct defaults in the config generation_kwargs = {k: v for k, v in generate_kwargs.items() if v is not None} if ("max_length" in generation_kwargs) and ("max_new_tokens" in generation_kwargs): # transformers does not support setting both max_length and max_new_tokens, but what we want in this case is to # take the smaller of the two values new_max_length = min(generation_kwargs["max_new_tokens"] + seq_len, generation_kwargs["max_length"]) del generation_kwargs["max_new_tokens"] generation_kwargs["max_length"] = new_max_length generation_config = GenerationConfig(**generate_kwargs) return generation_config def evaluate(model, tokenizer, ds, batch_size, generate_kwargs, use_tqdm: bool = False) -> tuple[list[str], list[str]]: with torch.inference_mode(): predictions = [] responses = [] pbar = range(0, len(ds), batch_size) if use_tqdm: pbar = tqdm(pbar) for j in pbar: sliced = ds[j : j + batch_size] responses += sliced.pop("response") batch = tokenizer.pad(sliced, return_tensors="pt", padding_side="left").to(model.device) seq_len = batch["input_ids"].shape[1] generation_config = get_generation_config(seq_len=seq_len, generate_kwargs=generate_kwargs) outputs = model.generate(**batch, generation_config=generation_config, pad_token_id=tokenizer.eos_token_id) predictions += tokenizer.batch_decode(outputs, skip_special_tokens=True) return predictions, responses class DummyGradScaler: # if no mixed precision is being used def scale(self, loss): return loss def unscale_(self, optimizer): pass def step(self, optimizer): optimizer.step() def update(self): pass def train( *, 
model: nn.Module, max_steps: int, batch_size: int, batch_size_eval: int, tokenizer: Any, accelerator_memory_init: int, eval_steps: int, generation_kwargs: dict[str, Any], grad_norm_clip: float, optimizer_type: str, optimizer_kwargs: dict[str, Any], query_template: str, lr_scheduler_arg: Optional[Literal["cosine"]], use_amp: bool, is_adalora: bool, ) -> TrainResult: accelerator_memory_allocated_log = [] accelerator_memory_reserved_log = [] losses = [] durations = [] metrics = [] sample = 0 # keep count of the current sample total_samples = 0 # total number of samples over all epochs total_tokens = [] # total number of tokens over all epochs device_type = infer_device() torch_accelerator_module = getattr(torch, device_type, torch.cuda) if use_amp: grad_scaler: GradScaler | DummyGradScaler = GradScaler(device=device_type) autocast_ctx: Callable[[], ContextManager[Any]] = partial(autocast, device_type=device_type) else: grad_scaler = DummyGradScaler() autocast_ctx = nullcontext optimizer, lr_scheduler = get_optimizer_and_scheduler( model, optimizer_type=optimizer_type, max_steps=max_steps, lr_scheduler_arg=lr_scheduler_arg, **optimizer_kwargs, ) # print this after getting the optimizer, in case it modifies requires_gard if hasattr(model, "get_nb_trainable_parameters"): num_trainable_params, num_params = model.get_nb_trainable_parameters() else: num_params = model.num_parameters() num_trainable_params = num_params print_verbose( f"trainable params: {num_trainable_params:,d} || all params: {num_params:,d} || " f"trainable: {100 * num_trainable_params / num_params:.4f}%" ) status = TrainStatus.FAILED tic_train = time.perf_counter() eval_time = 0.0 error_msg = "" ds_train, ds_valid, ds_test = get_train_valid_test_datasets( tokenizer=tokenizer, query_template=query_template, print_fn=print_verbose ) # note: bucketing by length is only really worth it for the train dataset, since it's length is big compared to the # batch size iterator_train = BucketIterator( ds_train, 
batch_size=batch_size, bucket_factor=BUCKET_FACTOR, delete_cols=["response"], ) try: pbar = tqdm(range(1, max_steps + 1)) for step, batch in zip(pbar, iterator_train): tic = time.perf_counter() # create the batch tokens_per_sample = [len(i) for i in batch["input_ids"]] total_tokens.append(sum(tokens_per_sample) + len(tokens_per_sample)) # add EOS token batch = tokenizer.pad(batch, return_tensors="pt").to(model.device) actual_batch_size = len(batch["input_ids"]) total_samples += actual_batch_size sample += batch_size if sample >= len(ds_train): # new epoch sample = 0 # add labels, they are automatically shifted by transformers labels = batch["input_ids"].clone() # We want to ignore the padding tokens except for the first EOS token; if we don't ignore them, the loss # will be dominated by padding tokens; if we ignore all, the model will not learn to predict the EOS token. # TODO: Note that the longest sequence in the batch won't have any PAD/EOS token at the end, this is fine if # the batch size is > 1 but should still be fixed eventually. 
for i, num_tokens in enumerate(tokens_per_sample): labels[i, num_tokens + 1 :] = -100 batch["labels"] = labels num_items_in_batch = batch["attention_mask"].sum().item() # train step optimizer.zero_grad() with autocast_ctx(): outputs = model(**batch, num_items_in_batch=num_items_in_batch) loss = outputs.loss grad_scaler.scale(loss).backward() if grad_norm_clip: grad_scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm_clip) grad_scaler.step(optimizer) grad_scaler.update() lr_scheduler.step() if is_adalora: model.base_model.update_and_allocate(step) losses.append(loss.item()) pbar.set_postfix({"loss": loss.item()}) accelerator_memory_allocated_log.append( torch_accelerator_module.memory_allocated() - accelerator_memory_init ) accelerator_memory_reserved_log.append( torch_accelerator_module.memory_reserved() - accelerator_memory_init ) toc = time.perf_counter() durations.append(toc - tic) # every couple of steps, evaluate; this can be slow due to generation if step % eval_steps == 0: tic_eval = time.perf_counter() loss_avg = sum(losses[-eval_steps:]) / eval_steps memory_allocated_avg = sum(accelerator_memory_allocated_log[-eval_steps:]) / eval_steps memory_reserved_avg = sum(accelerator_memory_reserved_log[-eval_steps:]) / eval_steps token_sum = sum(total_tokens[-eval_steps:]) dur_train = sum(durations[-eval_steps:]) tokens_per_sec = token_sum / dur_train model.eval() predictions, responses = evaluate( model=model, tokenizer=tokenizer, ds=ds_valid, batch_size=batch_size_eval, generate_kwargs={**generation_kwargs}, ) model.train() example = random.choice(predictions) example = textwrap.shorten(example, width=750) example = textwrap.indent(example, " ") print_verbose(f"\nExample prediction:\n{example}\n") accuracy = get_accuracy(predictions=predictions, responses=responses) num_tokens_generated = sum(sum(mask) for mask in tokenizer(predictions)["attention_mask"]) toc_eval = time.perf_counter() dur_eval = toc_eval - tic_eval eval_time 
+= toc_eval - tic_eval elapsed = time.perf_counter() - tic_train metrics.append( { "step": step, "valid accuracy": accuracy, "train loss": loss_avg, "train samples": total_samples, "train time": dur_train, "eval time": dur_eval, "tokens / sec": tokens_per_sec, "mem allocated avg": memory_allocated_avg, "mem reserved avg": memory_reserved_avg, "elapsed time": elapsed, } ) log_dict = { "step": f"{step:5d}", "samples": f"{total_samples:7d}", "lr": f"{lr_scheduler.get_last_lr()[0]:.2e}", "loss avg": f"{loss_avg:.4f}", "valid acc": f"{accuracy:.3f}", "gen valid tokens": num_tokens_generated, "train time": f"{dur_train:.1f}s", "eval time": f"{dur_eval:.1f}s", "train tokens / sec": f"{tokens_per_sec:.0f}", "mem allocated": f"{memory_allocated_avg:.0f}", "mem reserved": f"{memory_reserved_avg:.0f}", "elapsed time": f"{elapsed // 60:.0f}min {elapsed % 60:.0f}s", } print_verbose(json.dumps(log_dict)) # # TODO is this needed? torch_accelerator_module.empty_cache() gc.collect() print_verbose(f"Training finished after {max_steps} steps, evaluation on test set follows.") # test set evaluation model.eval() predictions, responses = evaluate( model=model, tokenizer=tokenizer, ds=ds_test, batch_size=batch_size_eval, generate_kwargs={**generation_kwargs, "pad_token_id": tokenizer.eos_token_id}, use_tqdm=len(ds_test) > 100, ) accuracy = get_accuracy(predictions=predictions, responses=responses) metrics.append( { "step": step, "test accuracy": accuracy, "train loss": sum(losses[-eval_steps:]) / eval_steps, "train samples": total_samples, "train total tokens": sum(total_tokens), } ) print_verbose(f"Test accuracy: {accuracy:.3f}") except KeyboardInterrupt: print_verbose("canceled training") status = TrainStatus.CANCELED error_msg = "manually canceled" except torch.OutOfMemoryError as exc: # ouch, still let's try to log some results print_verbose("out of memory error encountered") status = TrainStatus.CANCELED error_msg = str(exc) except Exception as exc: print_verbose(f"encountered an 
error: {exc}") status = TrainStatus.CANCELED error_msg = str(exc) toc_train = time.perf_counter() train_time = toc_train - tic_train - eval_time if status != TrainStatus.CANCELED: status = TrainStatus.SUCCESS train_result = TrainResult( status=status, train_time=train_time, accelerator_memory_reserved_log=accelerator_memory_reserved_log, losses=losses, metrics=metrics, error_msg=error_msg, num_trainable_params=num_trainable_params, num_total_params=num_params, ) return train_result def main(*, path_experiment: str, experiment_name: str, clean: bool) -> None: tic_total = time.perf_counter() start_date = dt.datetime.now(tz=dt.timezone.utc).replace(microsecond=0).isoformat() peft_branch = get_peft_branch() if peft_branch == "main": print_verbose("===== This experiment is categorized as a MAIN run because the PEFT branch is 'main' ======") else: print_verbose( f"===== This experiment is categorized as a TEST run because the PEFT branch is '{peft_branch}' ======" ) # load configs peft_config: Optional[PeftConfig] = None if os.path.exists(os.path.join(path_experiment, CONFIG_NAME)): peft_config = PeftConfig.from_pretrained(path_experiment) else: print_verbose(f"Could not find PEFT config at {path_experiment}, performing FULL FINETUNING") path_train_config = os.path.join(path_experiment, FILE_NAME_TRAIN_PARAMS) train_config = get_train_config(path_train_config) set_seed(train_config.seed) # initialize objects accelerator_memory_init = init_accelerator() tokenizer = get_tokenizer(model_id=train_config.model_id, max_seq_length=train_config.max_seq_length) model_info = get_base_model_info(train_config.model_id) metamath_info = get_dataset_info("meta-math/MetaMathQA") gsm8k_info = get_dataset_info("openai/gsm8k") model = get_model( model_id=train_config.model_id, dtype=train_config.dtype, compile=train_config.compile, attn_implementation=train_config.attn_implementation, peft_config=peft_config, autocast_adapter_dtype=train_config.autocast_adapter_dtype, ) 
print_verbose(model) # train model train_result = train( model=model, max_steps=train_config.max_steps, batch_size=train_config.batch_size, batch_size_eval=train_config.batch_size_eval, tokenizer=tokenizer, accelerator_memory_init=accelerator_memory_init, eval_steps=train_config.eval_steps, generation_kwargs=train_config.generation_kwargs, grad_norm_clip=train_config.grad_norm_clip, optimizer_type=train_config.optimizer_type, optimizer_kwargs=train_config.optimizer_kwargs, query_template=train_config.query_template, lr_scheduler_arg=train_config.lr_scheduler, use_amp=train_config.use_amp, is_adalora=isinstance(peft_config, AdaLoraConfig), ) if train_result.status == TrainStatus.FAILED: print_verbose("Training failed, not logging results") sys.exit(1) file_size = get_file_size( model, peft_config=peft_config, clean=clean, print_fn=print_verbose, ) time_total = time.perf_counter() - tic_total # log results: print and save to file log_results( experiment_name=experiment_name, train_result=train_result, accelerator_memory_init=accelerator_memory_init, time_total=time_total, file_size=file_size, model_info=model_info, datasets_info={"metamath": metamath_info, "gsm8k": gsm8k_info}, start_date=start_date, train_config=train_config, peft_config=peft_config, print_fn=print_verbose, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose output") parser.add_argument("path_experiment", type=str, help="Path to the experiment directory") parser.add_argument( "--clean", action="store_true", help="Delete training artifacts after run finishes (logs are still saved)", ) args = parser.parse_args() experiment_name = validate_experiment_path(args.path_experiment) if args.verbose: def print_verbose(*args, **kwargs) -> None: kwargs["file"] = sys.stderr print(*args, **kwargs) else: def print_verbose(*args, **kwargs) -> None: pass main( path_experiment=args.path_experiment, 
experiment_name=experiment_name, clean=args.clean, )
peft/method_comparison/MetaMathQA/run.py/0
{ "file_path": "peft/method_comparison/MetaMathQA/run.py", "repo_id": "peft", "token_count": 8105 }
247
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import os import warnings from dataclasses import asdict, dataclass, field from typing import Optional, Union from huggingface_hub import hf_hub_download from transformers.utils import PushToHubMixin, http_user_agent from .utils import CONFIG_NAME, PeftType, TaskType # we expect at least these keys to be present in a PEFT adapter_config.json MIN_EXPECTED_CONFIG_KEYS = {"peft_type"} def _check_and_remove_unused_kwargs(cls, kwargs): """Make PEFT configs forward-compatible by removing unused kwargs that were added in later PEFT versions. This assumes that removing the unused kwargs will not affect the default behavior. Returns the filtered kwargs and the set of removed keys. """ # it's not pretty but eh signature_parameters = inspect.signature(cls.__init__).parameters unexpected_kwargs = set(kwargs.keys()) - set(signature_parameters.keys()) for key in unexpected_kwargs: del kwargs[key] return kwargs, unexpected_kwargs @dataclass class PeftConfigMixin(PushToHubMixin): r""" This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a directory. 
The method `from_pretrained` will load the configuration of your adapter model from a directory. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. """ task_type: Optional[TaskType] = field(default=None, metadata={"help": "The type of task."}) peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."}) auto_mapping: Optional[dict] = field( default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."} ) def __post_init__(self): # check for invalid task type if (self.task_type is not None) and (self.task_type not in list(TaskType)): raise ValueError( f"Invalid task type: '{self.task_type}'. Must be one of the following task types: {', '.join(TaskType)}." ) def to_dict(self) -> dict: r""" Returns the configuration for your adapter model as a dictionary. """ return asdict(self) def save_pretrained(self, save_directory: str, **kwargs) -> None: r""" This method saves the configuration of your adapter model in a directory. Args: save_directory (`str`): The directory where the configuration will be saved. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) auto_mapping_dict = kwargs.pop("auto_mapping_dict", None) output_dict = self.to_dict() # converting set type to list for key, value in output_dict.items(): if isinstance(value, set): output_dict[key] = list(value) output_path = os.path.join(save_directory, CONFIG_NAME) # Add auto mapping details for custom models. 
if auto_mapping_dict is not None: output_dict["auto_mapping"] = auto_mapping_dict # save it with open(output_path, "w") as writer: writer.write(json.dumps(output_dict, indent=2, sort_keys=True)) @classmethod def from_peft_type(cls, **kwargs): r""" This method loads the configuration of your adapter model from a set of kwargs. The appropriate configuration type is determined by the `peft_type` argument. If `peft_type` is not provided, the calling class type is instantiated. Args: kwargs (configuration keyword arguments): Keyword arguments passed along to the configuration initialization. """ # Avoid circular dependency .. TODO: fix this with a larger refactor from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING # TODO: this hack is needed to fix the following issue (on commit 702f937): # if someone saves a default config and loads it back with `PeftConfig` class it yields to # not loading the correct config class. # # from peft import AdaLoraConfig, PeftConfig # peft_config = AdaLoraConfig() # print(peft_config) # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None, # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ... # # peft_config.save_pretrained("./test_config") # peft_config = PeftConfig.from_pretrained("./test_config") # print(peft_config) # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False) if "peft_type" in kwargs: peft_type = kwargs["peft_type"] config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] else: config_cls = cls try: config = config_cls(**kwargs) except TypeError as exc: # Here we potentially handle forward compatibility. Sometimes new keywords are added to configs, which makes # new configs incompatible with older PEFT versions. We catch these and remove them to allow the program to # continue, but warn the user about it. 
# First check if the error is due to unexpected keyword arguments, we don't want to accidentally catch # other TypeErrors. if "got an unexpected keyword argument" not in str(exc): raise exc filtered_kwargs, unexpected_kwargs = _check_and_remove_unused_kwargs(config_cls, kwargs) if not MIN_EXPECTED_CONFIG_KEYS.issubset(set(filtered_kwargs.keys())): raise TypeError( f"The {cls.__name__} config that is trying to be loaded is missing required keys: " f"{MIN_EXPECTED_CONFIG_KEYS}." ) warnings.warn( f"Unexpected keyword arguments {sorted(unexpected_kwargs)} for class {config_cls.__name__}, these are " "ignored. This probably means that you're loading a configuration file that was saved using a " "higher version of the library and additional parameters have been introduced since. It is " "highly recommended to upgrade the PEFT version before continuing (e.g. by running `pip install " "-U peft`)." ) config = config_cls.from_peft_type(**filtered_kwargs) return config @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs): r""" This method loads the configuration of your adapter model from a directory. Args: pretrained_model_name_or_path (`str`): The directory or the Hub repository id where the configuration is saved. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the child class initialization. 
""" path = ( os.path.join(pretrained_model_name_or_path, subfolder) if subfolder is not None else pretrained_model_name_or_path ) hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs) if "user_agent" not in hf_hub_download_kwargs: hf_hub_download_kwargs["user_agent"] = http_user_agent() if os.path.isfile(os.path.join(path, CONFIG_NAME)): config_file = os.path.join(path, CONFIG_NAME) else: try: config_file = hf_hub_download( pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs ) except Exception as exc: raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") from exc loaded_attributes = cls.from_json_file(config_file) kwargs = {**class_kwargs, **loaded_attributes} kwargs = cls.check_kwargs(**kwargs) return cls.from_peft_type(**kwargs) @classmethod def from_json_file(cls, path_json_file: str, **kwargs): r""" Loads a configuration file from a json file. Args: path_json_file (`str`): The path to the json file. """ with open(path_json_file) as file: json_object = json.load(file) # Sanity check that config does not contain a runtime_config if "runtime_config" in json_object: warnings.warn( "The configuration file contains a `runtime_config` key. This is ignored. Runtime configurations are only valid at runtime." 
) del json_object["runtime_config"] return json_object @classmethod def _split_kwargs(cls, kwargs): hf_hub_download_kwargs = {} class_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters: hf_hub_download_kwargs[key] = value elif key in list(cls.__annotations__): class_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, class_kwargs, other_kwargs @classmethod def _get_peft_type( cls, model_id: str, **hf_hub_download_kwargs, ): subfolder = hf_hub_download_kwargs.get("subfolder", None) path = os.path.join(model_id, subfolder) if subfolder is not None else model_id if os.path.isfile(os.path.join(path, CONFIG_NAME)): config_file = os.path.join(path, CONFIG_NAME) else: try: config_file = hf_hub_download( model_id, CONFIG_NAME, **hf_hub_download_kwargs, ) except Exception: raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'") loaded_attributes = cls.from_json_file(config_file) return loaded_attributes["peft_type"] @classmethod def check_kwargs(cls, **kwargs): """Check kwargs before initializing the config instance. Subclasses can override this method to add specific checks. """ return kwargs @property def is_prompt_learning(self) -> bool: r""" Utility method to check if the configuration is for prompt learning. """ return False @property def is_adaption_prompt(self) -> bool: """Return True if this is an adaption prompt config.""" return False @dataclass class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. 
""" base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific base model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) @dataclass class PromptLearningConfig(PeftConfig): """ This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or [`PromptTuning`]. Args: num_virtual_tokens (`int`): The number of virtual tokens to use. token_dim (`int`): The hidden embedding dimension of the base transformer model. num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model. num_attention_heads (`int`): The number of attention heads in the base transformer model. num_layers (`int`): The number of layers in the base transformer model. """ num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"}) token_dim: int = field( default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"} ) num_transformer_submodules: Optional[int] = field( default=None, metadata={"help": "Number of transformer submodules"} ) num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"}) num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"}) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of extra modules to be set as trainable and saved in the final checkpoint. 
" "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved. " "The module(s) will be fully fine-tuned." }, ) @property def is_prompt_learning(self) -> bool: r""" Utility method to check if the configuration is for prompt learning. """ return True
peft/src/peft/config.py/0
{ "file_path": "peft/src/peft/config.py", "repo_id": "peft", "token_count": 5959 }
248
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from .layer import AdaLoraLayer class SVDQuantLinear(torch.nn.Module, AdaLoraLayer): def __init__( self, base_layer, adapter_name, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() AdaLoraLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def forward(self, x: torch.Tensor) -> torch.Tensor: result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] lora_E = self.lora_E[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] ranknum = self.ranknum[active_adapter] + 1e-5 requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = self._cast_input_dtype(x, torch.float32) output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum # TODO: here, the dtype conversion is applied on the *whole expression*, # not the intermediate 
result, unlike for SVDLinear8bitLT and # SVDLinear4bit, is that correct? if requires_conversion: output = output.to(expected_dtype) result += output return result def __repr__(self) -> str: rep = super().__repr__() return "adalora." + rep
peft/src/peft/tuners/adalora/gptq.py/0
{ "file_path": "peft/src/peft/tuners/adalora/gptq.py", "repo_id": "peft", "token_count": 1154 }
249
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from dataclasses import dataclass, field from typing import Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class BoneConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`BoneModel`]. Args: r (`int`): The rank of Bone across different layers. It is best to set 'r' to an even number; otherwise, the default initialization method will not work. target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. 
When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. init_weights (bool | Literal["bat"]): Different initializations correspond to different Bone variants. By default, setting True uses the Bone structure, while "bat" selects the Bat structure. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None`. modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. """ r: int = field( default=64, metadata={ "help": "The rank of Bone across different layers.", "note": "It is best to set 'r' to an even number; otherwise, the default initialization method will not work.", }, ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with Bone.", "example": "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' ", }, ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from Bone."}, ) init_weights: bool | Literal["bat"] = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the Bone layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." 
), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[str] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." }, ) bias: str = field(default="none", metadata={"help": "Bias type for Bone. Can be 'none', 'all' or 'Bone_only'"}) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from Bone layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.BONE self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") warnings.warn( "Bone will be removed in v0.19.0 of PEFT, use `MissConfig` instead. 
" "If you already have a Bone checkpoint, you can use `/scripts/convert-bone-to-miss.py` to convert it into " )
peft/src/peft/tuners/bone/config.py/0
{ "file_path": "peft/src/peft/tuners/bone/config.py", "repo_id": "peft", "token_count": 2324 }
250
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class HRAConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`HRAModel`]. Args: r (`int`): The rank of HRA across different layers. It is best to set 'r' to an even number; otherwise, the default initialization method will not work. apply_GS (`bool`): Whether to apply Gram-Schmidt orthogonalization. target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. 
When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. init_weights (`bool`): Whether to perform initialization of HRA weights. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`Optional[Union[List[str], str]]`): The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`. modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. """ r: int = field( default=8, metadata={ "help": "The rank of HRA across different layers.", "note": "It is best to set 'r' to an even number; otherwise, the default initialization method will not work.", }, ) apply_GS: bool = field( default=False, metadata={"help": "Whether to apply Gram-Schmidt orthogonalization or not."}, ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with HRA.", "example": "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' ", }, ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from HRA."}, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the HRA layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." 
), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. " "This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`." }, ) bias: str = field(default="none", metadata={"help": "Bias type for HRA. Can be 'none', 'all' or 'hra_only'"}) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from HRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." 
}, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.HRA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") # check for layers_to_transform and layers_pattern if self.layers_pattern and not self.layers_to_transform: raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
peft/src/peft/tuners/hra/config.py/0
{ "file_path": "peft/src/peft/tuners/hra/config.py", "repo_id": "peft", "token_count": 2474 }
251
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import math
import warnings
from contextlib import contextmanager
from typing import Any, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging import version
from torch import svd_lowrank
from transformers.pytorch_utils import Conv1D

from peft.tuners._buffer_dict import BufferDict
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.integrations import (
    dequantize_module_weight,
    gather_params_ctx,
    get_bnb_param_type,
    skip_init_on_device,
)
from peft.utils.other import transpose
from peft.utils.warning import PeftWarning

from .config import LoraConfig


class LoraVariant:
    """
    Base class for LoRA variants, e.g. DoRA. This class should be subclassed and the methods below should be
    implemented accordingly. The methods should be implemented as static methods, this makes it easier to combine
    variants.

    Note for developers: These methods are prone to change and should thus be considered to be "private". Use at your
    own discretion.
    """

    @staticmethod
    def init(module: LoraLayer, adapter_name: str) -> None:
        """Initialization code for the LoRA variant, it's called within `update_layer`"""
        raise NotImplementedError

    @staticmethod
    def merge_safe(module: LoraLayer, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor:
        """Safe merging of the weights from `merge(..., safe_merge=True)`, should return a new tensor"""
        raise NotImplementedError

    @staticmethod
    def merge_unsafe(module: LoraLayer, active_adapter: str, orig_weight: torch.Tensor) -> None:
        """Unsafe merging of the weights from `merge(..., safe_merge=False)`, should modify the weight in-place"""

    @staticmethod
    def unmerge(module: LoraLayer, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor:
        """Remove the adapter weights from the original weights, then return them"""

    @staticmethod
    def forward(module: LoraLayer, active_adapter: str, x: torch.Tensor, result: torch.Tensor) -> torch.Tensor:
        """
        The forward pass of the LoRA variant, should return the overall result (not just the diff)

        Args:
            module (LoraLayer): The module on which the forward pass is called
            active_adapter (str): The name of the active adapter
            x (torch.Tensor): The input to the forward call
            result (torch.Tensor): The result from the base model
        """
        raise NotImplementedError


class LoraLayer(BaseTunerLayer):
    # Mixin holding the per-adapter LoRA state (A/B matrices, scaling, dropout) for a wrapped base layer.
    # All names of layers that may contain (trainable) adapter weights
    adapter_layer_names: tuple[str, ...] = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B")
    # All names of other parameters that may contain adapter-related parameters
    other_param_names: tuple[str, ...] = ("r", "lora_alpha", "scaling", "lora_dropout")

    def __init__(self, base_layer: nn.Module, ephemeral_gpu_offload: bool = False, **kwargs) -> None:
        """Set up empty per-adapter containers and infer (in_features, out_features) from the base layer type."""
        self.base_layer = base_layer
        self.r = {}
        self.lora_alpha = {}
        self.scaling = {}
        self.lora_dropout = nn.ModuleDict({})
        self.lora_A = nn.ModuleDict({})
        self.lora_B = nn.ModuleDict({})
        # For Embedding layer
        self.lora_embedding_A = nn.ParameterDict({})
        self.lora_embedding_B = nn.ParameterDict({})
        # Mark the weight as unmerged
        self._disable_adapters = False
        self.merged_adapters = []
        self.use_dora: dict[str, bool] = {}  # not actively used anymore after #2443, keep it for BC
        self.lora_bias: dict[str, bool] = {}
        self.lora_magnitude_vector = torch.nn.ModuleDict()  # for DoRA
        self._caches: dict[str, Any] = {}
        self.ephemeral_gpu_offload: bool = ephemeral_gpu_offload
        # flag to enable/disable casting of input to weight dtype during forward call
        self.cast_input_dtype_enabled: bool = True
        self.lora_variant: dict[str, LoraVariant] = {}
        self.kwargs = kwargs

        # Long type-dispatch chain: each supported base layer (dense, conv, embedding, attention, and the
        # various quantized linear implementations) exposes its in/out dimensions under different attribute names.
        base_layer = self.get_base_layer()
        if isinstance(base_layer, nn.Linear):
            torch_supports_dtensor = version.parse(torch.__version__) >= version.parse("2.5.0")
            if torch_supports_dtensor and isinstance(self.base_layer.weight, torch.distributed.tensor.DTensor):
                # If Tensor Parallel is used, the weight is sharded, so we need to get the local shape
                out_features, in_features = self.base_layer.weight.to_local().shape
            else:
                in_features, out_features = base_layer.in_features, base_layer.out_features
        elif isinstance(base_layer, nn.Conv1d):
            in_features, out_features = base_layer.in_channels, base_layer.out_channels
        elif isinstance(base_layer, nn.Conv2d):
            in_features, out_features = base_layer.in_channels, base_layer.out_channels
        elif isinstance(base_layer, nn.Conv3d):
            in_features, out_features = base_layer.in_channels, base_layer.out_channels
        elif isinstance(base_layer, nn.Embedding):
            in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim
        elif isinstance(base_layer, Conv1D):
            in_features, out_features = (
                base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
            )
        elif isinstance(base_layer, nn.MultiheadAttention):
            if not base_layer._qkv_same_embed_dim:
                raise ValueError(f"Only same dim for query/key/value is supported as of now for {self.__class__}.")
            in_features, out_features = base_layer.embed_dim, 3 * base_layer.embed_dim
        elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"):
            # QuantLinear
            in_features, out_features = base_layer.infeatures, base_layer.outfeatures
        elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"):
            # Megatron ColumnParallelLinear,RowParallelLinear
            in_features, out_features = base_layer.input_size, base_layer.output_size
        elif hasattr(base_layer, "codebooks") and base_layer.__class__.__name__ == "QuantizedLinear":
            # AQLM QuantLinear
            in_features, out_features = base_layer.in_features, base_layer.out_features
        elif hasattr(base_layer, "w_bit") and base_layer.__class__.__name__ == "WQLinear_GEMM":
            # Awq layers
            in_features, out_features = base_layer.in_features, base_layer.out_features
        elif base_layer.__class__.__name__ == "EetqLinear":
            # Eetq layers
            in_features, out_features = base_layer.in_features, base_layer.out_features
        elif hasattr(base_layer, "W_q") and base_layer.__class__.__name__ == "HQQLinear":
            # HQQ layers
            in_features, out_features = base_layer.in_features, base_layer.out_features
        elif base_layer.__class__.__name__ == "PatchedLinear":
            # INC layers
            in_features, out_features = base_layer.in_features, base_layer.out_features
        else:
            # possibly support user provided custom layer types using dynamic dispatch
            if hasattr(base_layer, "in_features") and hasattr(base_layer, "out_features"):
                in_features, out_features = base_layer.in_features, base_layer.out_features
            else:
                in_features, out_features = None, None
            warnings.warn(
                f"Unsupported layer type '{type(base_layer)}' encountered, proceed at your own risk.", UserWarning
            )

        self.in_features = in_features
        self.out_features = out_features

    def resolve_lora_variant(self, *, use_dora: bool, **kwargs) -> Optional[LoraVariant]:
        """Return a matching LoRA variant for this layer type.

        Given the init arguments of this layer, return the correct LoRA variant, if any. E.g., if `use_dora=True`,
        this method should return the DoRA variant for the given layer.

        If there is no fitting variant, return None.

        Note: If this layer type does not support the LoRA variant at all, please raise an error during __init__ as is
        convention, and not here.
        """
        return None

    def update_layer(
        self,
        adapter_name,
        r,
        lora_alpha,
        lora_dropout,
        init_lora_weights,
        use_rslora,
        use_dora: bool = False,
        use_qalora: bool = False,
        lora_bias: bool = False,
        qalora_group_size: int = 32,
        **kwargs,
    ):
        """Create (or replace) the LoRA parameters for `adapter_name` and run the chosen weight initialization.

        Raises:
            ValueError: if `r` is not a positive integer or the init scheme name is unknown.
        """
        # collect the kwargs
        kwargs = locals().copy()
        del kwargs["self"]

        # This code works for linear layers, override for other layer types
        if r <= 0:
            raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")

        if lora_bias and (getattr(self.get_base_layer(), "bias", None) is None):
            warnings.warn(
                f"`lora_bias=True` was passed but the targeted layer of type {type(self.get_base_layer()).__name__} "
                "has no bias. This means that merging LoRA weights won't be possible.",
                PeftWarning,
            )

        lora_variant = self.resolve_lora_variant(
            use_dora=use_dora, use_qalora=use_qalora, qalora_group_size=qalora_group_size
        )
        if lora_variant is not None:
            self.lora_variant[adapter_name] = lora_variant

        self.r[adapter_name] = r
        self.lora_alpha[adapter_name] = lora_alpha
        if lora_dropout > 0.0:
            lora_dropout_layer = nn.Dropout(p=lora_dropout)
        else:
            lora_dropout_layer = nn.Identity()

        self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
        # Actual trainable parameters
        self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False)
        self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=lora_bias)
        self.lora_bias[adapter_name] = lora_bias

        # rsLoRA scales by alpha/sqrt(r) instead of the classic alpha/r.
        if use_rslora:
            self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
        else:
            self.scaling[adapter_name] = lora_alpha / r

        self.use_dora[adapter_name] = use_dora

        # for inits that require access to the base weight, use gather_param_ctx so that the weight is gathered when using DeepSpeed
        if isinstance(init_lora_weights, str) and init_lora_weights.startswith("pissa"):
            with gather_params_ctx(self.get_base_layer().weight):
                self.pissa_init(adapter_name, init_lora_weights)
        elif isinstance(init_lora_weights, str) and init_lora_weights.startswith("corda"):
            with gather_params_ctx(self.get_base_layer().weight):
                self.corda_init(adapter_name, init_lora_weights)
        elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == "olora":
            with gather_params_ctx(self.get_base_layer().weight):
                self.olora_init(adapter_name)
        elif init_lora_weights == "loftq":
            with gather_params_ctx(self.get_base_layer().weight):
                self.loftq_init(adapter_name)
        elif init_lora_weights == "eva":
            nn.init.zeros_(self.lora_B[adapter_name].weight)
        elif init_lora_weights == "orthogonal":
            with gather_params_ctx(self.get_base_layer().weight):
                self.orthogonal_init(adapter_name)
        elif init_lora_weights:
            self.reset_lora_parameters(adapter_name, init_lora_weights)

        # call this before init of the lora variants
        self._move_adapter_to_device_of_base_layer(adapter_name)

        if adapter_name in self.lora_variant:
            self.lora_variant[adapter_name].init(self, **kwargs)

        self.set_adapter(self.active_adapters)

    def reset_lora_parameters(self, adapter_name, init_lora_weights):
        """Re-initialize the LoRA A/B parameters for `adapter_name` (`True` = Kaiming A / zero B, "gaussian" = normal A)."""
        if init_lora_weights is False:
            return

        if adapter_name in self.lora_A.keys():
            if init_lora_weights is True:
                # initialize A the same way as the default for nn.Linear and B to zero
                # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124
                nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
            elif init_lora_weights.lower() == "gaussian":
                nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name])
            else:
                raise ValueError(f"Unknown initialization {init_lora_weights=}")
            nn.init.zeros_(self.lora_B[adapter_name].weight)
            if self.lora_bias[adapter_name]:
                nn.init.zeros_(self.lora_B[adapter_name].bias)
        if adapter_name in self.lora_embedding_A.keys():
            # Initialize A to zeros and B the same way as the default for nn.Embedding, see:
            # https://github.com/microsoft/LoRA/blob/4c0333854cb905966f8cc4e9a74068c1e507c7b7/loralib/layers.py#L59-L60
            nn.init.zeros_(self.lora_embedding_A[adapter_name])
            nn.init.normal_(self.lora_embedding_B[adapter_name])
            if self.lora_bias[adapter_name]:
                # embeddings are not supported at the moment, but still adding this for consistency
                nn.init.zeros_(self.lora_embedding_B[adapter_name].bias)

    def olora_init(self, adapter_name):
        """OLoRA init: QR-decompose the base weight into A/B and subtract the scaled product from the base weight."""
        base_layer = self.get_base_layer()
        orig_weight = base_layer.weight
        bnb_param_type = get_bnb_param_type(orig_weight)
        dtype = orig_weight.dtype

        if bnb_param_type:
            # check without importing bitsandbytes and robust to bnb_4bit_quant_storage=float*
            weight_tensor = dequantize_module_weight(base_layer)
        elif dtype in [torch.float32, torch.float16, torch.bfloat16]:
            weight_tensor = orig_weight
        else:
            raise TypeError(f"Unsupported data type for the base layer. Got {dtype}.")

        scale_factor = self.scaling[adapter_name]
        r = self.r[adapter_name]
        weight_tensor = weight_tensor.to(torch.float32)
        Q, R = torch.linalg.qr(weight_tensor.data)

        Qr, Rr = Q[:, :r], R[:r]

        self.lora_A[adapter_name].weight.data = Rr.contiguous()
        self.lora_B[adapter_name].weight.data = Qr.contiguous()

        # Subtract scaling * B @ A so that base + LoRA initially equals the original weight.
        weight_tensor.data -= scale_factor * self.lora_B[adapter_name].weight @ self.lora_A[adapter_name].weight
        if bnb_param_type == "4bit":
            # Re-quantize the residual weight with the original bnb parameters.
            weight_tensor = orig_weight.__class__(
                weight_tensor,
                quant_type=orig_weight.quant_type,
                quant_storage=orig_weight.quant_storage,
                compress_statistics=orig_weight.compress_statistics,
                module=orig_weight.module,
            ).to(orig_weight.device)
            base_layer.weight = weight_tensor
        elif bnb_param_type == "8bit":
            weight_tensor = orig_weight.__class__(
                weight_tensor,
                requires_grad=orig_weight.requires_grad,
                has_fp16_weights=orig_weight.has_fp16_weights,
            ).to(orig_weight.device)
            base_layer.weight = weight_tensor
        else:
            weight_tensor = weight_tensor.to(dtype)
            base_layer.weight.data = weight_tensor

    def pissa_init(self, adapter_name, init_lora_weights):
        """PiSSA init: SVD-based split of the base weight into principal (LoRA) and residual (base) components."""
        weight = self.get_base_layer().weight
        dtype = weight.dtype
        if dtype not in [torch.float32, torch.float16, torch.bfloat16]:
            raise TypeError(
                "Please initialize PiSSA under float32, float16, or bfloat16. "
                "Subsequently, re-quantize the residual model to help minimize quantization errors."
            )

        weight = transpose(weight.to(torch.float32), self.fan_in_fan_out)
        if init_lora_weights == "pissa":
            # USV^T = W <-> VSU^T = W^T, where W^T = weight.data in R^{out_channel, in_channel},
            V, S, Uh = torch.linalg.svd(weight.data, full_matrices=False)
            Vr = V[:, : self.r[adapter_name]]
            Sr = S[: self.r[adapter_name]]
            Sr /= self.scaling[adapter_name]
            Uhr = Uh[: self.r[adapter_name]]
        elif len(init_lora_weights.split("_niter_")) == 2:
            # "pissa_niter_<n>": faster randomized low-rank SVD with <n> subspace iterations.
            Vr, Sr, Ur = svd_lowrank(
                weight.data, self.r[adapter_name], niter=int(init_lora_weights.split("_niter_")[-1])
            )
            Sr /= self.scaling[adapter_name]
            Uhr = Ur.t()
        else:
            raise ValueError(
                f"init_lora_weights should be 'pissa' or 'pissa_niter_[number of iters]', got {init_lora_weights} instead."
            )
        lora_A = torch.diag(torch.sqrt(Sr)) @ Uhr
        lora_B = Vr @ torch.diag(torch.sqrt(Sr))
        self.lora_A[adapter_name].weight.data = lora_A
        self.lora_B[adapter_name].weight.data = lora_B
        # Base layer keeps the residual so base + scaling * B @ A reproduces the original weight.
        weight = weight.data - self.scaling[adapter_name] * lora_B @ lora_A
        weight = transpose(weight.to(dtype), self.fan_in_fan_out)
        self.get_base_layer().weight.data = weight

    def corda_init(self, adapter_name, init_lora_weights):
        """CorDA init: build A/B from covariance-driven eigendecomposition attached by `preprocess_corda`."""
        linear = self.get_base_layer()
        weight = linear.weight
        dtype = weight.dtype
        if dtype not in [torch.float32, torch.float16, torch.bfloat16]:
            raise TypeError(
                "Please initialize CorDA under float32, float16, or bfloat16. "
                "Subsequently, re-quantize the residual model to help minimize quantization errors."
            )
        weight = weight.to(torch.float32)
        out_dim = weight.data.size(0)
        in_dim = weight.data.size(1)

        # Calculate WC from covariance matrix
        if not hasattr(linear, "eigens"):
            raise ValueError(
                "`eigens` attribute not found for layer, please run `preprocess_corda` first. "
                "More information can be found at examples/corda_finetuning/README.md."
            )
        eigens = linear.eigens
        U = eigens.U_WC
        S = eigens.S_WC
        V = eigens.V_WC
        r = self.r[adapter_name]

        # nan or inf check
        if torch.isnan(S).any() or torch.isinf(S).any():
            raise ValueError(
                "Invalid value found in matrix S. Please file an issue at https://github.com/huggingface/peft/issues."
            )
        if torch.isnan(U).any() or torch.isinf(U).any():
            raise ValueError(
                "Invalid value found in matrix U. Please file an issue at https://github.com/huggingface/peft/issues."
            )
        if torch.isnan(V).any() or torch.isinf(V).any():
            raise ValueError(
                "Invalid value found in matrix V. Please file an issue at https://github.com/huggingface/peft/issues."
            )

        # Sanity check
        if U.size(0) != out_dim or U.size(1) != r:
            raise ValueError(
                f"Matrix U size mismatch: {U.size()} vs. ({out_dim}, {r}). Please make sure the `lora_config` and "
                "`model` argument of `preprocess_corda` is consistent with `get_peft_model`. If you're using cache "
                "in `preprocess_corda`, please make sure the cache is built with the same model and LoRA rank."
            )
        if S.size(0) != r:
            raise ValueError(
                f"Matrix S size mismatch: {S.size()} vs. ({r},). Please make sure the `lora_config` and `model` argument "
                "of `preprocess_corda` is consistent with `get_peft_model`. If you're using cache in `preprocess_corda`, "
                "please make sure the cache is built with the same model and LoRA rank."
            )
        if V.size(0) != in_dim or V.size(1) != r:
            raise ValueError(
                f"Matrix V size mismatch: {V.size()} vs. ({in_dim}, {r}). Please make sure the `lora_config` and "
                "`model` argument of `preprocess_corda` is consistent with `get_peft_model`. If you're using cache "
                "in `preprocess_corda`, please make sure the cache is built with the same model and LoRA rank."
            )

        # Apply alpha
        S /= self.scaling[adapter_name]

        # Init lora_A and lora_B weights
        lora_A = V.t().mul(S.sqrt().view(-1, 1)).contiguous()
        lora_B = U.mul(S.sqrt()).contiguous()
        self.lora_A[adapter_name].weight.data = lora_A
        self.lora_B[adapter_name].weight.data = lora_B
        # Base layer keeps the residual so base + scaling * B @ A reproduces the original weight.
        weight = weight.data - self.scaling[adapter_name] * lora_B @ lora_A
        weight = weight.to(dtype)
        self.get_base_layer().weight.data = weight

        # Remove redundant fields
        del linear.eigens

    def loftq_init(self, adapter_name):
        """LoftQ init: jointly quantize the base weight and fit A/B to minimize the quantization error."""
        from peft.utils.loftq_utils import loftq_init

        weight = self.get_base_layer().weight
        kwargs = {
            "num_bits": self.kwargs.get("loftq_bits", 4),
            "reduced_rank": self.r[adapter_name],
            "num_iter": self.kwargs.get("loftq_iter", 1),
        }

        qweight, lora_A, lora_B = loftq_init(weight, **kwargs)
        if adapter_name in self.lora_A.keys():
            # initialize A the same way as the default for nn.Linear and B to zero
            self.lora_A[adapter_name].weight.data = lora_A
            self.lora_B[adapter_name].weight.data = lora_B
        if adapter_name in self.lora_embedding_A.keys():
            # initialize a the same way as the default for nn.linear and b to zero
            self.lora_embedding_A[adapter_name].weight.data = lora_A
            self.lora_embedding_B[adapter_name].weight.data = lora_B
        self.get_base_layer().weight.data = qweight

    @torch.no_grad()
    def orthogonal_init(self, adapter_name):
        """Orthogonal init: derive A and B from disjoint (odd/even) rows of a random orthogonal matrix."""
        # https://datta0.github.io/posts/rethink-lora-init/#orthogonal-initialisation
        rank = self.r[adapter_name]
        if rank % 2 != 0:
            raise ValueError(f"Orthogonal initialization requires the LoRA rank to be even, got {rank} instead.")

        X = torch.randn(rank, rank)
        Q, _ = torch.linalg.qr(X)
        q_odd = Q[0::2, :]  # Odd rows
        q_even = Q[1::2, :]  # Even rows
        dtype = self.get_base_layer().weight.dtype

        lora_A = torch.randn(self.in_features, rank // 2).mm(q_odd).T / 10.0
        lora_B = torch.randn(rank // 2, self.out_features).T.mm(q_even) / 10.0

        self.lora_A[adapter_name].weight = nn.Parameter(lora_A.contiguous().to(dtype))
        self.lora_B[adapter_name].weight = nn.Parameter(lora_B.contiguous().to(dtype))

    def _cache_store(self, key: str, value: Any) -> None:
        """Store `value` in the layer-local cache under `key`."""
        self._caches[key] = value

    def _cache_pop(self, key: str) -> Any:
        """Remove and return the cached value for `key` (raises KeyError if absent)."""
        value = self._caches.pop(key)
        return value

    def set_scale(self, adapter: str, scale: float | int) -> None:
        """Set the scale of the given adapter to the initial scale multiplied by the provided factor

        The initial scale is determined by the configured `r` (rank) and `lora_alpha`.
        """
        if adapter not in self.scaling:
            # Ignore the case where the adapter is not in the layer
            return

        self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter]

    def scale_layer(self, scale: float | int) -> None:
        """Multiply the current scale of all active adapters by the provided factor"""
        if scale == 1:
            return

        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue

            self.scaling[active_adapter] *= scale

    def unscale_layer(self, scale: Optional[float | int] = None) -> None:
        """Divide the current scale of all active adapters by the provided factor. If `scale=None` is passed, reset to
        initial scale

        The initial scale is determined by the configured `r` (rank) and `lora_alpha`.
        """
        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue

            if scale is None:
                self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter]
            else:
                self.scaling[active_adapter] /= scale

    def _check_forward_args(self, x, *args, **kwargs):
        """Check if the arguments are compatible with the configs and state of the model"""
        adapter_names = kwargs.get("adapter_names", None)
        if adapter_names is None:
            return

        if len(x) != len(adapter_names):
            msg = (
                "Length of `adapter_names` should be the same as the number of inputs, but got "
                f"{len(adapter_names)} and {len(x)} respectively."
            )
            raise ValueError(msg)

        if self.merged:
            # It is unclear what would be the right thing to do if users pass adapter_names and there are merged
            # adapters. Therefore, it is better to raise an error in this case.
            msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
            raise ValueError(msg)

        # DoRA is not supported (yet), check that it's not being used. Don't check "__base__", as this is the
        # placeholder for the base model.
        unique_adapters = {name for name in adapter_names if name != "__base__"}
        for adapter_name in unique_adapters:
            if self.use_dora.get(adapter_name, False):
                msg = "Cannot pass `adapter_names` when DoRA is enabled."
                raise ValueError(msg)

    def _mixed_batch_forward(
        self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
    ) -> torch.Tensor:
        """Forward pass where each sample in the batch may use a different adapter (per `adapter_names`)."""
        # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
        # extra argument that allows mixing different adapters in the same batch at inference time.
        result = self.base_layer(x, *args, **kwargs)
        torch_result_dtype = result.dtype

        unique_adapters = set(adapter_names)
        sub_batch_indices_list = []
        for adapter in unique_adapters:
            sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])

        for i, active_adapter in enumerate(unique_adapters):
            if active_adapter == "__base__":
                continue
            if active_adapter not in self.lora_A.keys():
                continue

            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]

            # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
            # layer output
            sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype)
            lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling
            result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype)

        return result


# Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
# and modified to work with PyTorch FSDP
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
#  All rights reserved.
#  Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
#  ------------------------------------------------------------------------------------------


class Linear(nn.Module, LoraLayer):
    # LoRA implemented in a dense (nn.Linear) layer.
    def __init__(
        self,
        base_layer,
        adapter_name: str,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
        is_target_conv_1d_layer: bool = False,
        init_lora_weights: Union[bool, str] = True,
        use_rslora: bool = False,
        use_dora: bool = False,
        lora_bias: bool = False,
        **kwargs,
    ) -> None:
        super().__init__()
        LoraLayer.__init__(self, base_layer, **kwargs)
        self.fan_in_fan_out = fan_in_fan_out

        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            init_lora_weights=init_lora_weights,
            use_rslora=use_rslora,
            use_dora=use_dora,
            lora_bias=lora_bias,
        )
        self.is_target_conv_1d_layer = is_target_conv_1d_layer

    def resolve_lora_variant(self, *, use_dora: bool, **kwargs) -> Optional[LoraVariant]:
        """Return the LoRA variant for this layer (DoRA) or None for vanilla LoRA."""
        if not use_dora:
            return None

        # imported lazily to avoid a circular import
        from .variants import DoraLinearVariant

        return DoraLinearVariant()

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """
        Merge the active adapter weights into the base weights

        Args:
            safe_merge (`bool`, *optional*):
                If True, the merge operation will be performed in a copy of the original weights and check for NaNs
                before merging the weights. This is useful if you want to check if the merge operation will produce
                NaNs. Defaults to `False`.
            adapter_names (`list[str]`, *optional*):
                The list of adapter names that should be merged. If None, all active adapters will be merged.
                Defaults to `None`.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return

        for active_adapter in adapter_names:
            if active_adapter in self.lora_A.keys():
                base_layer = self.get_base_layer()
                # Fix: determine the original dtype up front so that BOTH merge paths can cast the delta weight.
                # Previously the non-safe path added the delta without a dtype cast (and `orig_dtype` was only
                # defined inside the safe branch), unlike the Embedding and _ConvNd implementations in this file.
                orig_dtype = base_layer.weight.dtype
                if safe_merge:
                    # Note that safe_merge will be slower than the normal merge
                    # because of the copy operation.
                    orig_weight = base_layer.weight.data.clone()
                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        delta_weight = self.get_delta_weight(active_adapter)
                        orig_weight += delta_weight.to(orig_dtype)
                    else:
                        orig_weight = self.lora_variant[active_adapter].merge_safe(self, active_adapter, orig_weight)

                    if not torch.isfinite(orig_weight).all():
                        raise ValueError(
                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                        )

                    base_layer.weight.data = orig_weight

                    if self.lora_bias[active_adapter]:
                        if getattr(base_layer, "bias", None) is None:
                            raise RuntimeError(
                                "Impossible to merge LoRA with `lora_bias=True` because the base layer has no bias."
                            )
                        new_bias = base_layer.bias + self.lora_B[active_adapter].bias * self.scaling[active_adapter]
                        if not torch.isfinite(new_bias).all():
                            raise ValueError(
                                f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                            )
                        base_layer.bias.data = new_bias.to(orig_dtype)

                else:
                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        delta_weight = self.get_delta_weight(active_adapter)
                        # cast to the base dtype, consistent with Embedding.merge and _ConvNd.merge
                        base_layer.weight.data += delta_weight.to(orig_dtype)
                    else:
                        self.lora_variant[active_adapter].merge_unsafe(self, active_adapter, base_layer.weight)

                    if self.lora_bias[active_adapter]:
                        if getattr(base_layer, "bias", None) is None:
                            raise RuntimeError(
                                "Impossible to merge LoRA with `lora_bias=True` because the base layer has no bias."
                            )
                        base_layer.bias.data += self.lora_B[active_adapter].bias * self.scaling[active_adapter]

                self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        """
        This method unmerges all merged adapter layers from the base weights.
        """
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return

        # unmerge in reverse order of merging
        while len(self.merged_adapters) > 0:
            active_adapter = self.merged_adapters.pop()
            if active_adapter in self.lora_A.keys():
                weight = self.get_base_layer().weight
                if active_adapter not in self.lora_variant:  # vanilla LoRA
                    orig_dtype = weight.dtype
                    delta_weight = self.get_delta_weight(active_adapter)
                    weight.data -= delta_weight.to(orig_dtype)
                else:
                    unmerged = self.lora_variant[active_adapter].unmerge(self, active_adapter, weight)
                    weight.data = unmerged

                if self.lora_bias[active_adapter]:
                    self.get_base_layer().bias.data -= self.lora_B[active_adapter].bias * self.scaling[active_adapter]

    def get_delta_weight(self, adapter) -> torch.Tensor:
        """
        Compute the delta weight for the given adapter.

        Args:
            adapter (str):
                The name of the adapter for which the delta weight should be computed.
        """
        device = self.lora_B[adapter].weight.device
        dtype = self.lora_B[adapter].weight.dtype

        # In case users wants to merge the adapter weights that are in
        # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
        # (b)float16 because some CPUs have slow bf16/fp16 matmuls.
        cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)

        weight_A = self.lora_A[adapter].weight
        weight_B = self.lora_B[adapter].weight

        if cast_to_fp32:
            weight_A = weight_A.float()
            weight_B = weight_B.float()

        output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter]

        if cast_to_fp32:
            output_tensor = output_tensor.to(dtype=dtype)

            # cast back the weights
            self.lora_A[adapter].weight.data = weight_A.to(dtype)
            self.lora_B[adapter].weight.data = weight_B.to(dtype)

        return output_tensor

    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
        self._check_forward_args(x, *args, **kwargs)
        adapter_names = kwargs.pop("adapter_names", None)

        if self.disable_adapters:
            # adapters are disabled: undo any merged adapters and run the plain base layer
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif adapter_names is not None:
            # mixed-batch inference: different adapters for different samples in the batch
            result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
        elif self.merged:
            # adapters already folded into the base weights
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)
            torch_result_dtype = result.dtype
            lora_A_keys = self.lora_A.keys()
            for active_adapter in self.active_adapters:
                if active_adapter not in lora_A_keys:
                    continue

                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x = self._cast_input_dtype(x, lora_A.weight.dtype)

                if active_adapter not in self.lora_variant:  # vanilla LoRA
                    result = result + lora_B(lora_A(dropout(x))) * scaling
                else:
                    result = self.lora_variant[active_adapter].forward(
                        self,
                        active_adapter=active_adapter,
                        x=x,
                        result=result,
                    )

            result = result.to(torch_result_dtype)

        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lora." + rep
class Embedding(nn.Module, LoraLayer):
    # LoRA implemented in a Embedding layer
    def __init__(
        self,
        base_layer: nn.Module,
        adapter_name: str,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
        init_lora_weights: Union[bool, str] = True,
        use_rslora: bool = False,
        use_dora: bool = False,
        lora_bias: bool = False,
        **kwargs,
    ) -> None:
        if lora_bias:
            # lora_bias=True is not supported (yet) for embedding layers, as they use nn.Parameter
            raise ValueError(f"lora_bias={lora_bias} is not supported for {self.__class__.__name__}.")

        super().__init__()
        LoraLayer.__init__(self, base_layer)
        self.fan_in_fan_out = fan_in_fan_out

        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            init_lora_weights=init_lora_weights,
            use_rslora=use_rslora,
            use_dora=use_dora,
            lora_bias=lora_bias,
        )

    def resolve_lora_variant(self, *, use_dora: bool, **kwargs) -> Optional[LoraVariant]:
        """Return the LoRA variant for this layer (DoRA) or None for vanilla LoRA."""
        if not use_dora:
            return None

        # imported lazily to avoid a circular import
        from .variants import DoraEmbeddingVariant

        return DoraEmbeddingVariant()

    def update_layer(
        self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, lora_bias
    ):
        """Register a new adapter on this layer: create the LoRA embedding parameters, dropout, and scaling."""
        # collect the kwargs
        kwargs = locals().copy()
        del kwargs["self"]

        if r <= 0:
            raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")

        lora_variant = self.resolve_lora_variant(use_dora=use_dora)
        if lora_variant is not None:
            self.lora_variant[adapter_name] = lora_variant

        self.r[adapter_name] = r
        self.lora_alpha[adapter_name] = lora_alpha
        if lora_dropout > 0.0:
            lora_dropout_layer = nn.Dropout(p=lora_dropout)
        else:
            lora_dropout_layer = nn.Identity()

        self.lora_dropout[adapter_name] = lora_dropout_layer
        # Actual trainable parameters
        # Note: unlike Linear, embeddings use plain nn.Parameter (not nn.Linear modules) for A and B.
        weight_A = torch.randn((r, self.in_features))
        weight_B = torch.randn((self.out_features, r))
        self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A)
        self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B)
        self.lora_bias[adapter_name] = lora_bias

        # rsLoRA scales by alpha/sqrt(r) instead of alpha/r
        if use_rslora:
            self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
        else:
            self.scaling[adapter_name] = lora_alpha / r

        self.use_dora[adapter_name] = use_dora

        if init_lora_weights == "loftq":
            self.loftq_init(adapter_name)
        elif init_lora_weights:
            self.reset_lora_parameters(adapter_name, init_lora_weights)

        # call this before init of the lora variants
        self._move_adapter_to_device_of_base_layer(adapter_name)

        if adapter_name in self.lora_variant:
            self.lora_variant[adapter_name].init(self, **kwargs)

        self.set_adapter(self.active_adapters)

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """
        Merge the active adapter weights into the base weights

        Args:
            safe_merge (`bool`, *optional*):
                If True, the merge operation will be performed in a copy of the original weights and check for NaNs
                before merging the weights. This is useful if you want to check if the merge operation will produce
                NaNs. Defaults to `False`.
            adapter_names (`list[str]`, *optional*):
                The list of adapter names that should be merged. If None, all active adapters will be merged.
                Defaults to `None`.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return

        for active_adapter in adapter_names:
            if active_adapter in self.lora_embedding_A.keys():
                base_layer = self.get_base_layer()
                orig_dtype = base_layer.weight.dtype
                if safe_merge:
                    # Note that safe_merge will be slower than the normal merge
                    # because of the copy operation.
                    orig_weight = base_layer.weight.data.clone()
                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        orig_weight += self.get_delta_weight(active_adapter).to(orig_dtype)
                    else:
                        orig_weight = self.lora_variant[active_adapter].merge_safe(self, active_adapter, orig_weight)

                    if not torch.isfinite(orig_weight).all():
                        raise ValueError(
                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                        )

                    base_layer.weight.data = orig_weight
                else:
                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        base_layer.weight.data += self.get_delta_weight(active_adapter).to(orig_dtype)
                    else:
                        self.lora_variant[active_adapter].merge_unsafe(self, active_adapter, base_layer.weight)
                self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        """
        This method unmerges all merged adapter layers from the base weights.
        """
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return

        # unmerge in reverse order of merging
        while len(self.merged_adapters) > 0:
            active_adapter = self.merged_adapters.pop()
            orig_dtype = self.get_base_layer().weight.dtype
            if active_adapter in self.lora_embedding_A.keys():
                weight = self.get_base_layer().weight
                if active_adapter not in self.lora_variant:  # vanilla LoRA
                    weight.data -= self.get_delta_weight(active_adapter).to(orig_dtype)
                else:
                    unmerged = self.lora_variant[active_adapter].unmerge(self, active_adapter, weight)
                    weight.data = unmerged

    def get_delta_weight(self, adapter) -> torch.Tensor:
        """
        Compute the delta weight for the given adapter.

        Args:
            adapter (str):
                The name of the adapter for which the delta weight should be computed.
        """
        device = self.lora_embedding_B[adapter].device
        dtype = self.lora_embedding_A[adapter].dtype

        # In case users wants to merge the adapter weights that are in
        # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
        # (b)float16 because some CPUs have slow bf16/fp16 matmuls.
        cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)

        weight_A = self.lora_embedding_A[adapter]
        weight_B = self.lora_embedding_B[adapter]

        if cast_to_fp32:
            weight_A = weight_A.float()
            weight_B = weight_B.float()

        # fan_in_fan_out is always treated as True here (embedding weight layout)
        output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter]

        if cast_to_fp32:
            output_tensor = output_tensor.to(dtype=dtype)

            # cast back the weights
            self.lora_embedding_A[adapter] = weight_A.to(dtype)
            self.lora_embedding_B[adapter] = weight_B.to(dtype)

        return output_tensor

    def _mixed_batch_forward(
        self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
    ) -> torch.Tensor:
        # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
        # extra argument that allows mixing different adapters in the same batch at inference time.
        result = self.base_layer(x, *args, **kwargs)

        unique_adapters = set(adapter_names)
        sub_batch_indices_list = []
        # group sample indices by the adapter they should use
        for adapter in unique_adapters:
            sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])

        for i, active_adapter in enumerate(unique_adapters):
            if active_adapter == "__base__":
                # "__base__" means: use the base layer output only, no adapter
                continue
            if active_adapter not in self.lora_embedding_A.keys():
                continue

            embedding_A = self.lora_embedding_A[active_adapter].T
            embedding_B = self.lora_embedding_B[active_adapter].T
            scaling = self.scaling[active_adapter]

            # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear
            # layer output
            sub_batch = x[sub_batch_indices_list[i]]
            after_A = self._embed(sub_batch, embedding_A)
            result[sub_batch_indices_list[i]] += (after_A @ embedding_B) * scaling

        return result

    def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        # Embedding lookup with the given weight, mirroring all settings of the base embedding layer.
        base_layer = self.get_base_layer()
        return F.embedding(
            input,
            weight,
            padding_idx=base_layer.padding_idx,
            max_norm=base_layer.max_norm,
            norm_type=base_layer.norm_type,
            scale_grad_by_freq=base_layer.scale_grad_by_freq,
            sparse=base_layer.sparse,
        )

    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
        # TODO: no dtype conversion here, unlike in Linear, is that correct?
        self._check_forward_args(x, *args, **kwargs)
        adapter_names = kwargs.pop("adapter_names", None)

        if self.disable_adapters:
            # adapters are disabled: undo any merged adapters and run the plain base layer
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif adapter_names is not None:
            # mixed-batch inference: different adapters for different samples in the batch
            result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
        elif self.merged:
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)
            torch_result_dtype = result.dtype
            for active_adapter in self.active_adapters:
                if active_adapter not in self.lora_embedding_A:
                    continue

                if active_adapter not in self.lora_variant:  # vanilla LoRA
                    embedding_A = self.lora_embedding_A[active_adapter].T
                    embedding_B = self.lora_embedding_B[active_adapter].T
                    scaling = self.scaling[active_adapter]
                    after_A = self._embed(x, embedding_A)
                    result = result + (after_A @ embedding_B) * scaling
                else:
                    result = self.lora_variant[active_adapter].forward(
                        self,
                        active_adapter=active_adapter,
                        x=x,
                        result=result,
                    )
            result = result.to(torch_result_dtype)

        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lora." + rep
class _ConvNd(nn.Module, LoraLayer):
    # Lora implemented in a conv(2,3)d layer
    def __init__(
        self,
        base_layer: nn.Module,
        adapter_name: str,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        init_lora_weights: Union[bool, str] = True,
        use_rslora: bool = False,
        use_dora: bool = False,
        lora_bias: bool = False,
        **kwargs,
    ) -> None:
        super().__init__()
        LoraLayer.__init__(self, base_layer)

        if base_layer.groups > 1:
            # grouped convolutions are supported for forward but not for merging (see merge())
            warnings.warn("LoRA adapter added to ConvNd layer with groups > 1. Merging is not supported.")

        if r % base_layer.groups != 0:
            raise ValueError(
                f"Targeting a {base_layer.__class__.__name__} with groups={base_layer.groups} and rank {r}. "
                "Currently, support is limited to conv layers where the rank is divisible by groups. "
                "Either choose a different rank or do not target this specific layer."
            )

        self._active_adapter = adapter_name
        # number of dims of the conv kernel (e.g. 4 for Conv2d); used to pick out_kernel/out_stride shapes
        self._kernel_dim = base_layer.weight.dim()

        self.update_layer(
            adapter_name,
            r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            init_lora_weights=init_lora_weights,
            use_rslora=use_rslora,
            use_dora=use_dora,
            lora_bias=lora_bias,
        )

    def update_layer(
        self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, lora_bias
    ):
        """Register a new adapter on this layer: create the LoRA conv sub-layers, dropout, and scaling."""
        # collect the kwargs
        kwargs = locals().copy()
        del kwargs["self"]

        if r <= 0:
            raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")

        if lora_bias and (getattr(self.get_base_layer(), "bias", None) is None):
            warnings.warn(
                f"`lora_bias=True` was passed but the targeted layer of type {type(self.get_base_layer()).__name__} "
                "has no bias. This means that merging LoRA weights won't be possible.",
                PeftWarning,
            )

        lora_variant = self.resolve_lora_variant(use_dora=use_dora)
        if lora_variant is not None:
            self.lora_variant[adapter_name] = lora_variant

        self.r[adapter_name] = r
        self.lora_alpha[adapter_name] = lora_alpha
        if lora_dropout > 0.0:
            lora_dropout_layer = nn.Dropout(p=lora_dropout)
        else:
            lora_dropout_layer = nn.Identity()

        self.lora_dropout[adapter_name] = lora_dropout_layer
        # Actual trainable parameters
        base_layer = self.get_base_layer()
        kernel_size = base_layer.kernel_size
        stride = base_layer.stride
        padding = base_layer.padding
        # lora_A mirrors the base conv's kernel/stride/padding; lora_B is a 1x1(x1) conv back to out_features
        conv_layer = type(base_layer)
        out_kernel = out_stride = (1,) * (self._kernel_dim - 2)
        self.lora_A[adapter_name] = conv_layer(self.in_features, r, kernel_size, stride, padding, bias=False)
        self.lora_B[adapter_name] = conv_layer(
            r, self.out_features, out_kernel, out_stride, groups=base_layer.groups, bias=lora_bias
        )
        self.lora_bias[adapter_name] = lora_bias

        # rsLoRA scales by alpha/sqrt(r) instead of alpha/r
        if use_rslora:
            self.scaling[adapter_name] = lora_alpha / math.sqrt(r)
        else:
            self.scaling[adapter_name] = lora_alpha / r

        self.use_dora[adapter_name] = use_dora

        if init_lora_weights == "loftq":
            self.loftq_init(adapter_name)
        elif init_lora_weights:
            self.reset_lora_parameters(adapter_name, init_lora_weights)

        # call this before init of the lora variants
        self._move_adapter_to_device_of_base_layer(adapter_name)

        if adapter_name in self.lora_variant:
            self.lora_variant[adapter_name].init(self, **kwargs)

        self.set_adapter(self.active_adapters)

    def _get_dora_factor_view(self):
        # view shape to broadcast a per-output-channel factor over the conv kernel dims
        return (-1,) + (1,) * (self._kernel_dim - 1)

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """
        Merge the active adapter weights inside the base weights

        Args:
            safe_merge (`bool`, *optional*):
                If True, the merge operation will be performed in a copy of the original weights and check for NaNs
                before merging the weights. This is useful if you want to check if the merge operation will produce
                NaNs. Defaults to `False`.
            adapter_names (`list[str]`, *optional*):
                The list of adapter names that should be merged. If None, all active adapters will be merged.
                Defaults to `None`.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return

        for active_adapter in adapter_names:
            if active_adapter in self.lora_A.keys():
                base_layer = self.get_base_layer()
                orig_dtype = base_layer.weight.dtype
                if base_layer.groups > 1:
                    # https://github.com/huggingface/peft/pull/2403
                    raise NotImplementedError("Merging is not supported for _ConvNd layers with groups > 1!")

                if safe_merge:
                    # Note that safe_merge will be slower than the normal merge
                    # because of the copy operation.
                    orig_weight = base_layer.weight.data.clone()
                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        delta_weight = self.get_delta_weight(active_adapter)
                        orig_weight += delta_weight.to(orig_dtype)
                    else:
                        orig_weight = self.lora_variant[active_adapter].merge_safe(self, active_adapter, orig_weight)

                    if not torch.isfinite(orig_weight).all():
                        raise ValueError(
                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                        )

                    base_layer.weight.data = orig_weight

                    if self.lora_bias[active_adapter]:
                        if getattr(base_layer, "bias", None) is None:
                            raise RuntimeError(
                                "Impossible to merge LoRA with `lora_bias=True` because the base layer has no bias."
                            )
                        new_bias = base_layer.bias + self.lora_B[active_adapter].bias * self.scaling[active_adapter]
                        if not torch.isfinite(new_bias).all():
                            raise ValueError(
                                f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                            )
                        base_layer.bias.data = new_bias.to(orig_dtype)

                else:
                    if active_adapter not in self.lora_variant:  # vanilla LoRA
                        delta_weight = self.get_delta_weight(active_adapter)
                        base_layer.weight.data += delta_weight.to(orig_dtype)
                    else:
                        self.lora_variant[active_adapter].merge_unsafe(self, active_adapter, base_layer.weight)

                    if self.lora_bias[active_adapter]:
                        if getattr(base_layer, "bias", None) is None:
                            raise RuntimeError(
                                "Impossible to merge LoRA with `lora_bias=True` because the base layer has no bias."
                            )
                        base_layer.bias.data += self.lora_B[active_adapter].bias * self.scaling[active_adapter]

                self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        """
        This method unmerges all merged adapter layers from the base weights.
        """
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return

        # unmerge in reverse order of merging
        while len(self.merged_adapters) > 0:
            active_adapter = self.merged_adapters.pop()
            if active_adapter in self.lora_A.keys():
                weight = self.get_base_layer().weight
                if active_adapter not in self.lora_variant:  # vanilla LoRA
                    orig_dtype = weight.dtype
                    delta_weight = self.get_delta_weight(active_adapter)
                    weight.data -= delta_weight.to(orig_dtype)
                else:
                    unmerged = self.lora_variant[active_adapter].unmerge(self, active_adapter, weight)
                    weight.data = unmerged

                if self.lora_bias[active_adapter]:
                    self.get_base_layer().bias.data -= self.lora_B[active_adapter].bias * self.scaling[active_adapter]

    def get_delta_weight(self, adapter) -> torch.Tensor:
        """
        Compute the delta weight for the given adapter.

        Args:
            adapter (str):
                The name of the adapter for which the delta weight should be computed.
        """
        device = self.lora_B[adapter].weight.device
        dtype = self.lora_A[adapter].weight.dtype

        # In case users wants to merge the adapter weights that are in
        # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
        # (b)float16 because some CPUs have slow bf16/fp16 matmuls.
        cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)

        weight_A = self.lora_A[adapter].weight
        weight_B = self.lora_B[adapter].weight

        if cast_to_fp32:
            weight_A = weight_A.float()
            weight_B = weight_B.float()

        # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117
        if self.get_base_layer().weight.size()[2:4] == (1, 1):
            # conv2d 1x1: the composition reduces to a plain matmul over the channel dims
            output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(
                3
            ) * self.scaling[adapter]
        else:
            # general case: compose the two conv kernels by convolving A with B
            output_tensor = self.conv_fn(weight_A.transpose(0, 1), weight_B)

            if self.get_base_layer().groups > 1:
                output_tensor = output_tensor * self.scaling[adapter]
            else:
                output_tensor = output_tensor.transpose(0, 1) * self.scaling[adapter]

        if cast_to_fp32:
            output_tensor = output_tensor.to(dtype=dtype)

            # cast back the weights
            self.lora_A[adapter].weight.data = weight_A.to(dtype)
            self.lora_B[adapter].weight.data = weight_B.to(dtype)

        return output_tensor

    def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        self._check_forward_args(x, *args, **kwargs)
        adapter_names = kwargs.pop("adapter_names", None)

        if self.disable_adapters:
            # adapters are disabled: undo any merged adapters and run the plain base layer
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif adapter_names is not None:
            # mixed-batch inference: different adapters for different samples in the batch
            result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
        elif self.merged:
            result = self.base_layer(x, *args, **kwargs)
        else:
            result = self.base_layer(x, *args, **kwargs)
            torch_result_dtype = result.dtype

            for active_adapter in self.active_adapters:
                if active_adapter not in self.lora_A.keys():
                    continue

                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x = self._cast_input_dtype(x, lora_A.weight.dtype)

                if active_adapter not in self.lora_variant:  # vanilla LoRA
                    result = result + lora_B(lora_A(dropout(x))) * scaling
                else:
                    result = self.lora_variant[active_adapter].forward(
                        self,
                        active_adapter=active_adapter,
                        x=x,
                        result=result,
                    )

            result = result.to(torch_result_dtype)
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lora." + rep
class Conv2d(_ConvNd):
    # Lora implemented in a conv2d layer
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # a Conv2d kernel is (out_ch, in_ch/groups, kH, kW), i.e. 4-dimensional
        if not self._kernel_dim == 4:
            raise ValueError(f"Conv2d layer kernel must have 4 dimensions, not {self._kernel_dim}")
        self.conv_fn = F.conv2d

    def resolve_lora_variant(self, *, use_dora: bool, **kwargs) -> Optional[LoraVariant]:
        """Return the LoRA variant for this layer (DoRA) or None for vanilla LoRA."""
        if not use_dora:
            return None

        # imported lazily to avoid a circular import
        from .variants import DoraConv2dVariant

        return DoraConv2dVariant()


class Conv1d(_ConvNd):
    # Lora implemented in a conv1d layer
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # a Conv1d kernel is (out_ch, in_ch/groups, kL), i.e. 3-dimensional
        if not self._kernel_dim == 3:
            raise ValueError(f"Conv1d layer kernel must have 3 dimensions, not {self._kernel_dim}")
        self.conv_fn = F.conv1d

    def resolve_lora_variant(self, *, use_dora: bool, **kwargs) -> Optional[LoraVariant]:
        """Return the LoRA variant for this layer (DoRA) or None for vanilla LoRA."""
        if not use_dora:
            return None

        # imported lazily to avoid a circular import
        from .variants import DoraConv1dVariant

        return DoraConv1dVariant()


class Conv3d(_ConvNd):
    # Lora implemented in a conv3d layer
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # a Conv3d kernel is (out_ch, in_ch/groups, kD, kH, kW), i.e. 5-dimensional
        if not self._kernel_dim == 5:
            raise ValueError(f"Conv3d layer kernel must have 5 dimensions, not {self._kernel_dim}")
        self.conv_fn = F.conv3d

    def resolve_lora_variant(self, *, use_dora: bool, **kwargs) -> Optional[LoraVariant]:
        """Return the LoRA variant for this layer (DoRA) or None for vanilla LoRA."""
        if not use_dora:
            return None

        # imported lazily to avoid a circular import
        from .variants import DoraConv3dVariant

        return DoraConv3dVariant()


class MultiheadAttention(nn.Module, LoraLayer):
    """LoRA implemented in a multihead attention layer

    This is currently only implemented for the case of `_qkv_same_embed_dim = True`, i.e. query, key, and value having
    the same dimension.

    Note: LoRA is applied to both the in_proj (query/key/value) and out_proj. There is currently no way to specify only
    one of them.
Don't try to apply LoRA to the out_proj of MultiheadAttention by targeting that layer specifically, since the forward method of that layer is not being used, hence the LoRA adapter would be ignored. This is a little bit hacky because of the way that MultiheadAttention is implemented in PyTorch: There are no `nn.Linear` layers which we can hook onto or, in case of output projection, `.forward` is not used. This implementation works around these problems by merging the weights before the forward call and unmerging them after the forward call. """ def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: # TODO work with separate weights if not getattr(base_layer, "_qkv_same_embed_dim", True): # default for this value appears to be True: # https://github.com/pytorch/pytorch/blob/701ba5203fe68d55d655bd4d6c008be94cf34ea5/torch/nn/modules/activation.py#L1128-L1130 raise ValueError( f"Only same embed for query/key/value is supported as of now for {self.__class__.__name__}." ) if use_dora: # TODO: probably not so hard to implement raise ValueError(f"{self.__class__.__name__} does not support DoRA (yet), please set use_dora to False") super().__init__() LoraLayer.__init__(self, base_layer, **kwargs) # Note: LoRA is applied to both in_proj and out_proj. There is currently no way to only specify one of them. 
if isinstance(base_layer.out_proj, nn.Linear): self.base_layer.out_proj = Linear( base_layer.out_proj, adapter_name, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, **kwargs, ) else: raise ValueError(f"out_proj must be an instance of nn.Linear for {self.__class__.__name__}.") self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora) @property def embed_dim(self) -> int: return self.get_base_layer().embed_dim @property def kdim(self) -> Optional[int]: return self.get_base_layer().kdim @property def vdim(self) -> Optional[int]: return self.get_base_layer().vdim @property def _qkv_same_embed_dim(self) -> bool: return self.get_base_layer()._qkv_same_embed_dim @property def num_heads(self) -> int: return self.get_base_layer().num_heads @property def dropout(self) -> float: return self.get_base_layer().dropout @property def batch_first(self) -> bool: return self.get_base_layer().batch_first @property def head_dim(self) -> int: return self.get_base_layer().head_dim @property def in_proj_weight(self) -> nn.Parameter: return self.get_base_layer().in_proj_weight @property def in_proj_bias(self) -> nn.Parameter: return self.get_base_layer().in_proj_bias @property def out_proj(self) -> nn.Module: return self.get_base_layer().out_proj.get_base_layer() @property def bias_k(self) -> Optional[nn.Parameter]: return self.get_base_layer().bias_k @property def bias_v(self) -> Optional[nn.Parameter]: return self.get_base_layer().bias_v def merge_masks(self, *args, **kwargs) -> tuple[Optional[torch.Tensor], Optional[int]]: return self.get_base_layer().merge_masks(*args, **kwargs) @property def add_zero_attn(self) -> bool: return self.get_base_layer().add_zero_attn def update_layer(self, *args, **kwargs) -> None: super().update_layer(*args, **kwargs) # Note: LoRA is applied to both in_proj and out_proj. 
There is currently no way to only specify one of them. self.base_layer.out_proj.update_layer(*args, **kwargs) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return # Implementation follows this: # https://github.com/Baijiong-Lin/LoRA-Torch/blob/4bfed6820b64fcf47064c30f30606a190a4f0d2e/loratorch/layers.py#L73-L79 # Notably, instead of mutating the weight, we delete the original weight and replace it by the merged weight # TODO: work with separate weights for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() orig_dtype = base_layer.out_proj.weight.dtype if safe_merge: # TODO: work with separate weights # merging in_proj (nn.Parameter) orig_weight_in = base_layer.in_proj_weight.data.detach().clone() orig_weight_in += self.get_delta_weight(active_adapter).to(orig_dtype) if not torch.isfinite(orig_weight_in).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) # merging out_proj (subclass of nn.Linear) orig_weight_out = base_layer.out_proj.weight.data.detach().clone() orig_weight_out += base_layer.out_proj.get_delta_weight(active_adapter).to(orig_dtype) if not torch.isfinite(orig_weight_out).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) # unregister parameter implicitly and overwrite using merged weights; gradients are computed after # forward and, thus, after unmerging (see forward()), therefore this is safe to do. del base_layer.in_proj_weight base_layer.in_proj_weight = orig_weight_in del base_layer.out_proj.get_base_layer().weight base_layer.out_proj.get_base_layer().weight = orig_weight_out base_layer.out_proj.merge(adapter_names=[active_adapter]) else: # merging in_proj (nn.Parameter) # TODO: work with separate weights delta_weight = self.get_delta_weight(active_adapter).to(orig_dtype) weight_merged = base_layer.in_proj_weight.data.detach() + delta_weight # unregister parameter implicitly and overwrite using merged weights; gradients are computed after # forward and, thus, after unmerging (see forward()), therefore this is safe to do. del base_layer.in_proj_weight base_layer.in_proj_weight = weight_merged # merging out_proj (subclass of nn.Linear) delta_weight = base_layer.out_proj.get_delta_weight(active_adapter).to(orig_dtype) weight_merged = base_layer.out_proj.weight.data.detach() + delta_weight del base_layer.out_proj.get_base_layer().weight base_layer.out_proj.get_base_layer().weight = weight_merged base_layer.out_proj.merge(adapter_names=[active_adapter]) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return # TODO work with separate weights base_layer = self.get_base_layer() orig_dtype = base_layer.out_proj.base_layer.weight.dtype while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): # Ensure that requires_grad=False for the base weights after unmerging. This may not matter since # requires_grad was False when the optimizer was initialized, but still let's try to be correct here. 
# in_proj delta_weight = self.get_delta_weight(active_adapter).to(orig_dtype) old_weight = base_layer.in_proj_weight.data - delta_weight del base_layer.in_proj_weight base_layer.register_parameter("in_proj_weight", nn.Parameter(old_weight, requires_grad=False)) # out_proj delta_weight = base_layer.out_proj.get_delta_weight(active_adapter).to(orig_dtype) old_weight = base_layer.out_proj.base_layer.weight.data - delta_weight del base_layer.out_proj.base_layer.weight base_layer.out_proj.base_layer.register_parameter( "weight", nn.Parameter(old_weight, requires_grad=False) ) self.get_base_layer().out_proj.unmerge() def unload_and_optionally_merge_module( self, merge: bool, safe_merge: bool, adapter_names: Optional[list[str]] ) -> nn.MultiheadAttention: """ Merging and unloading of the MultiheadAttention module This requires an extra step for MultiheadAttention, which is why there is this special method instead of relying on the normal merge_and_unload code path. """ if merge: self.merge(safe_merge=safe_merge, adapter_names=adapter_names) base_layer = self.get_base_layer() # extra steps: re-register weights, take care of out_proj layer # in_proj weight = base_layer.in_proj_weight del base_layer.in_proj_weight base_layer.register_parameter("in_proj_weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) # out_proj out_proj_layer = base_layer.out_proj.get_base_layer() weight = out_proj_layer.weight del out_proj_layer.weight out_proj_layer.register_parameter("weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) base_layer.out_proj = out_proj_layer return base_layer def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. 
""" device = self.lora_B[adapter].weight.device dtype = self.lora_B[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = (weight_B @ weight_A) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def _check_forward_args(self, x, *args, **kwargs): if "adapter_names" in kwargs: raise TypeError(f"lora.{self.__class__.__name__} does not support mixed adapter batches.") super()._check_forward_args(x, *args, **kwargs) def forward(self, query: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: previous_dtype = query.dtype self._check_forward_args(query, *args, **kwargs) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(query, *args, **kwargs) elif self.merged: result = self.base_layer(query, *args, **kwargs) else: out_proj = self.get_base_layer().out_proj if out_proj.active_adapters != self.active_adapters: # We have a case that in_proj and out_proj have diverging merged adapters. We cannot # really deal with this correctly, thus it's better to raise than possibly create a hard to debug mess cls_name = self.get_base_layer().__class__.__name__ raise ValueError( f"The out_proj layer of {cls_name} has merged layers but {cls_name} itself doesn't; please ensure " "that either both or none have merged layers" ) # Merge all adapters that are active for this module, i.e. 
the LoRA weights for in_proj and out_proj. # in_proj uses nn.Parameters, therefore, there is no forward method to be used and we have to explicitly # merge for the LoRA weights to have an effect: # https://github.com/pytorch/pytorch/blob/6ebb26d572d5fcdc6ac0d1297bdf8d1eb5d20722/torch/nn/modules/activation.py#L1020 # For out_proj, we have an nn.Linear (or rather: NonDynamicallyQuantizableLinear), but its forward method # is not used: # https://github.com/pytorch/pytorch/blob/6ebb26d572d5fcdc6ac0d1297bdf8d1eb5d20722/torch/nn/modules/activation.py#L1267-L1271 # Therefore, its LoRA weights also need to be merged to have an effect. active_adapters = [a for a in self.active_adapters if a in self.lora_A] try: self.merge(adapter_names=active_adapters) result = self.base_layer(query, *args, **kwargs) finally: # it's safe to call unmerge(), which unmerges all adapters, because we checked that not self.merged, # i.e. there is was no merged layer before self.unmerge() result = (result[0].to(previous_dtype), result[1].to(previous_dtype) if result[1] is not None else result[1]) return result # The decorator is needed in case low_cpu_mem_usage=True is used, as we don't want the base layer weights to be # moved to meta device. This requires the use of PEFT's implementation of init_empty_weight instead of using the one # from accelerate. @skip_init_on_device def _restore_weights(self): # Restore the weights as registered parameters on the base layer. # This is necessary because the way that weights are merged/unmerged (which is necessary for forward to work # correctly), the Module "forgets" these attributes. Therefore, we need to call register_parameter explicitly. # We cannot call register_parameter for merging/unmerging because that cuts them off from the autograd graph. # Note that this is hacky, since we need to ensure that _restore_weights is called by each method that needs it. 
# in_proj # TODO work with separate weights base_layer = self.get_base_layer() weight = base_layer.in_proj_weight del base_layer.in_proj_weight base_layer.register_parameter("in_proj_weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) # out_proj base_layer = base_layer.out_proj.get_base_layer() weight = base_layer.weight del base_layer.weight base_layer.register_parameter("weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) def state_dict(self, *args, **kwargs): self._restore_weights() return super().state_dict(*args, **kwargs) def named_modules(self, *args, **kwargs): # Note: no need to also implement modules(), as modules() calls named_modules() under the hood self._restore_weights() return super().named_modules(*args, **kwargs) def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep class _LoraParameterProxy(nn.Module): """This proxies an `nn.Parameter` that is targeted with LoRA. Intended to be used in conjunction with `nn.utils.parametrize`, see `ParamWrapper`. """ def __init__(self, delta_weight): super().__init__() self.delta_weight = delta_weight def forward(self, W): with nn.utils.parametrize.cached(): return W + self.delta_weight # copied from: # https://github.com/pytorch/pytorch/blob/5e386eec9426f174eea130c0c012d9f65ebe65fb/torch/nn/utils/parametrize.py#L75-L79 def _register_parameter_or_buffer(module, name, X): if isinstance(X, nn.Parameter): module.register_parameter(name, X) else: module.register_buffer(name, X) class ParamWrapper(nn.Module, LoraLayer): """A LoRA wrapper for `nn.Parameter`. This layer is dispatched if users target a parameter directly with `lora_config.target_parameters` Note: - When accessing the wrapped nn.Parameter directly, e.g. via `module.weight`, the LoRA weights are *not* applied. - It is currently not implemented to target multiple parameters on the same module. 
To achieve this, it is currently required to create a separate LoRA adapter (with another adapter name) and activate both at the same time. """ def __init__( self, base_layer, adapter_name: str, parameter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_target_conv_1d_layer: bool = False, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, lora_bias: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer, **kwargs) self.parameter_name = parameter_name param = self.get_param() if param.ndim == 3: self.num_experts, self.in_features, self.out_features = param.shape else: self.num_experts, self.in_features, self.out_features = 1, param.shape[1], param.shape[0] if param.ndim not in (2, 3): raise ValueError( f"lora.{self.__class__.__name__} was initialized with {param.ndim} dimensional Parameter, but only 2d " "and 3d are supported." 
) if lora_dropout: # It's not possible to factor out x from lora_B(lora_A(dropout(x))), so dropout can't be correctly # implemented raise ValueError(f"lora.{self.__class__.__name__} does not work with lora_dropout != 0.") if fan_in_fan_out: raise ValueError(f"lora.{self.__class__.__name__} does not work with fan_in_fan_out.") if lora_bias: raise ValueError(f"lora.{self.__class__.__name__} does not work with lora_bias=True.") if use_dora: raise ValueError(f"lora.{self.__class__.__name__} does not work with use_dora=True.") if is_target_conv_1d_layer: raise ValueError(f"lora.{self.__class__.__name__} does not work with is_target_conv_1d_layer=True.") self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, lora_bias=lora_bias, ) def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool = False, use_qalora: bool = False, lora_bias: bool = False, qalora_group_size: int = 32, **kwargs, ): # same method as in lora.Linear but taking into account that there can be multiple experts (3d parameter) # collect the kwargs kwargs = locals().copy() del kwargs["self"] # This code works for linear layers, override for other layer types if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") lora_variant = self.resolve_lora_variant( use_dora=use_dora, use_qalora=use_qalora, qalora_group_size=qalora_group_size ) if lora_variant is not None: raise ValueError(f"lora.{self.__class__.__name__} does not work with LoRA variants like DoRA.") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: # It's not possible to factor out x from lora_B(lora_A(dropout(x))), so dropout can't be correctly # implemented raise ValueError(f"lora.{self.__class__.__name__} does not work with 
lora_dropout != 0.") else: lora_dropout_layer = nn.Identity() self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer})) # Actual trainable parameters # Difference to normal update_layer: consider experts. LoRA layers still use nn.Linear for consistency with # lora.Linear. self.lora_A[adapter_name] = nn.Linear(self.in_features, r * self.num_experts, bias=False) self.lora_B[adapter_name] = nn.Linear(r * self.num_experts, self.out_features, bias=lora_bias) self.lora_bias[adapter_name] = lora_bias if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r self.use_dora[adapter_name] = use_dora # for inits that require access to the base weight, use gather_param_ctx so that the weight is gathered when using DeepSpeed if isinstance(init_lora_weights, str) and init_lora_weights.startswith("pissa"): with gather_params_ctx(self.get_base_layer().weight): self.pissa_init(adapter_name, init_lora_weights) elif isinstance(init_lora_weights, str) and init_lora_weights.startswith("corda"): with gather_params_ctx(self.get_base_layer().weight): self.corda_init(adapter_name, init_lora_weights) elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == "olora": with gather_params_ctx(self.get_base_layer().weight): self.olora_init(adapter_name) elif init_lora_weights == "loftq": with gather_params_ctx(self.get_base_layer().weight): self.loftq_init(adapter_name) elif init_lora_weights == "eva": nn.init.zeros_(self.lora_B[adapter_name].weight) elif init_lora_weights == "orthogonal": with gather_params_ctx(self.get_base_layer().weight): self.orthogonal_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) # call this before init of the lora variants self._move_adapter_to_device_of_base_layer(adapter_name) if adapter_name in self.lora_variant: self.lora_variant[adapter_name].init(self, **kwargs) self.set_adapter(self.active_adapters) def 
_move_adapter_to_device_of_base_layer(self, adapter_name: str, device: Optional[torch.device] = None) -> None: """ Move the adapter of the given name to the device of the base layer. Needs special handling for nn.Parameter """ device = self.get_param().device meta = torch.device("meta") param = self.get_param() for adapter_layer_name in self.adapter_layer_names + self.other_param_names: adapter_layer = getattr(self, adapter_layer_name, None) if not isinstance(adapter_layer, (nn.ModuleDict, nn.ParameterDict, BufferDict)): continue if adapter_name not in adapter_layer: continue if any(p.device == meta for p in adapter_layer.parameters()): continue if param.dtype.is_floating_point or param.dtype.is_complex: adapter_layer[adapter_name] = adapter_layer[adapter_name].to(device, dtype=param.dtype) else: adapter_layer[adapter_name] = adapter_layer[adapter_name].to(device) def get_param(self): param = getattr(self.get_base_layer(), self.parameter_name) return param def get_delta_weight(self, adapter_name, *args, **kwargs): if self.num_experts == 1: delta_weight = Linear.get_delta_weight(self, adapter_name, *args, **kwargs) else: weight_A = self.lora_A[adapter_name].weight weight_B = self.lora_B[adapter_name].weight # shape: experts x rank x in_features weight_A = weight_A.reshape(self.num_experts, -1, weight_A.shape[-1]) # shape: out_features x rank x experts weight_B = weight_B.reshape(weight_B.shape[0], -1, self.num_experts) # fan_in_fan_out must be False, so no transpose call here delta_weight = torch.einsum("o r e, e r i -> e i o", weight_B, weight_A) * self.scaling[adapter_name] base_layer = self.get_base_layer() param = self.get_param() delta_weight = delta_weight.to(param.device, param.dtype) return delta_weight @contextmanager def _activate_lora(self, active_adapters: list[str]): if not active_adapters or not any(adapter in self.lora_A for adapter in active_adapters): # no active adapters for this layer yield return delta_weight = None for active_adapter in 
active_adapters: if active_adapter not in self.lora_A: continue if delta_weight is None: delta_weight = self.get_delta_weight(active_adapter) else: delta_weight = delta_weight + self.get_delta_weight(active_adapter) base_layer = self.get_base_layer() requires_grad_before = self.get_param().requires_grad nn.utils.parametrize.register_parametrization( base_layer, self.parameter_name, _LoraParameterProxy(delta_weight) ) # set requires_grad, as it defaults to False base_layer.parametrizations[self.parameter_name].original.requires_grad_(requires_grad_before) try: yield finally: self._remove_parametrizations() def _remove_parametrizations(self): # Remove the parametrization of this specific parameter base_layer = self.get_base_layer() parameter_name = self.parameter_name if parameter_name not in base_layer.parametrizations: raise ValueError( "Something went wrong, please report this issue on PEFT: https://github.com/huggingface/peft/issues" ) param_list = base_layer.parametrizations[parameter_name] if len(param_list) == 1: # last parametrization, we can safely remove it completely nn.utils.parametrize.remove_parametrizations(base_layer, parameter_name, leave_parametrized=False) return # If there are multiple parametrizations for the same parameter_name, we only want to remove the LoRA proxy. # Unfortunately, PyTorch does not support this directly, so we need to take care of it manually. To achieve # this, we check the ParameterList from the back until we find the _LoraParameterProxy instance and then remove # it. reversed_indices = reversed(range(len(param_list))) for i in reversed_indices: module = param_list[i] if isinstance(module, _LoraParameterProxy): del param_list[i] break else: # no break encountered # this should not happen, but raising an error is probably not necessary warnings.warn( f"Could not find any LoRA parametrization on {self}, please open an issue on " "https://github.com/huggingface/peft/issues and report this warning." 
) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: # same as lora.Linear.merge but not hard-coding base_layer.weight and without special cases like variants removed adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() param = getattr(base_layer, self.parameter_name) if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weight = param.data.clone() orig_dtype = orig_weight.dtype delta_weight = self.get_delta_weight(active_adapter) orig_weight += delta_weight.to(orig_dtype) if not torch.isfinite(orig_weight).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) param.data = orig_weight else: delta_weight = self.get_delta_weight(active_adapter) param.data += delta_weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: # same as lora.Linear.unmerge but not hard-coding base_layer.weight and without special cases like variants removed if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): param = getattr(self.get_base_layer(), self.parameter_name) orig_dtype = param.dtype delta_weight = self.get_delta_weight(active_adapter) param.data -= delta_weight.to(orig_dtype) def _check_forward_args(self, x, *args, **kwargs): """Check if the arguments are compatible with the configs and state of the model""" if kwargs.get("adapter_names", None): raise ValueError(f"lora.{self.__class__.__name__} does not support mixed adapter batches yet.") super()._check_forward_args(x, *args, **kwargs) def unload_and_optionally_merge_module(self, merge: bool, safe_merge: bool, adapter_names: Optional[list[str]]): base_layer = self.base_layer # ParamWrappers can be nested, so merge and retrieve base layer recursively if merge: self.merge(safe_merge=safe_merge, adapter_names=adapter_names) while isinstance(base_layer, ParamWrapper): base_layer.merge(safe_merge=safe_merge, adapter_names=adapter_names) base_layer = base_layer.base_layer else: base_layer = self.get_base_layer() return base_layer def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: raise ValueError(f"lora.{self.__class__.__name__} does not support mixed batch inference") elif self.merged: result = self.base_layer(x, *args, **kwargs) else: with self._activate_lora(self.active_adapters): result = self.base_layer(x, *args, **kwargs) return result def __repr__(self) -> str: rep = super().__repr__() idx = rep.find("(") + 1 # insert the name of the parameter to allow the repr to be disambiguous when multiple parameters on the same # module are being targeted rep = f"{rep[:idx]}\n 
parameter_name='{self.parameter_name}',{rep[idx:]}" return "lora." + rep def dispatch_default( target: torch.nn.Module, adapter_name: str, lora_config: LoraConfig, parameter_name: Optional[str] = None, **kwargs, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if parameter_name is not None: new_module = ParamWrapper(target, adapter_name, parameter_name=parameter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Embedding): embedding_kwargs = kwargs.copy() embedding_kwargs.pop("fan_in_fan_out", None) embedding_kwargs.update(lora_config.loftq_config) new_module = Embedding(target, adapter_name, **embedding_kwargs) elif isinstance(target_base_layer, torch.nn.Conv2d): kwargs.update(lora_config.loftq_config) new_module = Conv2d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Conv3d): kwargs.update(lora_config.loftq_config) new_module = Conv3d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, nn.Conv1d): kwargs.update(lora_config.loftq_config) new_module = Conv1d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.MultiheadAttention): kwargs.update(lora_config.loftq_config) new_module = MultiheadAttention(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, **kwargs) elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True." 
) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs) return new_module
peft/src/peft/tuners/lora/layer.py/0
{ "file_path": "peft/src/peft/tuners/lora/layer.py", "repo_id": "peft", "token_count": 45998 }
252
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional

import torch

from peft.import_utils import is_aqlm_available
from peft.tuners.oft.layer import OFTLayer
from peft.tuners.tuners_utils import BaseTunerLayer


# aqlm is an optional dependency; the QuantizedLinear import is guarded so this
# module stays importable when aqlm is not installed.
if is_aqlm_available():
    from aqlm import QuantizedLinear


class AqlmOFTLinear(torch.nn.Module, OFTLayer):
    """OFT adapter wrapper around an AQLM ``QuantizedLinear`` base layer.

    Unlike the default OFT ``Linear`` layer, merging is not supported here
    (the base weight is quantized), so the adapter rotation is always applied
    on-the-fly in ``forward``.
    """

    def __init__(
        self,
        base_layer,
        adapter_name: str,
        r: int = 0,
        oft_block_size: int = 32,
        module_dropout: float = 0.0,
        init_weights: bool = True,
        coft: bool = False,
        eps: float = 6e-5,
        block_share: bool = False,
        fan_in_fan_out: bool = False,  # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
        use_cayley_neumann: bool = False,
        num_cayley_neumann_terms: int = 5,
        **kwargs,
    ):
        super().__init__()
        # Initialize the OFT bookkeeping (adapter dicts, base layer reference).
        OFTLayer.__init__(self, base_layer)

        self._active_adapter = adapter_name
        # Register the adapter's trainable rotation parameters for `adapter_name`.
        self.update_layer(
            adapter_name,
            r,
            oft_block_size=oft_block_size,
            module_dropout=module_dropout,
            init_weights=init_weights,
            coft=coft,
            eps=eps,
            block_share=block_share,
            use_cayley_neumann=use_cayley_neumann,
            num_cayley_neumann_terms=num_cayley_neumann_terms,
        )

    def forward(self, x: torch.Tensor):
        # note: logic differs from default Linear because merging is not supported
        if self.disable_adapters:
            return self.base_layer(x)

        for active_adapter in self.active_adapters:
            if active_adapter not in self.oft_R.keys():
                continue
            oft_R = self.oft_R[active_adapter]

            # Outside autocast, cast the input to the adapter's dtype so the
            # rotation matmul doesn't fail on a dtype mismatch; remember the
            # caller's dtype so the result can be cast back below.
            requires_conversion = not torch.is_autocast_enabled()
            if requires_conversion:
                expected_dtype = x.dtype
                x = self._cast_input_dtype(x, oft_R.weight.dtype)

            # Apply the orthogonal rotation to the *input* before the quantized
            # base layer (R @ x ordering).
            x = oft_R(x)

        # NOTE(review): `requires_conversion`/`expected_dtype` are only bound
        # inside the loop; this presumably assumes at least one active adapter
        # matches `self.oft_R` whenever adapters are enabled — confirm upstream.
        result = self.base_layer(x)
        if requires_conversion:
            result = result.to(expected_dtype)
        return result

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "oft." + rep


def dispatch_aqlm(
    target: torch.nn.Module,
    adapter_name: str,
    **kwargs: Any,
) -> Optional[torch.nn.Module]:
    """Return an ``AqlmOFTLinear`` wrapping `target` if it is an AQLM layer.

    Returns ``None`` when `target` is not an AQLM ``QuantizedLinear`` (or aqlm
    is unavailable), letting other dispatchers handle it.
    """
    new_module = None

    # If `target` is already a tuner layer, dispatch based on the wrapped base
    # layer rather than the wrapper itself.
    if isinstance(target, BaseTunerLayer):
        target_base_layer = target.get_base_layer()
    else:
        target_base_layer = target

    if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
        new_module = AqlmOFTLinear(target, adapter_name, **kwargs)
        # Expose the quantized codes under `qweight` — presumably so generic
        # device/dtype handling elsewhere can treat this like other quantized
        # layers; TODO confirm against the AQLM integration.
        target.qweight = target_base_layer.codes

    return new_module
peft/src/peft/tuners/oft/aqlm.py/0
{ "file_path": "peft/src/peft/tuners/oft/aqlm.py", "repo_id": "peft", "token_count": 1432 }
253
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import asdict
from enum import Enum
from typing import Any

import torch
from torch import nn

from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import (
    TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
    ModulesToSaveWrapper,
)

from .config import PolyConfig
from .layer import Linear, PolyLayer


class PolyModel(BaseTuner):
    """Tuner model that injects Poly adapter layers into a base model.

    Targeted ``nn.Linear`` modules are replaced by Poly ``Linear`` wrappers;
    ``task_ids`` are routed to those wrappers at call time via forward
    pre-hooks (see ``forward``/``generate``).
    """

    # Prefix identifying Poly adapter parameters in `named_parameters()`.
    prefix: str = "poly_"

    @staticmethod
    def _check_target_module_exists(poly_config, key):
        # Delegates the target-module matching to the shared tuner utility.
        return check_target_module_exists(poly_config, key)

    def _create_and_replace(
        self,
        poly_config: PolyConfig,
        adapter_name: str,
        target: nn.Module,
        target_name: str,
        parent: nn.Module,
        **optional_kwargs: Any,
    ):
        # If the target was already wrapped by a previous adapter, just add
        # the new adapter to the existing Poly layer instead of re-wrapping.
        if isinstance(target, PolyLayer):
            target.update_layer(adapter_name, poly_config)
        else:
            new_module = self._create_new_module(
                poly_config,
                adapter_name,
                target,
            )
            if adapter_name not in self.active_adapters:
                # adding an additional adapter: it is not automatically trainable
                new_module.requires_grad_(False)
            self._replace_module(parent, target_name, new_module, target)

    def _replace_module(self, parent, child_name, new_module, child):
        # Swap `child` for `new_module` on the parent, then copy over the
        # state the wrapper needs. The statement order below matters.
        setattr(parent, child_name, new_module)
        # It's not necessary to set requires_grad here, as that is handled by
        # _mark_only_adapters_as_trainable

        # child layer wraps the original module, unpack it
        if hasattr(child, "base_layer"):
            child = child.base_layer

        if not hasattr(new_module, "base_layer"):
            # Plain replacement (no wrapper): carry the original parameters over.
            new_module.weight = child.weight
            if hasattr(child, "bias"):
                new_module.bias = child.bias

        if getattr(child, "state", None) is not None:
            # Preserve any quantization/optimizer state attached to the child.
            if hasattr(new_module, "base_layer"):
                new_module.base_layer.state = child.state
            else:
                new_module.state = child.state
            new_module.to(child.weight.device)

        # dispatch to correct device
        for name, module in new_module.named_modules():
            if (self.prefix in name) or ("ranknum" in name):
                # Quantized layers store their weight as `qweight`.
                weight = child.qweight if hasattr(child, "qweight") else child.weight
                module.to(weight.device)

    def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
        # Freeze everything except parameters belonging to Poly adapters.
        for n, p in model.named_parameters():
            if self.prefix not in n:
                p.requires_grad = False

    @staticmethod
    def _create_new_module(poly_config, adapter_name, target, **kwargs):
        if isinstance(target, BaseTunerLayer):
            target_base_layer = target.get_base_layer()
        else:
            target_base_layer = target

        # Poly currently supports only plain nn.Linear targets.
        if isinstance(target_base_layer, torch.nn.Linear):
            return Linear(target, adapter_name, poly_config, **kwargs)
        else:
            raise ValueError(
                f"Target module {target} is not supported. Currently, only the following modules are supported: "
                "`torch.nn.Linear`."
            )

    def __getattr__(self, name: str):
        """Forward missing attributes to the wrapped module."""
        try:
            return super().__getattr__(name)  # defer to nn.Module's logic
        except AttributeError:
            if name == "model":  # see #1892: prevent infinite recursion if class is not initialized
                raise
            return getattr(self.model, name)

    def get_peft_config_as_dict(self, inference: bool = False):
        # Serialize each adapter config, flattening Enum values.
        config_dict = {}
        for key, value in self.peft_config.items():
            config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
            if inference:
                config["inference_mode"] = True
        # NOTE(review): `config_dict[key] = config` sits outside the loop and
        # the function returns `config` (the last adapter's dict), not
        # `config_dict` — looks like a latent bug, but it matches upstream;
        # confirm before changing.
        config_dict[key] = config
        return config

    def _set_adapter_layers(self, enabled=True):
        # Toggle adapters on every Poly layer and modules-to-save wrapper.
        for module in self.model.modules():
            if isinstance(module, (PolyLayer, ModulesToSaveWrapper)):
                module.enable_adapters(enabled)

    def enable_adapter_layers(self):
        self._set_adapter_layers(enabled=True)

    def disable_adapter_layers(self):
        self._set_adapter_layers(enabled=False)

    def set_adapter(self, adapter_name):
        # Activate `adapter_name` on all Poly layers in the model.
        for module in self.model.modules():
            if isinstance(module, PolyLayer):
                module.set_adapter(adapter_name)

    def _prepare_adapter_config(self, peft_config, model_config):
        # Fill in default target modules from the LoRA mapping when the user
        # did not specify any (Poly reuses the LoRA target-module defaults).
        if peft_config.target_modules is None:
            if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
                raise ValueError("Please specify `target_modules` in `peft_config`")
            peft_config.target_modules = set(
                TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
            )
        return peft_config

    def _register_pre_hooks(self, task_ids):
        """Helper method to register pre hooks."""
        # The hooks inject `task_ids` into every Poly Linear's forward kwargs.
        if task_ids is None:
            return []

        def pre_hook(_, args, kwargs):
            kwargs["task_ids"] = task_ids
            return args, kwargs

        handles = []
        for module in self.model.modules():
            if isinstance(module, Linear):
                handle = module.register_forward_pre_hook(pre_hook, with_kwargs=True)
                handles.append(handle)

        return handles

    @contextmanager
    def _manage_pre_hooks(self, task_ids):
        """Context manager to handle the lifecycle of pre hooks."""
        handles = self._register_pre_hooks(task_ids)
        try:
            yield
        finally:
            # Always remove the hooks, even if the wrapped call raised.
            for handle in handles:
                handle.remove()

    def forward(self, *args, task_ids=None, **kwargs):
        # Route `task_ids` to the Poly layers for the duration of this call.
        with self._manage_pre_hooks(task_ids):
            return self.model(*args, **kwargs)

    def generate(self, *args, task_ids=None, **kwargs):
        # Same as forward, but for the base model's `generate` API.
        with self._manage_pre_hooks(task_ids):
            return self.model.generate(*args, **kwargs)
peft/src/peft/tuners/poly/model.py/0
{ "file_path": "peft/src/peft/tuners/poly/model.py", "repo_id": "peft", "token_count": 2932 }
254
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from typing import Any, Optional, Union

import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge

from .config import RoadConfig, RoadVariant


class RoadLayer(BaseTunerLayer):
    """
    Road layer.

    Generally the idea of RoAD is to split the input vector into many 2D vectors and rotate each 2D vector with its
    own 2D rotation matrix. For additional flexibility, each rotation matrix is multiplied by a trainable scale. when
    applied to vector R @ x each pair of elements of x is transformed like this:

    `y₀ = x₀ * α * cosθ - xₙ * α * sinθ` and `yₙ = x₀ * α * sinθ + xₙ * α * cosθ`

    The scales α and angles θ are learned for each pair of elements and, moreover, each of the 4 instances in the
    rotation matrix may actually be different (when using variant 2 or 4).

    Note that instead of using two consecutive elements x₀ x₁ we first split the whole vector into groups and pair
    elements from the first with the second half of the same group, which allows for more efficient inference
    implementation. The adapter needs to only store the angles θ and scales α, rather than the full matrix R and the
    inference implementation only needs to do elementwise vector multiplications.

    For merging the weights, we make use of the following formula: R @ (W @ x + b) = (R @ W) @ x + R @ b. The lhs part
    is how it is used in unmerged state (using efficient elementwise implementation instead of matrix multiplication)
    and the rhs part is how it is used in merged state where (R @ W) becomes the new weight matrix and R @ b becomes
    the new bias.
    """

    # Names recognized by the BaseTunerLayer machinery: trainable adapter parameters and
    # non-trainable per-adapter configuration values, respectively.
    adapter_layer_names: tuple[str, ...] = ("road_theta", "road_alpha")
    other_param_names: tuple[str, ...] = ("variant", "group_size")

    def __init__(self, base_layer: nn.Module, ephemeral_gpu_offload: bool = False, **kwargs) -> None:
        self.base_layer = base_layer
        # All of the following containers are keyed by adapter name.
        self.variant = {}
        self.group_size = {}
        self.road_theta = nn.ParameterDict({})  # rotation angles θ
        self.road_alpha = nn.ParameterDict({})  # scales α
        self._disable_adapters = False
        self.merged_adapters = []

        base_layer = self.get_base_layer()
        if isinstance(base_layer, nn.Linear):
            in_features, out_features = base_layer.in_features, base_layer.out_features
        else:
            # RoAd is only implemented for dense (nn.Linear) base layers.
            raise ValueError(f"Unsupported layer type '{type(base_layer)}' encountered, cannot apply RoAd adapter.")

        self.in_features = in_features
        self.out_features = out_features

    @property
    def _available_adapters(self) -> set[str]:
        # road_theta and road_alpha are always registered in lockstep, so checking one suffices.
        return {*self.road_theta}

    def update_layer(
        self,
        adapter_name,
        variant,
        group_size,
        init_weights,
    ):
        """Create the RoAd parameters for `adapter_name` on this layer and make it active."""
        self.variant[adapter_name] = variant
        self.group_size[adapter_name] = group_size

        if self.out_features % group_size != 0:
            raise ValueError(
                f"The out_features of the base layer must be divisible by group_size ({group_size}) when using RoadLayer."
            )

        # Actual trainable parameters. The parameter count depends on the variant:
        # road_1 shares a (θ, α) entry per 2D rotation, road_2 has one per output element,
        # road_4 additionally keeps separate entries for the two columns of each rotation.
        if variant == "road_1":
            size = self.out_features // 2
        elif variant == "road_2":
            size = self.out_features
        elif variant == "road_4":
            size = self.out_features * 2
        else:
            raise ValueError(
                f"Unsupported variant {variant} for RoadLayer. Supported variants are road_1, road_2, and road_4."
            )

        self.road_theta[adapter_name] = nn.Parameter(torch.empty(size))
        self.road_alpha[adapter_name] = nn.Parameter(torch.empty(size))
        self.reset_parameters(adapter_name, init_weights)

        self._move_adapter_to_device_of_base_layer(adapter_name)
        self.set_adapter(self.active_adapters)

    def reset_parameters(self, adapter_name, init_weights):
        # init_weights=False is intended for testing: random angles/scales instead of identity.
        if init_weights is False:
            nn.init.normal_(self.road_theta[adapter_name].data, mean=0.0, std=0.5)
            nn.init.normal_(self.road_alpha[adapter_name].data, mean=1.0, std=0.5)
            return

        # Default init: θ = 0 and α = 1, i.e. the adapter starts out as the identity transform.
        nn.init.zeros_(self.road_theta[adapter_name].data)
        nn.init.ones_(self.road_alpha[adapter_name].data)


class Linear(nn.Module, RoadLayer):
    # Road implemented in a dense layer
    def __init__(
        self,
        base_layer,
        adapter_name: str,
        variant: RoadVariant = "road_1",
        group_size: int = 64,
        init_weights: Union[bool, str] = True,
        **kwargs,
    ) -> None:
        super().__init__()
        RoadLayer.__init__(self, base_layer, **kwargs)

        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            variant,
            group_size,
            init_weights=init_weights,
        )

    def _check_forward_args(self, x, *args, **kwargs):
        """Check if the arguments are compatible with the configs and state of the model"""
        adapter_names = kwargs.get("adapter_names", None)
        if adapter_names is None:
            return

        if len(x) != len(adapter_names):
            msg = (
                "Length of `adapter_names` should be the same as the number of inputs, but got "
                f"{len(adapter_names)} and {len(x)} respectively."
            )
            raise ValueError(msg)

        if self.merged:
            # It is unclear what would be the right thing to do if users pass adapter_names and there are merged
            # adapters. Therefore, it is better to raise an error in this case.
            msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
            raise ValueError(msg)

    def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
        self._check_forward_args(x, *args, **kwargs)
        adapter_names = kwargs.pop("adapter_names", None)

        if self.disable_adapters:
            if self.merged:
                self.unmerge()
            result = self.base_layer(x, *args, **kwargs)
        elif self.merged:
            # Rotation is already folded into the base weights, nothing extra to apply.
            result = self.base_layer(x, *args, **kwargs)
        elif adapter_names is not None:
            result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
        else:
            # Unmerged path: run the base layer, then apply the cheap elementwise rotation of
            # every active adapter on top of its output.
            result = self.base_layer(x, *args, **kwargs)
            torch_result_dtype = result.dtype
            for active_adapter in self.active_adapters:
                if active_adapter not in self._available_adapters:
                    continue
                # Compute the rotation in the adapter's parameter dtype, then cast back.
                result = self._cast_input_dtype(result, self.road_theta[active_adapter].dtype)
                result = _apply_road(
                    self.variant[active_adapter],
                    self.group_size[active_adapter],
                    self.road_theta[active_adapter],
                    self.road_alpha[active_adapter],
                    result,
                )
            result = result.to(torch_result_dtype)

        return result

    def _mixed_batch_forward(
        self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
    ) -> torch.Tensor:
        # This is a special method that handles the case when users pass the argument `adapter_names`. This is an
        # extra argument that allows mixing different adapters in the same batch at inference time.
        result = self.base_layer(x, *args, **kwargs)

        unique_adapters = set(adapter_names)
        sub_batch_indices_list = []
        for adapter in unique_adapters:
            sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])

        for i, active_adapter in enumerate(unique_adapters):
            if active_adapter == "__base__":
                # "__base__" marks samples that should only go through the base layer.
                continue
            if active_adapter not in self._available_adapters:
                continue

            dtype = self.road_theta[active_adapter].data.dtype

            # getting the sub-batch, passing it to Road layers and updating the corresponding indices of the linear
            # layer output
            sub_batch = result[sub_batch_indices_list[i]].to(dtype)
            result[sub_batch_indices_list[i]] = _apply_road(
                self.variant[active_adapter],
                self.group_size[active_adapter],
                self.road_theta[active_adapter],
                self.road_alpha[active_adapter],
                sub_batch,
            )

        return result

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        """
        Merge the active adapter weights into the base weights

        Args:
            safe_merge (`bool`, *optional*):
                If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs
                before merging the weights. This is useful if you want to check if the merge operation will produce
                NaNs. Defaults to `False`.
            adapter_names (`List[str]`, *optional*):
                The list of adapter names that should be merged. If `None`, all active adapters will be merged.
                Defaults to `None`.
        """
        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return

        for active_adapter in adapter_names:
            if active_adapter in self._available_adapters:
                base_layer = self.get_base_layer()
                orig_dtype = base_layer.weight.dtype
                # Materialize the full rotation matrix R so it can be folded into W (and b):
                # R @ (W @ x + b) = (R @ W) @ x + R @ b
                road_R = _get_delta_weight(
                    self.variant[active_adapter],
                    self.group_size[active_adapter],
                    self.road_theta[active_adapter].data,
                    self.road_alpha[active_adapter].data,
                )

                if safe_merge:
                    # Note that safe_merge will be slower than the normal merge
                    # because of the copy operation.
                    orig_weight = base_layer.weight.data.clone()
                    orig_weight = torch.matmul(road_R.to(orig_dtype), orig_weight)

                    if not torch.isfinite(orig_weight).all():
                        raise ValueError(
                            f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                        )

                    base_layer.weight.data = orig_weight.contiguous().to(orig_dtype)

                    if base_layer.bias is not None:
                        orig_bias = base_layer.bias.clone()
                        orig_bias = torch.matmul(road_R.to(orig_dtype), orig_bias)

                        if not torch.isfinite(orig_bias).all():
                            raise ValueError(
                                f"NaNs detected in the merged bias. The adapter {active_adapter} seems to be broken"
                            )

                        base_layer.bias.data = orig_bias.contiguous().to(orig_dtype)
                else:
                    orig_weight = base_layer.weight.data
                    orig_weight = torch.matmul(road_R.to(orig_dtype), orig_weight)
                    base_layer.weight.data = orig_weight.contiguous().to(orig_dtype)

                    if base_layer.bias is not None:
                        orig_bias = base_layer.bias.data
                        orig_bias = torch.matmul(road_R.to(orig_dtype), orig_bias)
                        base_layer.bias.data = orig_bias.contiguous().to(orig_dtype)

                self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        """
        This method unmerges all merged adapter layers from the base weights.
        """
        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return

        while len(self.merged_adapters) > 0:
            # Going in reverse order
            active_adapter = self.merged_adapters.pop()
            if active_adapter in self._available_adapters:
                weight = self.get_base_layer().weight
                orig_dtype = weight.dtype
                road_R = _get_delta_weight(
                    self.variant[active_adapter],
                    self.group_size[active_adapter],
                    self.road_theta[active_adapter].data,
                    self.road_alpha[active_adapter].data,
                )

                # Since our matrix are not necessarily orthogonal we need inverse instead of transpose.
                # In practice we expect this to basically always work since we start from block diagonal rotation matrix.
                inv_road_R = torch.linalg.inv(road_R.to(torch.float32)).to(orig_dtype)

                orig_weight = torch.matmul(inv_road_R, weight.data)
                weight.data = orig_weight.contiguous()
                if self.get_base_layer().bias is not None:
                    orig_bias = torch.matmul(inv_road_R, self.get_base_layer().bias.data)
                    self.get_base_layer().bias.data = orig_bias.contiguous()

    def __repr__(self) -> str:
        rep = super().__repr__()
        return "road." + rep


def _get_delta_weight(variant: RoadVariant, group_size: int, road_theta: torch.Tensor, road_alpha: torch.Tensor):
    """Materialize the full (out_features x out_features) rotation-and-scale matrix R used for merging."""
    first_col, second_col = _prepare_cols(variant, group_size, road_theta, road_alpha)

    # To help understand the logic below consider how rope embeddings work
    # here it is similar, but done in groups.
    # https://discuss.huggingface.co/t/is-llama-rotary-embedding-implementation-correct/44509/3

    # First column is simply put on the main diagonal
    output_tensor = torch.diag(first_col)
    # For second column we need to swap each half groups and add minus sign
    size = second_col.shape[0]
    swapped_second_col = second_col.reshape(-1, 2, group_size // 2)[:, [1, 0], :].flatten()
    rotated_diag_second_col = torch.diag(swapped_second_col).reshape(-1, 2, group_size // 2, size)[:, [1, 0], :, :]
    rotated_diag_second_col[:, 0, :, :] *= -1
    rotated_diag_second_col = rotated_diag_second_col.reshape(size, size)
    output_tensor += rotated_diag_second_col
    return output_tensor


def _prepare_cols(
    variant: RoadVariant, group_size: int, road_theta: torch.Tensor, road_alpha: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
    """Expand the stored (θ, α) parameters into the two per-element coefficient vectors
    (α·cosθ on the "diagonal" and α·sinθ on the "rotated" part), according to the variant."""
    # In inference mode, this can be cached
    if variant == "road_1":
        # In each group there are only group_size // 2 parameters that are reused
        road_theta = road_theta.reshape(-1, group_size // 2).repeat_interleave(2, dim=0).flatten()
        road_alpha = road_alpha.reshape(-1, group_size // 2).repeat_interleave(2, dim=0).flatten()
        theta_cos = road_theta.cos()
        theta_sin = road_theta.sin()
        first_col = road_alpha * theta_cos
        second_col = road_alpha * theta_sin
    elif variant == "road_2":
        # Each group has exactly group_size parameters
        theta_cos = road_theta.cos()
        theta_sin = road_theta.sin()
        first_col = road_alpha * theta_cos
        second_col = road_alpha * theta_sin
    elif variant == "road_4":
        # Each group has 2*group_size parameters, first half used for first column, second half for second column
        road_theta = road_theta.reshape(-1, 2, group_size)
        theta_cos = road_theta[:, 0, :].cos().flatten()
        theta_sin = road_theta[:, 1, :].sin().flatten()
        road_alpha = road_alpha.reshape(-1, 2, group_size)
        alpha_1 = road_alpha[:, 0, :].flatten()
        alpha_2 = road_alpha[:, 1, :].flatten()
        first_col = alpha_1 * theta_cos
        second_col = alpha_2 * theta_sin
    else:
        raise ValueError(
            f"Unsupported variant {variant} for RoadLayer. Supported variants are road_1, road_2, and road_4."
        )
    return first_col, second_col


def _apply_road(
    variant: RoadVariant, group_size: int, road_theta: torch.Tensor, road_alpha: torch.Tensor, x: torch.Tensor
):
    """Apply the RoAd rotation to `x` using only elementwise multiplications (no matmul)."""
    first_col, second_col = _prepare_cols(variant, group_size, road_theta, road_alpha)

    # Split in half groups and join back
    # See equation 4 in the RoAD paper
    x_grouped = x.reshape(-1, 2, group_size // 2)
    x1 = x_grouped[:, 0, :]
    x2 = x_grouped[:, 1, :]
    rotate_half_x = torch.stack((-x2, x1), dim=1).reshape(x.shape)

    result = x * first_col + rotate_half_x * second_col
    return result


def dispatch_default(
    target: torch.nn.Module,
    adapter_name: str,
    road_config: RoadConfig,
    **kwargs,
) -> Optional[torch.nn.Module]:
    """Return a RoAd `Linear` wrapping `target` if the (base) layer is nn.Linear, else None."""
    new_module = None

    if isinstance(target, BaseTunerLayer):
        target_base_layer = target.get_base_layer()
    else:
        target_base_layer = target

    if isinstance(target_base_layer, torch.nn.Linear):
        new_module = Linear(target, adapter_name, **kwargs)

    return new_module
peft/src/peft/tuners/road/layer.py/0
{ "file_path": "peft/src/peft/tuners/road/layer.py", "repo_id": "peft", "token_count": 7689 }
255
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Literal

import torch


def reshape_weight_task_tensors(task_tensors, weights):
    """
    Reshapes `weights` to match the shape of `task_tensors` by unsqueezing in the remaining dimensions.

    Args:
        task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`.
        weights (`torch.Tensor`): The tensor to be reshaped.

    Returns:
        `torch.Tensor`: The reshaped tensor.
    """
    # Append trailing singleton dims so `weights` broadcasts against `task_tensors`.
    new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim())
    weights = weights.view(new_shape)
    return weights


def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor:
    """
    Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction
    `density`.

    Args:
        tensor (`torch.Tensor`):The tensor to prune.
        density (`float`):The fraction of values to preserve. Should be in [0,1].

    Returns:
        `torch.Tensor`: The tensor with the pruned weights.
    """
    mask = torch.zeros_like(tensor).reshape(-1)
    k = int(density * tensor.numel())
    top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True)
    mask[top_k[1]] = 1  # top_k[1] holds the indices of the k largest magnitudes
    return tensor * mask.reshape(tensor.shape)


def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor:
    """
    Prune random values based on the specified fraction `density`.

    Args:
        tensor (`torch.Tensor`):The tensor to prune.
        density (`float`):The fraction of values to preserve. Should be in [0,1].
        rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.

    Returns:
        `torch.Tensor`: The pruned tensor.
    """
    mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density))
    pruned_tensor = tensor * mask
    if rescale:
        # Divide by the keep-probability so that E[pruned_tensor] == tensor (as in DARE).
        # Note: torch.div is out-of-place, so the result must be assigned back; previously the
        # return value was discarded and `rescale=True` silently had no effect.
        pruned_tensor = torch.div(input=pruned_tensor, other=density)
    return pruned_tensor


def prune(
    tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False
) -> torch.Tensor:
    """
    Prune the values of task tensors based on the `method`.

    Args:
        tensor (`torch.Tensor`):The tensor to prune.
        density (`float`):The fraction of values to preserve. Should be in [0,1].
        method (`str`):The method to use to prune. Should be one of ["magnitude", "random"].
        rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.

    Returns:
        `torch.Tensor`: The pruned tensor.
    """
    if density >= 1:
        warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.")
        return tensor
    elif density < 0:
        raise ValueError(f"Density should be >= 0, got {density}")
    if method == "magnitude":
        return magnitude_based_pruning(tensor, density)
    elif method == "random":
        return random_pruning(tensor, density, rescale=rescale)
    else:
        raise ValueError(f"Unknown method {method}")


def calculate_majority_sign_mask(
    tensor: torch.Tensor, method: Literal["total", "frequency"] = "total"
) -> torch.Tensor:
    """
    Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0.

    Args:
        tensor (`torch.Tensor`):The tensor to get the mask from.
        method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"].

    Returns:
        `torch.Tensor`: The majority sign mask.
    """
    sign = tensor.sign()
    if method == "total":
        # Weight the vote by magnitude.
        sign_magnitude = tensor.sum(dim=0)
    elif method == "frequency":
        # One vote per task tensor, regardless of magnitude.
        sign_magnitude = sign.sum(dim=0)
    else:
        raise RuntimeError(f'Unimplemented mask method "{method}"')

    majority_sign = torch.where(sign_magnitude >= 0, 1, -1)  # ties resolve to +1
    return sign == majority_sign


def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor:
    """
    Merge the task tensors using disjoint merge.

    Args:
        task_tensors (`torch.Tensor`):The task tensors to merge.
        majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors.

    Returns:
        `torch.Tensor`: The merged tensor.
    """
    mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0)
    num_params_preserved = majority_sign_mask.sum(dim=0)
    # clamp avoids division by zero where no task tensor agreed with the majority sign
    return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0)


def task_arithmetic(task_tensors: list[torch.Tensor], weights: torch.Tensor) -> torch.Tensor:
    """
    Merge the task tensors using `task arithmetic`.

    Args:
        task_tensors(`List[torch.Tensor]`):The task tensors to merge.
        weights (`torch.Tensor`):The weights of the task tensors.

    Returns:
        `torch.Tensor`: The merged tensor.
    """
    task_tensors = torch.stack(task_tensors, dim=0)
    # weighted task tensors
    weights = reshape_weight_task_tensors(task_tensors, weights)
    weighted_task_tensors = task_tensors * weights
    mixed_task_tensors = weighted_task_tensors.sum(dim=0)
    return mixed_task_tensors


def magnitude_prune(task_tensors: list[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor:
    """
    Merge the task tensors using `task arithmetic` after magnitude-based pruning.

    Args:
        task_tensors(`List[torch.Tensor]`):The task tensors to merge.
        weights (`torch.Tensor`):The weights of the task tensors.
        density (`float`): The fraction of values to preserve. Should be in [0,1].

    Returns:
        `torch.Tensor`: The merged tensor.
    """
    # sparsify
    task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors]
    task_tensors = torch.stack(task_tensors, dim=0)
    # weighted task tensors
    weights = reshape_weight_task_tensors(task_tensors, weights)
    weighted_task_tensors = task_tensors * weights
    mixed_task_tensors = weighted_task_tensors.sum(dim=0)
    return mixed_task_tensors


def ties(
    task_tensors: list[torch.Tensor],
    weights: torch.Tensor,
    density: float,
    majority_sign_method: Literal["total", "frequency"] = "total",
) -> torch.Tensor:
    """
    Merge the task tensors using `ties`.

    Args:
        task_tensors(`List[torch.Tensor]`):The task tensors to merge.
        weights (`torch.Tensor`):The weights of the task tensors.
        density (`float`):The fraction of values to preserve. Should be in [0,1].
        majority_sign_method (`str`):
            The method to use to get the majority sign mask. Should be one of ["total", "frequency"].

    Returns:
        `torch.Tensor`: The merged tensor.
    """
    # sparsify
    task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors]
    task_tensors = torch.stack(task_tensors, dim=0)
    # Elect Sign
    majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method)
    # weighted task tensors
    weights = reshape_weight_task_tensors(task_tensors, weights)
    weighted_task_tensors = task_tensors * weights
    # Disjoint Merge
    mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask)
    return mixed_task_tensors


def dare_linear(task_tensors: list[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor:
    """
    Merge the task tensors using `dare linear`.

    Args:
        task_tensors(`List[torch.Tensor]`):The task tensors to merge.
        weights (`torch.Tensor`):The weights of the task tensors.
        density (`float`):The fraction of values to preserve. Should be in [0,1].

    Returns:
        `torch.Tensor`: The merged tensor.
    """
    # sparsify (random drop + rescale, per DARE)
    task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors]
    task_tensors = torch.stack(task_tensors, dim=0)
    # weighted task tensors
    weights = reshape_weight_task_tensors(task_tensors, weights)
    weighted_task_tensors = task_tensors * weights
    mixed_task_tensors = weighted_task_tensors.sum(dim=0)
    return mixed_task_tensors


def dare_ties(
    task_tensors: list[torch.Tensor],
    weights: torch.Tensor,
    density: float,
    majority_sign_method: Literal["total", "frequency"] = "total",
) -> torch.Tensor:
    """
    Merge the task tensors using `dare ties`.

    Args:
        task_tensors(`List[torch.Tensor]`):The task tensors to merge.
        weights (`torch.Tensor`):The weights of the task tensors.
        density (`float`):The fraction of values to preserve. Should be in [0,1].
        majority_sign_method (`str`):
            The method to use to get the majority sign mask. Should be one of ["total", "frequency"].

    Returns:
        `torch.Tensor`: The merged tensor.
    """
    # sparsify (random drop + rescale, per DARE)
    task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors]
    task_tensors = torch.stack(task_tensors, dim=0)
    # Elect Sign
    majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method)
    # weighted task tensors
    weights = reshape_weight_task_tensors(task_tensors, weights)
    weighted_task_tensors = task_tensors * weights
    # Disjoint Merge
    mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask)
    return mixed_task_tensors
peft/src/peft/utils/merge_utils.py/0
{ "file_path": "peft/src/peft/utils/merge_utils.py", "repo_id": "peft", "token_count": 3817 }
256
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Union

import pytest
import torch
from datasets import load_dataset
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

from peft import CPTConfig, TaskType, get_peft_model


# Prompt template used to serialize (input, label) pairs into a single token sequence.
TEMPLATE = {"input": "input: {}", "intra_seperator": " ", "output": "output: {}", "inter_seperator": "\n"}

MODEL_NAME = "hf-internal-testing/tiny-random-OPTForCausalLM"
MAX_INPUT_LENGTH = 1024


@pytest.fixture(scope="module")
def global_tokenizer():
    """Load the tokenizer fixture for the model."""
    return AutoTokenizer.from_pretrained(MODEL_NAME, padding_side="right")


@pytest.fixture(scope="module")
def config_text():
    """Build a CPTConfig with explicit CPT token ids/masks (text-based initialization)."""
    config = CPTConfig(
        cpt_token_ids=[0, 1, 2, 3, 4, 5, 6, 7],  # Example token IDs for testing
        cpt_mask=[1, 1, 1, 1, 1, 1, 1, 1],
        cpt_tokens_type_mask=[1, 2, 2, 2, 3, 3, 3, 4],
        opt_weighted_loss_type="decay",
        opt_loss_decay_factor=0.95,
        opt_projection_epsilon=0.2,
        opt_projection_format_epsilon=0.1,
        tokenizer_name_or_path=MODEL_NAME,
    )
    return config


@pytest.fixture(scope="module")
def config_random():
    """Build a CPTConfig without explicit token ids (randomly initialized CPT tokens)."""
    config = CPTConfig(
        opt_weighted_loss_type="decay",
        opt_loss_decay_factor=0.95,
        opt_projection_epsilon=0.2,
        opt_projection_format_epsilon=0.1,
        tokenizer_name_or_path=MODEL_NAME,
    )
    return config


@pytest.fixture(scope="module")
def sst_data():
    """Load the SST2 dataset and prepare it for testing."""
    data = load_dataset("glue", "sst2")

    def add_string_labels(example):
        # Map the integer label to its textual form, used as the generation target.
        if example["label"] == 0:
            example["label_text"] = "negative"
        elif example["label"] == 1:
            example["label_text"] = "positive"
        return example

    train_dataset = data["train"].select(range(4)).map(add_string_labels)
    test_dataset = data["validation"].select(range(10)).map(add_string_labels)

    return {"train": train_dataset, "test": test_dataset}


@pytest.fixture(scope="module")
def collator(global_tokenizer):
    class CPTDataCollatorForLanguageModeling(DataCollatorForLanguageModeling):
        """Collator that pads input_ids/attention_mask/input_type_mask and builds labels for CPT."""

        def __init__(self, tokenizer, training=True, mlm=False):
            super().__init__(tokenizer, mlm=mlm)
            self.training = training
            # NOTE(review): pad token is added unconditionally here — confirm why this is needed
            self.tokenizer.add_special_tokens({"pad_token": "[PAD]"})

        def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
            # Handle dict or lists with proper padding and conversion to tensor.
            list_sample_mask = []
            for i in range(len(examples)):
                if "sample_mask" in examples[i].keys():
                    list_sample_mask.append(examples[i].pop("sample_mask"))

            max_len = max(len(ex["input_ids"]) for ex in examples)

            def pad_sequence(sequence, max_len, pad_value=0):
                return sequence + [pad_value] * (max_len - len(sequence))

            input_ids = torch.tensor([pad_sequence(ex["input_ids"], max_len) for ex in examples])
            attention_mask = torch.tensor([pad_sequence(ex["attention_mask"], max_len) for ex in examples])
            input_type_mask = torch.tensor([pad_sequence(ex["input_type_mask"], max_len) for ex in examples])

            batch = {"input_ids": input_ids, "attention_mask": attention_mask, "input_type_mask": input_type_mask}

            # Right-pad the per-sample masks (if any) into a single tensor of the batch shape.
            tensor_sample_mask = batch["input_ids"].clone().long()
            tensor_sample_mask[:, :] = 0
            for i in range(len(list_sample_mask)):
                tensor_sample_mask[i, : len(list_sample_mask[i])] = list_sample_mask[i]

            batch["labels"] = batch["input_ids"].clone()
            if not self.training:
                batch["sample_mask"] = tensor_sample_mask

            return batch

    collator = CPTDataCollatorForLanguageModeling(global_tokenizer, training=True, mlm=False)
    return collator


def dataset(data, tokenizer):
    class CPTDataset(Dataset):
        """Tokenizes (sentence, label_text) pairs with the TEMPLATE and tracks token-type masks."""

        def __init__(self, samples, tokenizer, template, max_length=MAX_INPUT_LENGTH):
            self.template = template
            self.tokenizer = tokenizer
            self.max_length = max_length

            self.attention_mask = []
            self.input_ids = []
            self.input_type_mask = []
            self.inter_seperator_ids = self._get_input_ids(template["inter_seperator"])

            for sample_i in tqdm(samples):
                input_text, label = sample_i["sentence"], sample_i["label_text"]
                input_ids, attention_mask, input_type_mask = self.preprocess_sentence(input_text, label)

                self.input_ids.append(input_ids)
                self.attention_mask.append(attention_mask)
                self.input_type_mask.append(input_type_mask)

        def _get_input_ids(self, text):
            # Tokenize without special tokens; pieces are concatenated manually below.
            return self.tokenizer(text, add_special_tokens=False)["input_ids"]

        def preprocess_sentence(self, input_text, label):
            input_template_part_1_text, input_template_part_2_text = self.template["input"].split("{}")
            input_template_tokenized_part1 = self._get_input_ids(input_template_part_1_text)
            input_tokenized = self._get_input_ids(input_text)
            input_template_tokenized_part2 = self._get_input_ids(input_template_part_2_text)

            sep_tokenized = self._get_input_ids(self.template["intra_seperator"])

            label_template_part_1, label_template_part_2 = self.template["output"].split("{}")
            label_template_part1_tokenized = self._get_input_ids(label_template_part_1)
            label_tokenized = self._get_input_ids(label)
            label_template_part2_tokenized = self._get_input_ids(label_template_part_2)

            eos = [self.tokenizer.eos_token_id] if self.tokenizer.eos_token_id is not None else []

            input_ids = (
                input_template_tokenized_part1
                + input_tokenized
                + input_template_tokenized_part2
                + sep_tokenized
                + label_template_part1_tokenized
                + label_tokenized
                + label_template_part2_tokenized
                + eos
            )

            # determine label tokens, to calculate loss only over them when labels_loss == True
            attention_mask = [1] * len(input_ids)
            # Type codes: 1 = template, 2 = input text, 3 = label template, 4 = label, 0 = separator/eos.
            input_type_mask = (
                [1] * len(input_template_tokenized_part1)
                + [2] * len(input_tokenized)
                + [1] * len(input_template_tokenized_part2)
                + [0] * len(sep_tokenized)
                + [3] * len(label_template_part1_tokenized)
                + [4] * len(label_tokenized)
                + [3] * len(label_template_part2_tokenized)
                + [0] * len(eos)
            )

            assert len(input_type_mask) == len(input_ids) == len(attention_mask)

            return input_ids, attention_mask, input_type_mask

        def __len__(self):
            return len(self.input_ids)

        def __getitem__(self, idx):
            return {
                "input_ids": self.input_ids[idx],
                "attention_mask": self.attention_mask[idx],
                "input_type_mask": self.input_type_mask[idx],
            }

    dataset = CPTDataset(data, tokenizer, TEMPLATE)

    return dataset


def test_model_initialization_text(global_tokenizer, config_text):
    """Test model loading and PEFT model initialization."""
    base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

    model = get_peft_model(base_model, config_text)
    assert model is not None, "PEFT model initialization failed"


def test_model_initialization_random(global_tokenizer, config_random):
    """Test model loading and PEFT model initialization."""
    base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

    model = get_peft_model(base_model, config_random)
    assert model is not None, "PEFT model initialization failed"


def test_model_initialization_wrong_task_type_warns():
    # TODO: adjust this test to check for an error with PEFT v0.18.0
    msg = "CPTConfig only supports task_type = CAUSAL_LM, setting it automatically"
    with pytest.warns(FutureWarning, match=msg):
        config = CPTConfig(task_type=TaskType.SEQ_CLS)
    assert config.task_type == TaskType.CAUSAL_LM


def test_model_training_random(sst_data, global_tokenizer, collator, config_random):
    """Perform a short training run to verify the model and data integration."""
    base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    model = get_peft_model(base_model, config_random)
    emb = model.prompt_encoder.default.embedding.weight.data.clone().detach()

    training_args = TrainingArguments(
        output_dir="./results",
        per_device_train_batch_size=1,
        num_train_epochs=2,
        remove_unused_columns=False,
        save_strategy="no",
        logging_steps=1,
    )

    train_dataset = dataset(sst_data["train"], global_tokenizer)

    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=collator)
    trainer.train()

    # Verify that the embedding tensor remains unchanged (frozen)
    assert torch.all(model.prompt_encoder.default.embedding.weight.data.clone().detach().cpu() == emb.cpu())

    delta_emb = model.prompt_encoder.default.get_projection().clone().detach()
    norm_delta = delta_emb.norm(dim=1).cpu()
    epsilon = model.prompt_encoder.default.get_epsilon().cpu()
    # Verify that the change in tokens is constrained to epsilon
    assert torch.all(norm_delta <= epsilon)


def test_model_batch_training_text(sst_data, global_tokenizer, collator, config_text):
    """Perform a short training run to verify the model and data integration."""
    base_model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    model = get_peft_model(base_model, config_text)
    emb = model.prompt_encoder.default.embedding.weight.data.clone().detach()

    training_args = TrainingArguments(
        output_dir="./results",
        per_device_train_batch_size=2,
        num_train_epochs=2,
        remove_unused_columns=False,
        save_strategy="no",
        logging_steps=1,
    )

    train_dataset = dataset(sst_data["train"], global_tokenizer)

    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, data_collator=collator)
    trainer.train()

    # Verify that the embedding tensor remains unchanged (frozen)
    assert torch.all(model.prompt_encoder.default.embedding.weight.data.clone().detach().cpu() == emb.cpu())

    cpt_tokens_type_mask = torch.Tensor(config_text.cpt_tokens_type_mask).long()
    non_label_idx = (cpt_tokens_type_mask == 1) | (cpt_tokens_type_mask == 2) | (cpt_tokens_type_mask == 3)

    delta_emb = model.prompt_encoder.default.get_projection().clone().detach()
    norm_delta = delta_emb.norm(dim=1).cpu()
    epsilon = model.prompt_encoder.default.get_epsilon().cpu()
    # Verify that the change in tokens is constrained to epsilon
    assert torch.all(norm_delta <= epsilon)
    # Ensure that label tokens remain unchanged
    assert torch.all((norm_delta == 0) == (~non_label_idx))
peft/tests/test_cpt.py/0
{ "file_path": "peft/tests/test_cpt.py", "repo_id": "peft", "token_count": 5033 }
257
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re

import pytest
import torch
from diffusers import StableDiffusionPipeline
from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification

from peft import (
    AdaLoraConfig,
    IA3Config,
    LoKrConfig,
    LoraConfig,
    RandLoraConfig,
    get_peft_model_state_dict,
    inject_adapter_in_model,
)
from peft.tuners import lora
from peft.utils import ModulesToSaveWrapper

from .testing_utils import hub_online_once


class DummyModel(torch.nn.Module):
    """Minimal model for exercising the low-level adapter-injection API."""

    def __init__(self):
        super().__init__()
        self.embedding = torch.nn.Embedding(10, 10)
        self.linear = torch.nn.Linear(10, 10)
        # linear2 is deliberately not used in forward; it exists to test modules_to_save handling.
        self.linear2 = torch.nn.Linear(10, 10, bias=True)
        self.lm_head = torch.nn.Linear(10, 10)

    def forward(self, input_ids):
        x = self.embedding(input_ids)
        x = self.linear(x)
        x = self.lm_head(x)
        return x


class TestLowLevelFunctional:
    # Some simple tests for the low level API
    @pytest.fixture
    def model(self):
        """DummyModel with a LoRA adapter injected on the `linear` module."""
        model = DummyModel()

        lora_config = LoraConfig(
            lora_alpha=16,
            lora_dropout=0.1,
            r=64,
            bias="none",
            target_modules=["linear"],
        )

        return inject_adapter_in_model(lora_config, model)

    def test_inject_adapter_in_model(self, model):
        # Forward must still work after injection, and only the targeted module gets LoRA weights.
        dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]])
        _ = model(dummy_inputs)

        for name, module in model.named_modules():
            if name == "linear":
                assert hasattr(module, "lora_A")
                assert hasattr(module, "lora_B")

    def test_get_peft_model_state_dict(self, model):
        # The PEFT state dict should contain only adapter (lora) weights.
        peft_state_dict = get_peft_model_state_dict(model)

        for key in peft_state_dict.keys():
            assert "lora" in key

    def test_modules_to_save(self):
        # modules_to_save entries should be wrapped and their weights included in the state dict.
        model = DummyModel()

        lora_config = LoraConfig(
            lora_alpha=16,
            lora_dropout=0.1,
            r=64,
            bias="none",
            target_modules=["linear"],
            modules_to_save=["embedding", "linear2"],
        )

        model = inject_adapter_in_model(lora_config, model)

        for name, module in model.named_modules():
            if name == "linear":
                assert hasattr(module, "lora_A")
                assert hasattr(module, "lora_B")
            elif name in ["embedding", "linear2"]:
                assert isinstance(module, ModulesToSaveWrapper)

        state_dict = get_peft_model_state_dict(model)

        assert "embedding.weight" in state_dict.keys()

        assert hasattr(model.embedding, "weight")
        assert hasattr(model.linear2, "weight")
        assert hasattr(model.linear2, "bias")


class TestInjectAdapterFromStateDict:
    # The inject_adapter_in_model function can determine the target modules based on the LoraConfig (default) or based
    # on a state_dict (or rather, the state_dict keys). Here we test that the latter works as expected.

    # We test a subset of model classes and PEFT configs, testing everything would be excessive
    @pytest.mark.parametrize(
        "model_cls_and_id",
        [
            (AutoModelForCausalLM, "trl-internal-testing/tiny-random-LlamaForCausalLM"),
            (AutoModel, "hf-internal-testing/tiny-random-BertModel"),
            (AutoModelForSeq2SeqLM, "hf-internal-testing/tiny-random-BartForConditionalGeneration"),
            (AutoModelForSequenceClassification, "hf-internal-testing/tiny-random-RobertaForSequenceClassification"),
        ],
        ids=["Llama", "Bert", "Bart", "Roberta"],
    )
    @pytest.mark.parametrize(
        "config",
        [
            AdaLoraConfig(total_step=5),
            IA3Config(),
            LoKrConfig(),
            LoraConfig(),
            RandLoraConfig(),
        ],
        ids=["AdaLoRA", "IA3", "LoKr", "LoRA", "RandLoRA"],
    )
    def test_inject_from_state_dict_and_from_config_target_same_layers(self, model_cls_and_id, config, recwarn):
        model_cls, model_id = model_cls_and_id
        config = copy.deepcopy(config)  # since PEFT may mutate it
        with hub_online_once(model_id):
            # use config for injection
            model = model_cls.from_pretrained(model_id)
            model = inject_adapter_in_model(config, model)
            sd_before = get_peft_model_state_dict(model)
            del model

            model = model_cls.from_pretrained(model_id)
            # get other warnings, if any, out of the way
            recwarn.clear()
            # assure that this doesn't cause any warnings
            model = inject_adapter_in_model(config, model, state_dict=sd_before)
            assert not recwarn.list
            sd_after = get_peft_model_state_dict(model)

            # We expect the same keys and the same shapes of the weights.
Don't check the values: injection is only # about creating the PEFT adapter, not about loading the actual weights assert len(sd_before) > 0 assert sd_before.keys() == sd_after.keys() for key in sd_before.keys(): assert sd_before[key].shape == sd_after[key].shape def test_inject_from_state_dict_transformers(self): model_id = "facebook/opt-125m" config = LoraConfig() with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) model.add_adapter(config) sd_before = get_peft_model_state_dict(model) del model model = AutoModelForCausalLM.from_pretrained(model_id) model = inject_adapter_in_model(config, model, state_dict=sd_before) sd_after = get_peft_model_state_dict(model) # We exepct the same keys and the same shapes of the weights. Don't check the values: injection is only # about creating the PEFT adapter, not about loading the actual weights assert len(sd_before) > 0 assert sd_before.keys() == sd_after.keys() for key in sd_before.keys(): assert sd_before[key].shape == sd_after[key].shape def test_inject_from_state_dict_transformers_irregular_targets(self): # ensure that this works even if an "irregular" pattern is used, i.e. only targeting some modules on some layers model_id = "facebook/opt-125m" config = LoraConfig( target_modules=r".*\.[0-5]\.self_attn\.v_proj|.*\.[4-7]\.self_attn\.k_proj", ) with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) model.add_adapter(config) sd_before = get_peft_model_state_dict(model) del model model = AutoModelForCausalLM.from_pretrained(model_id) model = inject_adapter_in_model(config, model, state_dict=sd_before) sd_after = get_peft_model_state_dict(model) # We exepct the same keys and the same shapes of the weights. 
Don't check the values: injection is only # about creating the PEFT adapter, not about loading the actual weights assert len(sd_before) > 0 assert sd_before.keys() == sd_after.keys() for key in sd_before.keys(): assert sd_before[key].shape == sd_after[key].shape def test_inject_from_state_dict_transformers_target_parameters_raises(self): # Injecting from state_dict does not correctly identify target_parameters. This is because, just from looking at # the state_dict, we cannot tell if the user intends to use target_modules or target_parameters. Currently, we # just assume the former, thus applying normal lora.Linear etc. layers instead of lora.ParamWrapper. When we # detect that the user tries to do this, we raise an error. model_id = "facebook/opt-125m" config = LoraConfig(target_modules=[], target_parameters=["q_proj.weight", "v_proj.weight"]) with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) model.add_adapter(config) sd = get_peft_model_state_dict(model) del model model = AutoModelForCausalLM.from_pretrained(model_id) msg = "Trying to inject a PEFT adapter from a state_dict but the PEFT config uses `target_parameters`" with pytest.raises(ValueError, match=msg): inject_adapter_in_model(config, model, state_dict=sd) @pytest.mark.xfail( reason="Loading from state_dict with target_parameters fails", raises=AssertionError, strict=True ) def test_inject_from_state_dict_transformers_target_parameters_fails(self): # Injecting from state_dict does not correctly identify target_parameters. This is because, just from looking at # the state_dict, we cannot tell if the user intends to use target_modules or target_parameters. Currently, we # just assume the former, thus applying normal lora.Linear etc. layers instead of lora.ParamWrapper. When we # don't detect that the user tries to do this, there is nothing that can be done. 
model_id = "facebook/opt-125m" config = LoraConfig(target_modules=[], target_parameters=["q_proj.weight", "v_proj.weight"]) with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) model.add_adapter(config) # sanity check: for name, module in model.named_modules(): if name.endswith((".q_proj", ".v_proj")): assert isinstance(module, lora.ParamWrapper) sd_before = get_peft_model_state_dict(model) del model model = AutoModelForCausalLM.from_pretrained(model_id) config = LoraConfig() # no target_parameters defined, we cannot know the original intent model = inject_adapter_in_model(config, model, state_dict=sd_before) sd_after = get_peft_model_state_dict(model) # this fails, we get lora.Linear instances for name, module in model.named_modules(): if name.endswith((".q_proj", ".v_proj")): assert isinstance(module, lora.ParamWrapper) def test_inject_from_state_dict_stable_diffusion(self): # same test as above, but with stable diffusion model and only testing LoRA model_id = "hf-internal-testing/tiny-sd-pipe" config_text_encoder = LoraConfig(target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"]) config_unet = LoraConfig( target_modules=[ "proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2", ] ) with hub_online_once(model_id): pipe = StableDiffusionPipeline.from_pretrained(model_id) pipe.text_encoder.add_adapter(config_text_encoder) pipe.unet.add_adapter(config_unet) sd_te_before = get_peft_model_state_dict(pipe.text_encoder) sd_unet_before = get_peft_model_state_dict(pipe.unet) del pipe pipe = StableDiffusionPipeline.from_pretrained(model_id) inject_adapter_in_model(config_text_encoder, pipe.text_encoder, state_dict=sd_te_before) inject_adapter_in_model(config_unet, pipe.unet, state_dict=sd_unet_before) sd_te_after = get_peft_model_state_dict(pipe.text_encoder) sd_unet_after = get_peft_model_state_dict(pipe.unet) # We exepct the same keys and the same shapes of the weights. 
Don't check the values: injection is only # about creating the PEFT adapter, not about loading the actual weights assert len(sd_te_before) > 0 assert sd_te_before.keys() == sd_te_after.keys() for key in sd_te_before.keys(): assert sd_te_before[key].shape == sd_te_after[key].shape assert len(sd_unet_before) > 0 assert sd_unet_before.keys() == sd_unet_after.keys() for key in sd_unet_before.keys(): assert sd_unet_before[key].shape == sd_unet_after[key].shape def test_inject_from_state_dict_low_cpu_mem_usage(self): model_id = "facebook/opt-125m" config = LoraConfig() with hub_online_once(model_id): # use config for injection model = AutoModelForCausalLM.from_pretrained(model_id) model = inject_adapter_in_model(config, model) sd_before = get_peft_model_state_dict(model) del model model = AutoModelForCausalLM.from_pretrained(model_id) model = inject_adapter_in_model(config, model, state_dict=sd_before, low_cpu_mem_usage=True) # all PEFT parameters should be on meta device assert {p.device.type for p in get_peft_model_state_dict(model).values()} == {"meta"} def test_inject_from_state_dict_missing_keys_warning(self): # check that if the PEFT config specifies **more** taget modules than the state_dict, we get a warning for that model_id = "facebook/opt-125m" config = LoraConfig() with hub_online_once(model_id): # use config for injection model = AutoModelForCausalLM.from_pretrained(model_id) model = inject_adapter_in_model(config, model) sd_before = get_peft_model_state_dict(model) del model # delete a keys for one module from state_dict del sd_before["model.decoder.layers.5.self_attn.q_proj.lora_A.weight"] del sd_before["model.decoder.layers.5.self_attn.q_proj.lora_B.weight"] model = AutoModelForCausalLM.from_pretrained(model_id) msg = re.escape( "While injecting the PEFT adapters, an inconsistency was discovered between the PEFT config and " "the provided state_dict. This is not necessarily an issue and can be ignored if this was the " "intent. 
The PEFT config contained these additional target modules: " "['model.decoder.layers.5.self_attn.q_proj']. " ) with pytest.warns(RuntimeWarning, match=msg): # as rec:#(UserWarning, match=msg) as rec: model = inject_adapter_in_model(config, model, state_dict=sd_before, low_cpu_mem_usage=True) # besides the warning, the rest of the injection should work sd_after = get_peft_model_state_dict(model) assert len(sd_before) > 0 assert sd_before.keys() == sd_after.keys() for key in sd_before.keys(): assert sd_before[key].shape == sd_after[key].shape def test_inject_from_state_dict_extra_keys_warning(self): # check that if the PEFT config specifies **fewer** taget modules than the state_dict, we get a warning for that model_id = "facebook/opt-125m" config = LoraConfig() with hub_online_once(model_id): # use config for injection model = AutoModelForCausalLM.from_pretrained(model_id) model = inject_adapter_in_model(config, model) sd_before = get_peft_model_state_dict(model) del model # remove q_proj of layer 5 from the PEFT config config.exclude_modules = ["model.decoder.layers.5.self_attn.q_proj"] model = AutoModelForCausalLM.from_pretrained(model_id) msg = re.escape( "While injecting the PEFT adapters, an inconsistency was discovered between the PEFT config and " "the provided state_dict. This is not necessarily an issue and can be ignored if this was the " "intent. The state_dict contained these additional target modules: " "['model.decoder.layers.5.self_attn.q_proj']. " ) with pytest.warns(RuntimeWarning, match=msg): model = inject_adapter_in_model(config, model, state_dict=sd_before, low_cpu_mem_usage=True) # besides the warning, the rest of the injection should work sd_after = get_peft_model_state_dict(model) assert len(sd_before) > 0 assert sd_before.keys() == sd_after.keys() for key in sd_before.keys(): assert sd_before[key].shape == sd_after[key].shape
peft/tests/test_low_level_api.py/0
{ "file_path": "peft/tests/test_low_level_api.py", "repo_id": "peft", "token_count": 7590 }
258
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is not a full on test suite of vision models, since we already run many tests on dummy models with Conv2d layers # and on stable diffusion models. Instead, this file contains specific tests for bugs that have been found in the past. import gc import numpy as np import pytest import torch from accelerate.utils.memory import clear_device_cache from safetensors.torch import load_file from transformers import ( AutoImageProcessor, AutoModelForImageClassification, AutoProcessor, LlavaForConditionalGeneration, ) from peft import ( HRAConfig, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, PeftModel, PrefixTuningConfig, get_peft_model, ) from .testing_utils import load_cat_image CONFIGS = { "lora": LoraConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]), "loha": LoHaConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]), "lokr": LoKrConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]), "oft": OFTConfig( r=1, oft_block_size=0, target_modules=["convolution"], modules_to_save=["classifier", "normalization"] ), "hra": HRAConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]), # TODO: cannot use BOFT because some convolutional kernel dimensions are even (64) and others odd (147). 
There is no # common denominator for the boft_block_size except 1, but using 1 results in an error in the fbd_cuda kernel: # > Error in forward_fast_block_diag_cuda_kernel: an illegal memory access was encountered # "boft": BOFTConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"], boft_block_size=2), } # Ensure that models like Llava that pass past_key_values automatically do not fail, see #1938 class TestPastKV: def test_past_kv(self): model_id = "peft-internal-testing/tiny-LlavaForConditionalGeneration" prompt = "USER: <image>\nWhat are these?\nASSISTANT:" # prepare model and inputs model = LlavaForConditionalGeneration.from_pretrained( model_id, low_cpu_mem_usage=True, ) processor = AutoProcessor.from_pretrained(model_id) raw_image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8) inputs = processor(text=prompt, images=raw_image, return_tensors="pt") # get peft model peft_config = PrefixTuningConfig(task_type="CAUSAL_LM", num_virtual_tokens=20) model = get_peft_model(model, peft_config) # check that this does not raise model(**inputs, output_hidden_states=True) class TestResnet: model_id = "hf-internal-testing/tiny-random-ResNetForImageClassification" cat_image = load_cat_image() # for caching @pytest.fixture(autouse=True) def teardown(self): r""" Efficient mechanism to free GPU memory after each test. 
Based on https://github.com/huggingface/transformers/issues/21094 """ clear_device_cache(garbage_collection=True) gc.collect() @pytest.fixture(scope="class") def image_processor(self): image_processor = AutoImageProcessor.from_pretrained(self.model_id) return image_processor @pytest.fixture(scope="class") def data(self, image_processor): return image_processor(self.cat_image, return_tensors="pt") @pytest.mark.parametrize("config", CONFIGS.values(), ids=CONFIGS.keys()) def test_model_with_batchnorm_reproducibility(self, config, tmp_path, data): # see 1732 torch.manual_seed(0) model = AutoModelForImageClassification.from_pretrained(self.model_id) model = get_peft_model(model, config) # record outputs before training model.eval() with torch.inference_mode(): output_before = model(**data) model.train() # train the model optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3) batch_size = 4 max_steps = 5 * batch_size labels = torch.zeros(1, 3) labels[0, 1] = 1 for i in range(0, max_steps, batch_size): optimizer.zero_grad() outputs = model(**data, labels=labels) loss = outputs.loss loss.backward() optimizer.step() # record outputs after training model.eval() with torch.inference_mode(): output_after = model(**data) assert torch.isfinite(output_after.logits).all() atol, rtol = 1e-4, 1e-4 # sanity check: model was updated assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol) # check saving the model and loading it model.save_pretrained(tmp_path) del model torch.manual_seed(0) model = AutoModelForImageClassification.from_pretrained(self.model_id) model = PeftModel.from_pretrained(model, tmp_path).eval() with torch.inference_mode(): output_loaded = model(**data) assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol) # ensure that the checkpoint file contains the buffers model_running_mean = len([k for k in model.state_dict().keys() if "running_mean" in k]) state_dict = load_file(tmp_path / 
"adapter_model.safetensors") checkpoint_running_mean = len([k for k in state_dict.keys() if "running_mean" in k]) # note that the model has twice as many "running_mean", as there is one copy per ModulesToSaveWrapper, we need # to multiply by 2 to get the same number assert model_running_mean == checkpoint_running_mean * 2
peft/tests/test_vision_models.py/0
{ "file_path": "peft/tests/test_vision_models.py", "repo_id": "peft", "token_count": 2432 }
259
# PyTorch Image Models - [What's New](#whats-new) - [Introduction](#introduction) - [Models](#models) - [Features](#features) - [Results](#results) - [Getting Started (Documentation)](#getting-started-documentation) - [Train, Validation, Inference Scripts](#train-validation-inference-scripts) - [Awesome PyTorch Resources](#awesome-pytorch-resources) - [Licenses](#licenses) - [Citing](#citing) ## What's New ## July 23, 2025 * Add `set_input_size()` method to EVA models, used by OpenCLIP 3.0.0 to allow resizing for timm based encoder models. * Release 1.0.18, needed for PE-Core S & T models in OpenCLIP 3.0.0 * Fix small typing issue that broke Python 3.9 compat. 1.0.19 patch release. ## July 21, 2025 * ROPE support added to NaFlexViT. All models covered by the EVA base (`eva.py`) including EVA, EVA02, Meta PE ViT, `timm` SBB ViT w/ ROPE, and Naver ROPE-ViT can be now loaded in NaFlexViT when `use_naflex=True` passed at model creation time * More Meta PE ViT encoders added, including small/tiny variants, lang variants w/ tiling, and more spatial variants. * PatchDropout fixed with NaFlexViT and also w/ EVA models (regression after adding Naver ROPE-ViT) * Fix XY order with grid_indexing='xy', impacted non-square image use in 'xy' mode (only ROPE-ViT and PE impacted). ## July 7, 2025 * MobileNet-v5 backbone tweaks for improved Google Gemma 3n behaviour (to pair with updated official weights) * Add stem bias (zero'd in updated weights, compat break with old weights) * GELU -> GELU (tanh approx). 
A minor change to be closer to JAX * Add two arguments to layer-decay support, a min scale clamp and 'no optimization' scale threshold * Add 'Fp32' LayerNorm, RMSNorm, SimpleNorm variants that can be enabled to force computation of norm in float32 * Some typing, argument cleanup for norm, norm+act layers done with above * Support Naver ROPE-ViT (https://github.com/naver-ai/rope-vit) in `eva.py`, add RotaryEmbeddingMixed module for mixed mode, weights on HuggingFace Hub |model |img_size|top1 |top5 |param_count| |--------------------------------------------------|--------|------|------|-----------| |vit_large_patch16_rope_mixed_ape_224.naver_in1k |224 |84.84 |97.122|304.4 | |vit_large_patch16_rope_mixed_224.naver_in1k |224 |84.828|97.116|304.2 | |vit_large_patch16_rope_ape_224.naver_in1k |224 |84.65 |97.154|304.37 | |vit_large_patch16_rope_224.naver_in1k |224 |84.648|97.122|304.17 | |vit_base_patch16_rope_mixed_ape_224.naver_in1k |224 |83.894|96.754|86.59 | |vit_base_patch16_rope_mixed_224.naver_in1k |224 |83.804|96.712|86.44 | |vit_base_patch16_rope_ape_224.naver_in1k |224 |83.782|96.61 |86.59 | |vit_base_patch16_rope_224.naver_in1k |224 |83.718|96.672|86.43 | |vit_small_patch16_rope_224.naver_in1k |224 |81.23 |95.022|21.98 | |vit_small_patch16_rope_mixed_224.naver_in1k |224 |81.216|95.022|21.99 | |vit_small_patch16_rope_ape_224.naver_in1k |224 |81.004|95.016|22.06 | |vit_small_patch16_rope_mixed_ape_224.naver_in1k |224 |80.986|94.976|22.06 | * Some cleanup of ROPE modules, helpers, and FX tracing leaf registration * Preparing version 1.0.17 release ## June 26, 2025 * MobileNetV5 backbone (w/ encoder only variant) for [Gemma 3n](https://ai.google.dev/gemma/docs/gemma-3n#parameters) image encoder * Version 1.0.16 released ## June 23, 2025 * Add F.grid_sample based 2D and factorized pos embed resize to NaFlexViT. Faster when lots of different sizes (based on example by https://github.com/stas-sl). 
* Further speed up patch embed resample by replacing vmap with matmul (based on snippet by https://github.com/stas-sl). * Add 3 initial native aspect NaFlexViT checkpoints created while testing, ImageNet-1k and 3 different pos embed configs w/ same hparams. | Model | Top-1 Acc | Top-5 Acc | Params (M) | Eval Seq Len | |:---|:---:|:---:|:---:|:---:| | [naflexvit_base_patch16_par_gap.e300_s576_in1k](https://hf.co/timm/naflexvit_base_patch16_par_gap.e300_s576_in1k) | 83.67 | 96.45 | 86.63 | 576 | | [naflexvit_base_patch16_parfac_gap.e300_s576_in1k](https://hf.co/timm/naflexvit_base_patch16_parfac_gap.e300_s576_in1k) | 83.63 | 96.41 | 86.46 | 576 | | [naflexvit_base_patch16_gap.e300_s576_in1k](https://hf.co/timm/naflexvit_base_patch16_gap.e300_s576_in1k) | 83.50 | 96.46 | 86.63 | 576 | * Support gradient checkpointing for `forward_intermediates` and fix some checkpointing bugs. Thanks https://github.com/brianhou0208 * Add 'corrected weight decay' (https://arxiv.org/abs/2506.02285) as option to AdamW (legacy), Adopt, Kron, Adafactor (BV), Lamb, LaProp, Lion, NadamW, RmsPropTF, SGDW optimizers * Switch PE (perception encoder) ViT models to use native timm weights instead of remapping on the fly * Fix cuda stream bug in prefetch loader ## June 5, 2025 * Initial NaFlexVit model code. NaFlexVit is a Vision Transformer with: 1. Encapsulated embedding and position encoding in a single module 2. Support for nn.Linear patch embedding on pre-patchified (dictionary) inputs 3. Support for NaFlex variable aspect, variable resolution (SigLip-2: https://arxiv.org/abs/2502.14786) 4. Support for FlexiViT variable patch size (https://arxiv.org/abs/2212.08013) 5. 
Support for NaViT fractional/factorized position embedding (https://arxiv.org/abs/2307.06304) * Existing vit models in `vision_transformer.py` can be loaded into the NaFlexVit model by adding the `use_naflex=True` flag to `create_model` * Some native weights coming soon * A full NaFlex data pipeline is available that allows training / fine-tuning / evaluating with variable aspect / size images * To enable in `train.py` and `validate.py` add the `--naflex-loader` arg, must be used with a NaFlexVit * To evaluate an existing (classic) ViT loaded in NaFlexVit model w/ NaFlex data pipe: * `python validate.py /imagenet --amp -j 8 --model vit_base_patch16_224 --model-kwargs use_naflex=True --naflex-loader --naflex-max-seq-len 256` * The training has some extra args features worth noting * The `--naflex-train-seq-lens'` argument specifies which sequence lengths to randomly pick from per batch during training * The `--naflex-max-seq-len` argument sets the target sequence length for validation * Adding `--model-kwargs enable_patch_interpolator=True --naflex-patch-sizes 12 16 24` will enable random patch size selection per-batch w/ interpolation * The `--naflex-loss-scale` arg changes loss scaling mode per batch relative to the batch size, `timm` NaFlex loading changes the batch size for each seq len ## May 28, 2025 * Add a number of small/fast models thanks to https://github.com/brianhou0208 * SwiftFormer - [(ICCV2023) SwiftFormer: Efficient Additive Attention for Transformer-based Real-time Mobile Vision Applications](https://github.com/Amshaker/SwiftFormer) * FasterNet - [(CVPR2023) Run, Don’t Walk: Chasing Higher FLOPS for Faster Neural Networks](https://github.com/JierunChen/FasterNet) * SHViT - [(CVPR2024) SHViT: Single-Head Vision Transformer with Memory Efficient](https://github.com/ysj9909/SHViT) * StarNet - [(CVPR2024) Rewrite the Stars](https://github.com/ma-xu/Rewrite-the-Stars) * GhostNet-V3 [GhostNetV3: Exploring the Training Strategies for Compact 
Models](https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/ghostnetv3_pytorch) * Update EVA ViT (closest match) to support Perception Encoder models (https://arxiv.org/abs/2504.13181) from Meta, loading Hub weights but I still need to push dedicated `timm` weights * Add some flexibility to ROPE impl * Big increase in number of models supporting `forward_intermediates()` and some additional fixes thanks to https://github.com/brianhou0208 * DaViT, EdgeNeXt, EfficientFormerV2, EfficientViT(MIT), EfficientViT(MSRA), FocalNet, GCViT, HGNet /V2, InceptionNeXt, Inception-V4, MambaOut, MetaFormer, NesT, Next-ViT, PiT, PVT V2, RepGhostNet, RepViT, ResNetV2, ReXNet, TinyViT, TResNet, VoV * TNT model updated w/ new weights `forward_intermediates()` thanks to https://github.com/brianhou0208 * Add `local-dir:` pretrained schema, can use `local-dir:/path/to/model/folder` for model name to source model / pretrained cfg & weights Hugging Face Hub models (config.json + weights file) from a local folder. * Fixes, improvements for onnx export ## Feb 21, 2025 * SigLIP 2 ViT image encoders added (https://huggingface.co/collections/timm/siglip-2-67b8e72ba08b09dd97aecaf9) * Variable resolution / aspect NaFlex versions are a WIP * Add 'SO150M2' ViT weights trained with SBB recipes, great results, better for ImageNet than previous attempt w/ less training. 
* `vit_so150m2_patch16_reg1_gap_448.sbb_e200_in12k_ft_in1k` - 88.1% top-1 * `vit_so150m2_patch16_reg1_gap_384.sbb_e200_in12k_ft_in1k` - 87.9% top-1 * `vit_so150m2_patch16_reg1_gap_256.sbb_e200_in12k_ft_in1k` - 87.3% top-1 * `vit_so150m2_patch16_reg4_gap_256.sbb_e200_in12k` * Updated InternViT-300M '2.5' weights * Release 1.0.15 ## Feb 1, 2025 * FYI PyTorch 2.6 & Python 3.13 are tested and working w/ current main and released version of `timm` ## Jan 27, 2025 * Add Kron Optimizer (PSGD w/ Kronecker-factored preconditioner) * Code from https://github.com/evanatyourservice/kron_torch * See also https://sites.google.com/site/lixilinx/home/psgd ## Jan 19, 2025 * Fix loading of LeViT safetensor weights, remove conversion code which should have been deactivated * Add 'SO150M' ViT weights trained with SBB recipes, decent results, but not optimal shape for ImageNet-12k/1k pretrain/ft * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k_ft_in1k` - 86.7% top-1 * `vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k` - 87.4% top-1 * `vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k` * Misc typing, typo, etc. cleanup * 1.0.14 release to get above LeViT fix out ## Jan 9, 2025 * Add support to train and validate in pure `bfloat16` or `float16` * `wandb` project name arg added by https://github.com/caojiaolong, use arg.experiment for name * Fix old issue w/ checkpoint saving not working on filesystem w/o hard-link support (e.g. FUSE fs mounts) * 1.0.13 release ## Jan 6, 2025 * Add `torch.utils.checkpoint.checkpoint()` wrapper in `timm.models` that defaults `use_reentrant=False`, unless `TIMM_REENTRANT_CKPT=1` is set in env. ## Dec 31, 2024 * `convnext_nano` 384x384 ImageNet-12k pretrain & fine-tune. 
https://huggingface.co/models?search=convnext_nano%20r384 * Add AIM-v2 encoders from https://github.com/apple/ml-aim, see on Hub: https://huggingface.co/models?search=timm%20aimv2 * Add PaliGemma2 encoders from https://github.com/google-research/big_vision to existing PaliGemma, see on Hub: https://huggingface.co/models?search=timm%20pali2 * Add missing L/14 DFN2B 39B CLIP ViT, `vit_large_patch14_clip_224.dfn2b_s39b` * Fix existing `RmsNorm` layer & fn to match standard formulation, use PT 2.5 impl when possible. Move old impl to `SimpleNorm` layer, it's LN w/o centering or bias. There were only two `timm` models using it, and they have been updated. * Allow override of `cache_dir` arg for model creation * Pass through `trust_remote_code` for HF datasets wrapper * `inception_next_atto` model added by creator * Adan optimizer caution, and Lamb decoupled weight decay options * Some feature_info metadata fixed by https://github.com/brianhou0208 * All OpenCLIP and JAX (CLIP, SigLIP, Pali, etc) model weights that used load time remapping were given their own HF Hub instances so that they work with `hf-hub:` based loading, and thus will work with new Transformers `TimmWrapperModel` ## Nov 28, 2024 * More optimizers * Add MARS optimizer (https://arxiv.org/abs/2411.10438, https://github.com/AGI-Arena/MARS) * Add LaProp optimizer (https://arxiv.org/abs/2002.04839, https://github.com/Z-T-WANG/LaProp-Optimizer) * Add masking from 'Cautious Optimizers' (https://arxiv.org/abs/2411.16085, https://github.com/kyleliang919/C-Optim) to Adafactor, Adafactor Big Vision, AdamW (legacy), Adopt, Lamb, LaProp, Lion, NadamW, RMSPropTF, SGDW * Cleanup some docstrings and type annotations re optimizers and factory * Add MobileNet-V4 Conv Medium models pretrained on in12k and fine-tuned in1k @ 384x384 * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k_ft_in1k * https://huggingface.co/timm/mobilenetv4_conv_medium.e250_r384_in12k * 
https://huggingface.co/timm/mobilenetv4_conv_medium.e180_ad_r384_in12k * https://huggingface.co/timm/mobilenetv4_conv_medium.e180_r384_in12k * Add small cs3darknet, quite good for the speed * https://huggingface.co/timm/cs3darknet_focus_s.ra4_e3600_r256_in1k ## Nov 12, 2024 * Optimizer factory refactor * New factory works by registering optimizers using an OptimInfo dataclass w/ some key traits * Add `list_optimizers`, `get_optimizer_class`, `get_optimizer_info` to reworked `create_optimizer_v2` fn to explore optimizers, get info or class * deprecate `optim.optim_factory`, move fns to `optim/_optim_factory.py` and `optim/_param_groups.py` and encourage import via `timm.optim` * Add Adopt (https://github.com/iShohei220/adopt) optimizer * Add 'Big Vision' variant of Adafactor (https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) optimizer * Fix original Adafactor to pick better factorization dims for convolutions * Tweak LAMB optimizer with some improvements in torch.where functionality since original, refactor clipping a bit * dynamic img size support in vit, deit, eva improved to support resize from non-square patch grids, thanks https://github.com/wojtke * ## Oct 31, 2024 Add a set of new very well trained ResNet & ResNet-V2 18/34 (basic block) weights. See https://huggingface.co/blog/rwightman/resnet-trick-or-treat ## Oct 19, 2024 * Cleanup torch amp usage to avoid cuda specific calls, merge support for Ascend (NPU) devices from [MengqingCao](https://github.com/MengqingCao) that should work now in PyTorch 2.5 w/ new device extension autoloading feature. Tested Intel Arc (XPU) in Pytorch 2.5 too and it (mostly) worked. 
## Oct 16, 2024 * Fix error on importing from deprecated path `timm.models.registry`, increased priority of existing deprecation warnings to be visible * Port weights of InternViT-300M (https://huggingface.co/OpenGVLab/InternViT-300M-448px) to `timm` as `vit_intern300m_patch14_448` ### Oct 14, 2024 * Pre-activation (ResNetV2) version of 18/18d/34/34d ResNet model defs added by request (weights pending) * Release 1.0.10 ### Oct 11, 2024 * MambaOut (https://github.com/yuweihao/MambaOut) model & weights added. A cheeky take on SSM vision models w/o the SSM (essentially ConvNeXt w/ gating). A mix of original weights + custom variations & weights. |model |img_size|top1 |top5 |param_count| |---------------------------------------------------------------------------------------------------------------------|--------|------|------|-----------| |[mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k)|384 |87.506|98.428|101.66 | |[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|288 |86.912|98.236|101.66 | |[mambaout_base_plus_rw.sw_e150_in12k_ft_in1k](http://huggingface.co/timm/mambaout_base_plus_rw.sw_e150_in12k_ft_in1k)|224 |86.632|98.156|101.66 | |[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |288 |84.974|97.332|86.48 | |[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |288 |84.962|97.208|94.45 | |[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |288 |84.832|97.27 |88.83 | |[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |288 |84.72 |96.93 |84.81 | |[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |288 |84.598|97.098|48.5 | |[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |288 |84.5 
|96.974|48.49 | |[mambaout_base_wide_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_wide_rw.sw_e500_in1k) |224 |84.454|96.864|94.45 | |[mambaout_base_tall_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_tall_rw.sw_e500_in1k) |224 |84.434|96.958|86.48 | |[mambaout_base_short_rw.sw_e500_in1k](http://huggingface.co/timm/mambaout_base_short_rw.sw_e500_in1k) |224 |84.362|96.952|88.83 | |[mambaout_base.in1k](http://huggingface.co/timm/mambaout_base.in1k) |224 |84.168|96.68 |84.81 | |[mambaout_small.in1k](http://huggingface.co/timm/mambaout_small.in1k) |224 |84.086|96.63 |48.49 | |[mambaout_small_rw.sw_e450_in1k](http://huggingface.co/timm/mambaout_small_rw.sw_e450_in1k) |224 |84.024|96.752|48.5 | |[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |288 |83.448|96.538|26.55 | |[mambaout_tiny.in1k](http://huggingface.co/timm/mambaout_tiny.in1k) |224 |82.736|96.1 |26.55 | |[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |288 |81.054|95.718|9.14 | |[mambaout_kobe.in1k](http://huggingface.co/timm/mambaout_kobe.in1k) |224 |79.986|94.986|9.14 | |[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |288 |79.848|95.14 |7.3 | |[mambaout_femto.in1k](http://huggingface.co/timm/mambaout_femto.in1k) |224 |78.87 |94.408|7.3 | * SigLIP SO400M ViT fine-tunes on ImageNet-1k @ 378x378, added 378x378 option for existing SigLIP 384x384 models * [vit_so400m_patch14_siglip_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_378.webli_ft_in1k) - 89.42 top-1 * [vit_so400m_patch14_siglip_gap_378.webli_ft_in1k](https://huggingface.co/timm/vit_so400m_patch14_siglip_gap_378.webli_ft_in1k) - 89.03 * SigLIP SO400M ViT encoder from recent multi-lingual (i18n) variant, patch16 @ 256x256 (https://huggingface.co/timm/ViT-SO400M-16-SigLIP-i18n-256). OpenCLIP update pending. * Add two ConvNeXt 'Zepto' models & weights (one w/ overlapped stem and one w/ patch stem). 
Uses RMSNorm, smaller than previous 'Atto', 2.2M params. * [convnext_zepto_rms_ols.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms_ols.ra4_e3600_r224_in1k) - 73.20 top-1 @ 224 * [convnext_zepto_rms.ra4_e3600_r224_in1k](https://huggingface.co/timm/convnext_zepto_rms.ra4_e3600_r224_in1k) - 72.81 @ 224 ### Sept 2024 * Add a suite of tiny test models for improved unit tests and niche low-resource applications (https://huggingface.co/blog/rwightman/timm-tiny-test) * Add MobileNetV4-Conv-Small (0.5x) model (https://huggingface.co/posts/rwightman/793053396198664) * [mobilenetv4_conv_small_050.e3000_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small_050.e3000_r224_in1k) - 65.81 top-1 @ 256, 64.76 @ 224 * Add MobileNetV3-Large variants trained with MNV4 Small recipe * [mobilenetv3_large_150d.ra4_e3600_r256_in1k](http://hf.co/timm/mobilenetv3_large_150d.ra4_e3600_r256_in1k) - 81.81 @ 320, 80.94 @ 256 * [mobilenetv3_large_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv3_large_100.ra4_e3600_r224_in1k) - 77.16 @ 256, 76.31 @ 224 ### Aug 21, 2024 * Updated SBB ViT models trained on ImageNet-12k and fine-tuned on ImageNet-1k, challenging quite a number of much larger, slower models | model | top1 | top5 | param_count | img_size | | -------------------------------------------------- | ------ | ------ | ----------- | -------- | | [vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 87.438 | 98.256 | 64.11 | 384 | | [vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 86.608 | 97.934 | 64.11 | 256 | | [vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k) | 86.594 | 98.02 | 60.4 | 384 | | 
[vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k) | 85.734 | 97.61 | 60.4 | 256 | * MobileNet-V1 1.25, EfficientNet-B1, & ResNet50-D weights w/ MNV4 baseline challenge recipe | model | top1 | top5 | param_count | img_size | |--------------------------------------------------------------------------------------------------------------------------|--------|--------|-------------|----------| | [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 81.838 | 95.922 | 25.58 | 288 | | [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 81.440 | 95.700 | 7.79 | 288 | | [resnet50d.ra4_e3600_r224_in1k](http://hf.co/timm/resnet50d.ra4_e3600_r224_in1k) | 80.952 | 95.384 | 25.58 | 224 | | [efficientnet_b1.ra4_e3600_r240_in1k](http://hf.co/timm/efficientnet_b1.ra4_e3600_r240_in1k) | 80.406 | 95.152 | 7.79 | 240 | | [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 77.600 | 93.804 | 6.27 | 256 | | [mobilenetv1_125.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_125.ra4_e3600_r224_in1k) | 76.924 | 93.234 | 6.27 | 224 | * Add SAM2 (HieraDet) backbone arch & weight loading support * Add Hiera Small weights trained w/ abswin pos embed on in12k & fine-tuned on 1k |model |top1 |top5 |param_count| |---------------------------------|------|------|-----------| |hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k |84.912|97.260|35.01 | |hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k |84.560|97.106|35.01 | ### Aug 8, 2024 * Add RDNet ('DenseNets Reloaded', https://arxiv.org/abs/2403.19588), thanks [Donghyun Kim](https://github.com/dhkim0225) ### July 28, 2024 * Add `mobilenet_edgetpu_v2_m` weights w/ `ra4` mnv4-small based recipe. 80.1% top-1 @ 224 and 80.7 @ 256. 
* Release 1.0.8 ### July 26, 2024 * More MobileNet-v4 weights, ImageNet-12k pretrain w/ fine-tunes, and anti-aliased ConvLarge models | model |top1 |top1_err|top5 |top5_err|param_count|img_size| |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------| | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.99 |15.01 |97.294|2.706 |32.59 |544 | | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.772|15.228 |97.344|2.656 |32.59 |480 | | [mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k)|84.64 |15.36 |97.114|2.886 |32.59 |448 | | [mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k)|84.314|15.686 |97.102|2.898 |32.59 |384 | | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.824|16.176 |96.734|3.266 |32.59 |480 | | [mobilenetv4_conv_aa_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_aa_large.e600_r384_in1k) |83.244|16.756 |96.392|3.608 |32.59 |384 | | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.99 |17.01 |96.67 |3.33 |11.07 |320 | | [mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k)|82.364|17.636 |96.256|3.744 |11.07 |256 | * Impressive MobileNet-V1 and EfficientNet-B0 baseline challenges (https://huggingface.co/blog/rwightman/mobilenet-baselines) | model |top1 |top1_err|top5 |top5_err|param_count|img_size| |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------| | 
[efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |79.364|20.636 |94.754|5.246 |5.29 |256 | | [efficientnet_b0.ra4_e3600_r224_in1k](http://hf.co/timm/efficientnet_b0.ra4_e3600_r224_in1k) |78.584|21.416 |94.338|5.662 |5.29 |224 | | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |76.596|23.404 |93.272|6.728 |5.28 |256 | | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |76.094|23.906 |93.004|6.996 |4.23 |256 | | [mobilenetv1_100h.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100h.ra4_e3600_r224_in1k) |75.662|24.338 |92.504|7.496 |5.28 |224 | | [mobilenetv1_100.ra4_e3600_r224_in1k](http://hf.co/timm/mobilenetv1_100.ra4_e3600_r224_in1k) |75.382|24.618 |92.312|7.688 |4.23 |224 | * Prototype of `set_input_size()` added to vit and swin v1/v2 models to allow changing image size, patch size, window size after model creation. * Improved support in swin for different size handling, in addition to `set_input_size`, `always_partition` and `strict_img_size` args have been added to `__init__` to allow more flexible input size constraints * Fix out of order indices info for intermediate 'Getter' feature wrapper, check out or range indices for same. 
* Add several `tiny` < .5M param models for testing that are actually trained on ImageNet-1k |model |top1 |top1_err|top5 |top5_err|param_count|img_size|crop_pct| |----------------------------|------|--------|------|--------|-----------|--------|--------| |test_efficientnet.r160_in1k |47.156|52.844 |71.726|28.274 |0.36 |192 |1.0 | |test_byobnet.r160_in1k |46.698|53.302 |71.674|28.326 |0.46 |192 |1.0 | |test_efficientnet.r160_in1k |46.426|53.574 |70.928|29.072 |0.36 |160 |0.875 | |test_byobnet.r160_in1k |45.378|54.622 |70.572|29.428 |0.46 |160 |0.875 | |test_vit.r160_in1k|42.0 |58.0 |68.664|31.336 |0.37 |192 |1.0 | |test_vit.r160_in1k|40.822|59.178 |67.212|32.788 |0.37 |160 |0.875 | * Fix vit reg token init, thanks [Promisery](https://github.com/Promisery) * Other misc fixes ### June 24, 2024 * 3 more MobileNetV4 hybrid weights with different MQA weight init scheme | model |top1 |top1_err|top5 |top5_err|param_count|img_size| |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------| | [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |84.356|15.644 |96.892 |3.108 |37.76 |448 | | [mobilenetv4_hybrid_large.ix_e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.ix_e600_r384_in1k) |83.990|16.010 |96.702 |3.298 |37.76 |384 | | [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |83.394|16.606 |96.760|3.240 |11.07 |448 | | [mobilenetv4_hybrid_medium.ix_e550_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r384_in1k) |82.968|17.032 |96.474|3.526 |11.07 |384 | | [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |82.492|17.508 |96.278|3.722 |11.07 |320 | | [mobilenetv4_hybrid_medium.ix_e550_r256_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.ix_e550_r256_in1k) |81.446|18.554 |95.704|4.296 
|11.07 |256 | * florence2 weight loading in DaViT model ### June 12, 2024 * MobileNetV4 models and initial set of `timm` trained weights added: | model |top1 |top1_err|top5 |top5_err|param_count|img_size| |--------------------------------------------------------------------------------------------------|------|--------|------|--------|-----------|--------| | [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |84.266|15.734 |96.936 |3.064 |37.76 |448 | | [mobilenetv4_hybrid_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_hybrid_large.e600_r384_in1k) |83.800|16.200 |96.770 |3.230 |37.76 |384 | | [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |83.392|16.608 |96.622 |3.378 |32.59 |448 | | [mobilenetv4_conv_large.e600_r384_in1k](http://hf.co/timm/mobilenetv4_conv_large.e600_r384_in1k) |82.952|17.048 |96.266 |3.734 |32.59 |384 | | [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |82.674|17.326 |96.31 |3.69 |32.59 |320 | | [mobilenetv4_conv_large.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_large.e500_r256_in1k) |81.862|18.138 |95.69 |4.31 |32.59 |256 | | [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |81.276|18.724 |95.742|4.258 |11.07 |256 | | [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |80.858|19.142 |95.768|4.232 |9.72 |320 | | [mobilenetv4_hybrid_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_hybrid_medium.e500_r224_in1k) |80.442|19.558 |95.38 |4.62 |11.07 |224 | | [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |80.142|19.858 |95.298|4.702 |9.72 |256 | | [mobilenetv4_conv_medium.e500_r256_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r256_in1k) |79.928|20.072 |95.184|4.816 |9.72 |256 | | 
[mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.808|20.192 |95.186|4.814 |9.72 |256 | | [mobilenetv4_conv_blur_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_blur_medium.e500_r224_in1k) |79.438|20.562 |94.932|5.068 |9.72 |224 | | [mobilenetv4_conv_medium.e500_r224_in1k](http://hf.co/timm/mobilenetv4_conv_medium.e500_r224_in1k) |79.094|20.906 |94.77 |5.23 |9.72 |224 | | [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |74.616|25.384 |92.072|7.928 |3.77 |256 | | [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |74.292|25.708 |92.116|7.884 |3.77 |256 | | [mobilenetv4_conv_small.e2400_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e2400_r224_in1k) |73.756|26.244 |91.422|8.578 |3.77 |224 | | [mobilenetv4_conv_small.e1200_r224_in1k](http://hf.co/timm/mobilenetv4_conv_small.e1200_r224_in1k) |73.454|26.546 |91.34 |8.66 |3.77 |224 | * Apple MobileCLIP (https://arxiv.org/pdf/2311.17049, FastViT and ViT-B) image tower model support & weights added (part of OpenCLIP support). * ViTamin (https://arxiv.org/abs/2404.02132) CLIP image tower model & weights added (part of OpenCLIP support). * OpenAI CLIP Modified ResNet image tower modelling & weight support (via ByobNet). Refactor AttentionPool2d. ### May 14, 2024 * Support loading PaliGemma jax weights into SigLIP ViT models with average pooling. * Add Hiera models from Meta (https://github.com/facebookresearch/hiera). * Add `normalize=` flag for transforms, return non-normalized torch.Tensor with original dtype (for `chug`) * Version 1.0.3 release ### May 11, 2024 * `Searching for Better ViT Baselines (For the GPU Poor)` weights and vit variants released. Exploring model shapes between Tiny and Base. 
| model | top1 | top5 | param_count | img_size | | -------------------------------------------------- | ------ | ------ | ----------- | -------- | | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 86.202 | 97.874 | 64.11 | 256 | | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k) | 85.418 | 97.48 | 60.4 | 256 | | [vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k) | 84.322 | 96.812 | 63.95 | 256 | | [vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k) | 83.906 | 96.684 | 60.23 | 256 | | [vit_base_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_base_patch16_rope_reg1_gap_256.sbb_in1k) | 83.866 | 96.67 | 86.43 | 256 | | [vit_medium_patch16_rope_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_rope_reg1_gap_256.sbb_in1k) | 83.81 | 96.824 | 38.74 | 256 | | [vit_betwixt_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in1k) | 83.706 | 96.616 | 60.4 | 256 | | [vit_betwixt_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_betwixt_patch16_reg1_gap_256.sbb_in1k) | 83.628 | 96.544 | 60.4 | 256 | | [vit_medium_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg4_gap_256.sbb_in1k) | 83.47 | 96.622 | 38.88 | 256 | | [vit_medium_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_medium_patch16_reg1_gap_256.sbb_in1k) | 83.462 | 96.548 | 38.88 | 256 | | [vit_little_patch16_reg4_gap_256.sbb_in1k](https://huggingface.co/timm/vit_little_patch16_reg4_gap_256.sbb_in1k) | 82.514 | 96.262 | 22.52 | 256 | | [vit_wee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_wee_patch16_reg1_gap_256.sbb_in1k) | 80.256 | 95.360 | 13.42 | 
256 | | [vit_pwee_patch16_reg1_gap_256.sbb_in1k](https://huggingface.co/timm/vit_pwee_patch16_reg1_gap_256.sbb_in1k) | 80.072 | 95.136 | 15.25 | 256 | | [vit_mediumd_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_mediumd_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 64.11 | 256 | | [vit_betwixt_patch16_reg4_gap_256.sbb_in12k](https://huggingface.co/timm/vit_betwixt_patch16_reg4_gap_256.sbb_in12k) | N/A | N/A | 60.4 | 256 | * AttentionExtract helper added to extract attention maps from `timm` models. See example in https://github.com/huggingface/pytorch-image-models/discussions/1232#discussioncomment-9320949 * `forward_intermediates()` API refined and added to more models including some ConvNets that have other extraction methods. * 1017 of 1047 model architectures support `features_only=True` feature extraction. Remaining 34 architectures can be supported but based on priority requests. * Remove torch.jit.script annotated functions including old JIT activations. Conflict with dynamo and dynamo does a much better job when used. ### April 11, 2024 * Prepping for a long overdue 1.0 release, things have been stable for a while now. * Significant feature that's been missing for a while, `features_only=True` support for ViT models with flat hidden states or non-std module layouts (so far covering `'vit_*', 'twins_*', 'deit*', 'beit*', 'mvitv2*', 'eva*', 'samvit_*', 'flexivit*'`) * Above feature support achieved through a new `forward_intermediates()` API that can be used with a feature wrapping module or directly. 
```python model = timm.create_model('vit_base_patch16_224') final_feat, intermediates = model.forward_intermediates(input) output = model.forward_head(final_feat) # pooling + classifier head print(final_feat.shape) torch.Size([2, 197, 768]) for f in intermediates: print(f.shape) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) torch.Size([2, 768, 14, 14]) print(output.shape) torch.Size([2, 1000]) ``` ```python model = timm.create_model('eva02_base_patch16_clip_224', pretrained=True, img_size=512, features_only=True, out_indices=(-3, -2,)) output = model(torch.randn(2, 3, 512, 512)) for o in output: print(o.shape) torch.Size([2, 768, 32, 32]) torch.Size([2, 768, 32, 32]) ``` * TinyCLIP vision tower weights added, thx [Thien Tran](https://github.com/gau-nernst) ### Feb 19, 2024 * Next-ViT models added. Adapted from https://github.com/bytedance/Next-ViT * HGNet and PP-HGNetV2 models added. Adapted from https://github.com/PaddlePaddle/PaddleClas by [SeeFun](https://github.com/seefun) * Removed setup.py, moved to pyproject.toml based build supported by PDM * Add updated model EMA impl using _for_each for less overhead * Support device args in train script for non GPU devices * Other misc fixes and small additions * Min supported Python version increased to 3.8 * Release 0.9.16 ## Introduction Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results. The work of many others is present here. 
I've tried to make sure all source material is acknowledged via links to github, arxiv papers, etc in the README, documentation, and code docstrings. Please let me know if I missed anything. ## Features ### Models All model architecture families include variants with pretrained weights. There are specific model variants without any weights, it is NOT a bug. Help training new or better weights is always appreciated. * Aggregating Nested Transformers - https://arxiv.org/abs/2105.12723 * BEiT - https://arxiv.org/abs/2106.08254 * BEiT-V2 - https://arxiv.org/abs/2208.06366 * BEiT3 - https://arxiv.org/abs/2208.10442 * Big Transfer ResNetV2 (BiT) - https://arxiv.org/abs/1912.11370 * Bottleneck Transformers - https://arxiv.org/abs/2101.11605 * CaiT (Class-Attention in Image Transformers) - https://arxiv.org/abs/2103.17239 * CoaT (Co-Scale Conv-Attentional Image Transformers) - https://arxiv.org/abs/2104.06399 * CoAtNet (Convolution and Attention) - https://arxiv.org/abs/2106.04803 * ConvNeXt - https://arxiv.org/abs/2201.03545 * ConvNeXt-V2 - http://arxiv.org/abs/2301.00808 * ConViT (Soft Convolutional Inductive Biases Vision Transformers)- https://arxiv.org/abs/2103.10697 * CspNet (Cross-Stage Partial Networks) - https://arxiv.org/abs/1911.11929 * DeiT - https://arxiv.org/abs/2012.12877 * DeiT-III - https://arxiv.org/pdf/2204.07118.pdf * DenseNet - https://arxiv.org/abs/1608.06993 * DLA - https://arxiv.org/abs/1707.06484 * DPN (Dual-Path Network) - https://arxiv.org/abs/1707.01629 * EdgeNeXt - https://arxiv.org/abs/2206.10589 * EfficientFormer - https://arxiv.org/abs/2206.01191 * EfficientFormer-V2 - https://arxiv.org/abs/2212.08059 * EfficientNet (MBConvNet Family) * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252 * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665 * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946 * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html * 
EfficientNet V2 - https://arxiv.org/abs/2104.00298 * FBNet-C - https://arxiv.org/abs/1812.03443 * MixNet - https://arxiv.org/abs/1907.09595 * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626 * MobileNet-V2 - https://arxiv.org/abs/1801.04381 * Single-Path NAS - https://arxiv.org/abs/1904.02877 * TinyNet - https://arxiv.org/abs/2010.14819 * EfficientViT (MIT) - https://arxiv.org/abs/2205.14756 * EfficientViT (MSRA) - https://arxiv.org/abs/2305.07027 * EVA - https://arxiv.org/abs/2211.07636 * EVA-02 - https://arxiv.org/abs/2303.11331 * FasterNet - https://arxiv.org/abs/2303.03667 * FastViT - https://arxiv.org/abs/2303.14189 * FlexiViT - https://arxiv.org/abs/2212.08013 * FocalNet (Focal Modulation Networks) - https://arxiv.org/abs/2203.11926 * GCViT (Global Context Vision Transformer) - https://arxiv.org/abs/2206.09959 * GhostNet - https://arxiv.org/abs/1911.11907 * GhostNet-V2 - https://arxiv.org/abs/2211.12905 * GhostNet-V3 - https://arxiv.org/abs/2404.11202 * gMLP - https://arxiv.org/abs/2105.08050 * GPU-Efficient Networks - https://arxiv.org/abs/2006.14090 * Halo Nets - https://arxiv.org/abs/2103.12731 * HGNet / HGNet-V2 - TBD * HRNet - https://arxiv.org/abs/1908.07919 * InceptionNeXt - https://arxiv.org/abs/2303.16900 * Inception-V3 - https://arxiv.org/abs/1512.00567 * Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261 * Lambda Networks - https://arxiv.org/abs/2102.08602 * LeViT (Vision Transformer in ConvNet's Clothing) - https://arxiv.org/abs/2104.01136 * MambaOut - https://arxiv.org/abs/2405.07992 * MaxViT (Multi-Axis Vision Transformer) - https://arxiv.org/abs/2204.01697 * MetaFormer (PoolFormer-v2, ConvFormer, CAFormer) - https://arxiv.org/abs/2210.13452 * MLP-Mixer - https://arxiv.org/abs/2105.01601 * MobileCLIP - https://arxiv.org/abs/2311.17049 * MobileNet-V3 (MBConvNet w/ Efficient Head) - https://arxiv.org/abs/1905.02244 * FBNet-V3 - https://arxiv.org/abs/2006.02049 * HardCoRe-NAS - 
https://arxiv.org/abs/2102.11646 * LCNet - https://arxiv.org/abs/2109.15099 * MobileNetV4 - https://arxiv.org/abs/2404.10518 * MobileOne - https://arxiv.org/abs/2206.04040 * MobileViT - https://arxiv.org/abs/2110.02178 * MobileViT-V2 - https://arxiv.org/abs/2206.02680 * MViT-V2 (Improved Multiscale Vision Transformer) - https://arxiv.org/abs/2112.01526 * NASNet-A - https://arxiv.org/abs/1707.07012 * NesT - https://arxiv.org/abs/2105.12723 * Next-ViT - https://arxiv.org/abs/2207.05501 * NFNet-F - https://arxiv.org/abs/2102.06171 * NF-RegNet / NF-ResNet - https://arxiv.org/abs/2101.08692 * PE (Perception Encoder) - https://arxiv.org/abs/2504.13181 * PNasNet - https://arxiv.org/abs/1712.00559 * PoolFormer (MetaFormer) - https://arxiv.org/abs/2111.11418 * Pooling-based Vision Transformer (PiT) - https://arxiv.org/abs/2103.16302 * PVT-V2 (Improved Pyramid Vision Transformer) - https://arxiv.org/abs/2106.13797 * RDNet (DenseNets Reloaded) - https://arxiv.org/abs/2403.19588 * RegNet - https://arxiv.org/abs/2003.13678 * RegNetZ - https://arxiv.org/abs/2103.06877 * RepVGG - https://arxiv.org/abs/2101.03697 * RepGhostNet - https://arxiv.org/abs/2211.06088 * RepViT - https://arxiv.org/abs/2307.09283 * ResMLP - https://arxiv.org/abs/2105.03404 * ResNet/ResNeXt * ResNet (v1b/v1.5) - https://arxiv.org/abs/1512.03385 * ResNeXt - https://arxiv.org/abs/1611.05431 * 'Bag of Tricks' / Gluon C, D, E, S variations - https://arxiv.org/abs/1812.01187 * Weakly-supervised (WSL) Instagram pretrained / ImageNet tuned ResNeXt101 - https://arxiv.org/abs/1805.00932 * Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet/ResNeXts - https://arxiv.org/abs/1905.00546 * ECA-Net (ECAResNet) - https://arxiv.org/abs/1910.03151v4 * Squeeze-and-Excitation Networks (SEResNet) - https://arxiv.org/abs/1709.01507 * ResNet-RS - https://arxiv.org/abs/2103.07579 * Res2Net - https://arxiv.org/abs/1904.01169 * ResNeSt - https://arxiv.org/abs/2004.08955 * ReXNet - https://arxiv.org/abs/2007.00992 * ROPE-ViT 
- https://arxiv.org/abs/2403.13298 * SelecSLS - https://arxiv.org/abs/1907.00837 * Selective Kernel Networks - https://arxiv.org/abs/1903.06586 * Sequencer2D - https://arxiv.org/abs/2205.01972 * SHViT - https://arxiv.org/abs/2401.16456 * SigLIP (image encoder) - https://arxiv.org/abs/2303.15343 * SigLIP 2 (image encoder) - https://arxiv.org/abs/2502.14786 * StarNet - https://arxiv.org/abs/2403.19967 * SwiftFormer - https://arxiv.org/pdf/2303.15446 * Swin S3 (AutoFormerV2) - https://arxiv.org/abs/2111.14725 * Swin Transformer - https://arxiv.org/abs/2103.14030 * Swin Transformer V2 - https://arxiv.org/abs/2111.09883 * TinyViT - https://arxiv.org/abs/2207.10666 * Transformer-iN-Transformer (TNT) - https://arxiv.org/abs/2103.00112 * TResNet - https://arxiv.org/abs/2003.13630 * Twins (Spatial Attention in Vision Transformers) - https://arxiv.org/pdf/2104.13840.pdf * VGG - https://arxiv.org/abs/1409.1556 * Visformer - https://arxiv.org/abs/2104.12533 * Vision Transformer - https://arxiv.org/abs/2010.11929 * ViTamin - https://arxiv.org/abs/2404.02132 * VOLO (Vision Outlooker) - https://arxiv.org/abs/2106.13112 * VovNet V2 and V1 - https://arxiv.org/abs/1911.06667 * Xception - https://arxiv.org/abs/1610.02357 * Xception (Modified Aligned, Gluon) - https://arxiv.org/abs/1802.02611 * Xception (Modified Aligned, TF) - https://arxiv.org/abs/1802.02611 * XCiT (Cross-Covariance Image Transformers) - https://arxiv.org/abs/2106.09681 ### Optimizers To see full list of optimizers w/ descriptions: `timm.optim.list_optimizers(with_description=True)` Included optimizers available via `timm.optim.create_optimizer_v2` factory method: * `adabelief` an implementation of AdaBelief adapted from https://github.com/juntang-zhuang/Adabelief-Optimizer - https://arxiv.org/abs/2010.07468 * `adafactor` adapted from [FAIRSeq impl](https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py) - https://arxiv.org/abs/1804.04235 * `adafactorbv` adapted from [Big 
Vision](https://github.com/google-research/big_vision/blob/main/big_vision/optax.py) - https://arxiv.org/abs/2106.04560 * `adahessian` by [David Samuel](https://github.com/davda54/ada-hessian) - https://arxiv.org/abs/2006.00719 * `adamp` and `sgdp` by [Naver ClovAI](https://github.com/clovaai) - https://arxiv.org/abs/2006.08217 * `adan` an implementation of Adan adapted from https://github.com/sail-sg/Adan - https://arxiv.org/abs/2208.06677 * `adopt` ADOPT adapted from https://github.com/iShohei220/adopt - https://arxiv.org/abs/2411.02853 * `kron` PSGD w/ Kronecker-factored preconditioner from https://github.com/evanatyourservice/kron_torch - https://sites.google.com/site/lixilinx/home/psgd * `lamb` an implementation of Lamb and LambC (w/ trust-clipping) cleaned up and modified to support use with XLA - https://arxiv.org/abs/1904.00962 * `laprop` optimizer from https://github.com/Z-T-WANG/LaProp-Optimizer - https://arxiv.org/abs/2002.04839 * `lars` an implementation of LARS and LARC (w/ trust-clipping) - https://arxiv.org/abs/1708.03888 * `lion` and implementation of Lion adapted from https://github.com/google/automl/tree/master/lion - https://arxiv.org/abs/2302.06675 * `lookahead` adapted from impl by [Liam](https://github.com/alphadl/lookahead.pytorch) - https://arxiv.org/abs/1907.08610 * `madgrad` an implementation of MADGRAD adapted from https://github.com/facebookresearch/madgrad - https://arxiv.org/abs/2101.11075 * `mars` MARS optimizer from https://github.com/AGI-Arena/MARS - https://arxiv.org/abs/2411.10438 * `nadam` an implementation of Adam w/ Nesterov momentum * `nadamw` an implementation of AdamW (Adam w/ decoupled weight-decay) w/ Nesterov momentum. 
A simplified impl based on https://github.com/mlcommons/algorithmic-efficiency * `novograd` by [Masashi Kimura](https://github.com/convergence-lab/novograd) - https://arxiv.org/abs/1905.11286 * `radam` by [Liyuan Liu](https://github.com/LiyuanLucasLiu/RAdam) - https://arxiv.org/abs/1908.03265 * `rmsprop_tf` adapted from PyTorch RMSProp by myself. Reproduces much improved Tensorflow RMSProp behaviour * `sgdw` and implementation of SGD w/ decoupled weight-decay * `fused<name>` optimizers by name with [NVIDIA Apex](https://github.com/NVIDIA/apex/tree/master/apex/optimizers) installed * `bnb<name>` optimizers by name with [BitsAndBytes](https://github.com/TimDettmers/bitsandbytes) installed * `cadamw`, `clion`, and more 'Cautious' optimizers from https://github.com/kyleliang919/C-Optim - https://arxiv.org/abs/2411.16085 * `adam`, `adamw`, `rmsprop`, `adadelta`, `adagrad`, and `sgd` pass through to `torch.optim` implementations * `c` suffix (eg `adamc`, `nadamc` to implement 'corrected weight decay' in https://arxiv.org/abs/2506.02285) ### Augmentations * Random Erasing from [Zhun Zhong](https://github.com/zhunzhong07/Random-Erasing/blob/master/transforms.py) - https://arxiv.org/abs/1708.04896) * Mixup - https://arxiv.org/abs/1710.09412 * CutMix - https://arxiv.org/abs/1905.04899 * AutoAugment (https://arxiv.org/abs/1805.09501) and RandAugment (https://arxiv.org/abs/1909.13719) ImageNet configurations modeled after impl for EfficientNet training (https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py) * AugMix w/ JSD loss, JSD w/ clean + augmented mixing support works with AutoAugment and RandAugment as well - https://arxiv.org/abs/1912.02781 * SplitBachNorm - allows splitting batch norm layers between clean and augmented (auxiliary batch norm) data ### Regularization * DropPath aka "Stochastic Depth" - https://arxiv.org/abs/1603.09382 * DropBlock - https://arxiv.org/abs/1810.12890 * Blur Pooling - https://arxiv.org/abs/1904.11486 ### 
Other Several (less common) features that I often utilize in my projects are included. Many of their additions are the reason why I maintain my own set of models, instead of using others' via PIP: * All models have a common default configuration interface and API for * accessing/changing the classifier - `get_classifier` and `reset_classifier` * doing a forward pass on just the features - `forward_features` (see [documentation](https://huggingface.co/docs/timm/feature_extraction)) * these makes it easy to write consistent network wrappers that work with any of the models * All models support multi-scale feature map extraction (feature pyramids) via create_model (see [documentation](https://huggingface.co/docs/timm/feature_extraction)) * `create_model(name, features_only=True, out_indices=..., output_stride=...)` * `out_indices` creation arg specifies which feature maps to return, these indices are 0 based and generally correspond to the `C(i + 1)` feature level. * `output_stride` creation arg controls output stride of the network by using dilated convolutions. Most networks are stride 32 by default. Not all networks support this. * feature map channel counts, reduction level (stride) can be queried AFTER model creation via the `.feature_info` member * All models have a consistent pretrained weight loader that adapts last linear if necessary, and from 3 to 1 channel input if desired * High performance [reference training, validation, and inference scripts](https://huggingface.co/docs/timm/training_script) that work in several process/GPU modes: * NVIDIA DDP w/ a single GPU per process, multiple processes with APEX present (AMP mixed-precision optional) * PyTorch DistributedDataParallel w/ multi-gpu, single process (AMP disabled as it crashes when enabled) * PyTorch w/ single GPU single process (AMP optional) * A dynamic global pool implementation that allows selecting from average pooling, max pooling, average + max, or concat([average, max]) at model creation. 
All global pooling is adaptive average by default and compatible with pretrained weights. * A 'Test Time Pool' wrapper that can wrap any of the included models and usually provides improved performance doing inference with input images larger than the training size. Idea adapted from original DPN implementation when I ported (https://github.com/cypw/DPNs) * Learning rate schedulers * Ideas adopted from * [AllenNLP schedulers](https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers) * [FAIRseq lr_scheduler](https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler) * SGDR: Stochastic Gradient Descent with Warm Restarts (https://arxiv.org/abs/1608.03983) * Schedulers include `step`, `cosine` w/ restarts, `tanh` w/ restarts, `plateau` * Space-to-Depth by [mrT23](https://github.com/mrT23/TResNet/blob/master/src/models/tresnet/layers/space_to_depth.py) (https://arxiv.org/abs/1801.04590) -- original paper? * Adaptive Gradient Clipping (https://arxiv.org/abs/2102.06171, https://github.com/deepmind/deepmind-research/tree/master/nfnets) * An extensive selection of channel and/or spatial attention modules: * Bottleneck Transformer - https://arxiv.org/abs/2101.11605 * CBAM - https://arxiv.org/abs/1807.06521 * Effective Squeeze-Excitation (ESE) - https://arxiv.org/abs/1911.06667 * Efficient Channel Attention (ECA) - https://arxiv.org/abs/1910.03151 * Gather-Excite (GE) - https://arxiv.org/abs/1810.12348 * Global Context (GC) - https://arxiv.org/abs/1904.11492 * Halo - https://arxiv.org/abs/2103.12731 * Involution - https://arxiv.org/abs/2103.06255 * Lambda Layer - https://arxiv.org/abs/2102.08602 * Non-Local (NL) - https://arxiv.org/abs/1711.07971 * Squeeze-and-Excitation (SE) - https://arxiv.org/abs/1709.01507 * Selective Kernel (SK) - https://arxiv.org/abs/1903.06586 * Split (SPLAT) - https://arxiv.org/abs/2004.08955 * Shifted Window (SWIN) - https://arxiv.org/abs/2103.14030 ## Results Model validation results can be found 
in the [results tables](results/README.md) ## Getting Started (Documentation) The official documentation can be found at https://huggingface.co/docs/hub/timm. Documentation contributions are welcome. [Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055-2/) by [Chris Hughes](https://github.com/Chris-hughes10) is an extensive blog post covering many aspects of `timm` in detail. [timmdocs](http://timm.fast.ai/) is an alternate set of documentation for `timm`. A big thanks to [Aman Arora](https://github.com/amaarora) for his efforts creating timmdocs. [paperswithcode](https://paperswithcode.com/lib/timm) is a good resource for browsing the models within `timm`. ## Train, Validation, Inference Scripts The root folder of the repository contains reference train, validation, and inference scripts that work with the included models and other features of this repository. They are adaptable for other datasets and use cases with a little hacking. See [documentation](https://huggingface.co/docs/timm/training_script). ## Awesome PyTorch Resources One of the greatest assets of PyTorch is the community and their contributions. A few of my favourite resources that pair well with the models and components here are listed below. 
### Object Detection, Instance and Semantic Segmentation * Detectron2 - https://github.com/facebookresearch/detectron2 * Segmentation Models (Semantic) - https://github.com/qubvel/segmentation_models.pytorch * EfficientDet (Obj Det, Semantic soon) - https://github.com/rwightman/efficientdet-pytorch ### Computer Vision / Image Augmentation * Albumentations - https://github.com/albumentations-team/albumentations * Kornia - https://github.com/kornia/kornia ### Knowledge Distillation * RepDistiller - https://github.com/HobbitLong/RepDistiller * torchdistill - https://github.com/yoshitomo-matsubara/torchdistill ### Metric Learning * PyTorch Metric Learning - https://github.com/KevinMusgrave/pytorch-metric-learning ### Training / Frameworks * fastai - https://github.com/fastai/fastai * lightly_train - https://github.com/lightly-ai/lightly-train ## Licenses ### Code The code here is licensed Apache 2.0. I've taken care to make sure any third party code included or adapted has compatible (permissive) licenses such as MIT, BSD, etc. I've made an effort to avoid any GPL / LGPL conflicts. That said, it is your responsibility to ensure you comply with licenses here and conditions of any dependent licenses. Where applicable, I've linked the sources/references for various components in docstrings. If you think I've missed anything please create an issue. ### Pretrained Weights So far all of the pretrained weights available here are pretrained on ImageNet with a select few that have some additional pretraining (see extra note below). ImageNet was released for non-commercial research purposes only (https://image-net.org/download). It's not clear what the implications of that are for the use of pretrained weights from that dataset. Any models I have trained with ImageNet are done for research purposes and one should assume that the original dataset license applies to the weights. It's best to seek legal advice if you intend to use the pretrained weights in a commercial product. 
#### Pretrained on more than ImageNet Several weights included or references here were pretrained with proprietary datasets that I do not have access to. These include the Facebook WSL, SSL, SWSL ResNe(Xt) and the Google Noisy Student EfficientNet models. The Facebook models have an explicit non-commercial license (CC-BY-NC 4.0, https://github.com/facebookresearch/semi-supervised-ImageNet1K-models, https://github.com/facebookresearch/WSL-Images). The Google models do not appear to have any restriction beyond the Apache 2.0 license (and ImageNet concerns). In either case, you should contact Facebook or Google with any questions. ## Citing ### BibTeX ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} } ``` ### Latest DOI [![DOI](https://zenodo.org/badge/168799526.svg)](https://zenodo.org/badge/latestdoi/168799526)
pytorch-image-models/README.md/0
{ "file_path": "pytorch-image-models/README.md", "repo_id": "pytorch-image-models", "token_count": 25068 }
260
# Model Summaries The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below. Most included models have pretrained weights. The weights are either: 1. from their original sources 2. ported by myself from their original impl in a different framework (e.g. Tensorflow models) 3. trained from scratch using the included training script The validation results for the pretrained weights are [here](results) A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm). ## Big Transfer ResNetV2 (BiT) * Implementation: [resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py) * Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370 * Reference code: https://github.com/google-research/big_transfer ## Cross-Stage Partial Networks * Implementation: [cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py) * Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 * Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks ## DenseNet * Implementation: [densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py) * Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models ## DLA * Implementation: [dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py) * Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 * Code: https://github.com/ucbdrive/dla ## Dual-Path Networks * Implementation: 
[dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py) * Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629 * My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained * Reference code: https://github.com/cypw/DPNs ## GPU-Efficient Networks * Implementation: [byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py) * Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 * Reference code: https://github.com/idstcv/GPU-Efficient-Networks ## HRNet * Implementation: [hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py) * Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919 * Code: https://github.com/HRNet/HRNet-Image-Classification ## Inception-V3 * Implementation: [inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py) * Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models ## Inception-V4 * Implementation: [inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py) * Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets ## Inception-ResNet-V2 * Implementation: [inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py) * Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: 
https://github.com/tensorflow/models/tree/master/research/slim/nets ## NASNet-A * Implementation: [nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py) * Paper: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet ## PNasNet-5 * Implementation: [pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py) * Paper: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559 * Code: https://github.com/Cadene/pretrained-models.pytorch * Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet ## EfficientNet * Implementation: [efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py) * Papers: * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252 * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665 * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946 * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html * MixNet - https://arxiv.org/abs/1907.09595 * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626 * MobileNet-V2 - https://arxiv.org/abs/1801.04381 * FBNet-C - https://arxiv.org/abs/1812.03443 * Single-Path NAS - https://arxiv.org/abs/1904.02877 * My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch * Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet ## MobileNet-V3 * Implementation: [mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py) * Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 * Reference code: 
https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet ## RegNet * Implementation: [regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py) * Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 * Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py ## RepVGG * Implementation: [byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py) * Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 * Reference code: https://github.com/DingXiaoH/RepVGG ## ResNet, ResNeXt * Implementation: [resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py) * ResNet (V1B) * Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models * ResNeXt * Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431 * Code: https://github.com/pytorch/vision/tree/master/torchvision/models * 'Bag of Tricks' / Gluon C, D, E, S ResNet variants * Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187 * Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py * Instagram pretrained / ImageNet tuned ResNeXt101 * Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932 * Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) * Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts * Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546 * Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) * Squeeze-and-Excitation 
Networks * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 * Code: Added to ResNet base, this is current version going forward, old `senet.py` is being deprecated * ECAResNet (ECA-Net) * Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4 * Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet ## Res2Net * Implementation: [res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py) * Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 * Code: https://github.com/gasvn/Res2Net ## ResNeSt * Implementation: [resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py) * Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 * Code: https://github.com/zhanghang1989/ResNeSt ## ReXNet * Implementation: [rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py) * Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992 * Code: https://github.com/clovaai/rexnet ## Selective-Kernel Networks * Implementation: [sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py) * Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586 * Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn ## SelecSLS * Implementation: [selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py) * Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837 * Code: https://github.com/mehtadushy/SelecSLS-Pytorch ## Squeeze-and-Excitation Networks * Implementation: [senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py) NOTE: I am deprecating this version of 
the networks, the new ones are part of `resnet.py` * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 * Code: https://github.com/Cadene/pretrained-models.pytorch ## TResNet * Implementation: [tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py) * Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630 * Code: https://github.com/mrT23/TResNet ## VGG * Implementation: [vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py) * Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf * Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py ## Vision Transformer * Implementation: [vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py) * Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 * Reference code and pretrained weights: https://github.com/google-research/vision_transformer ## VovNet V2 and V1 * Implementation: [vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py) * Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 * Reference code: https://github.com/youngwanLEE/vovnet-detectron2 ## Xception * Implementation: [xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py) * Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357 * Code: https://github.com/Cadene/pretrained-models.pytorch ## Xception (Modified Aligned, Gluon) * Implementation: [gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py) * Paper: `Encoder-Decoder with Atrous Separable Convolution for 
Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 * Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/ ## Xception (Modified Aligned, TF) * Implementation: [aligned_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py) * Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 * Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab
pytorch-image-models/hfdocs/source/models.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models.mdx", "repo_id": "pytorch-image-models", "token_count": 4501 }
261
# MobileNet v2 **MobileNetV2** is a convolutional neural network architecture that seeks to perform well on mobile devices. It is based on an [inverted residual structure](https://paperswithcode.com/method/inverted-residual-block) where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains the initial fully convolution layer with 32 filters, followed by 19 residual bottleneck layers. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('mobilenetv2_100', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... 
categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `mobilenetv2_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('mobilenetv2_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1801-04381, author = {Mark Sandler and Andrew G. 
Howard and Menglong Zhu and Andrey Zhmoginov and Liang{-}Chieh Chen}, title = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation}, journal = {CoRR}, volume = {abs/1801.04381}, year = {2018}, url = {http://arxiv.org/abs/1801.04381}, archivePrefix = {arXiv}, eprint = {1801.04381}, timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: MobileNet V2 Paper: Title: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks' URL: https://paperswithcode.com/paper/mobilenetv2-inverted-residuals-and-linear Models: - Name: mobilenetv2_100 In Collection: MobileNet V2 Metadata: FLOPs: 401920448 Parameters: 3500000 File Size: 14202571 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Inverted Residual Block - Max Pooling - ReLU6 - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: mobilenetv2_100 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1536 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L955 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.95% Top 5 Accuracy: 91.0% - Name: mobilenetv2_110d In Collection: MobileNet V2 Metadata: FLOPs: 573958832 Parameters: 4520000 File Size: 18316431 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Inverted Residual Block - Max Pooling - ReLU6 - Residual 
Connection - Softmax Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: mobilenetv2_110d LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1536 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L969 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.05% Top 5 Accuracy: 92.19% - Name: mobilenetv2_120d In Collection: MobileNet V2 Metadata: FLOPs: 888510048 Parameters: 5830000 File Size: 23651121 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Inverted Residual Block - Max Pooling - ReLU6 - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: mobilenetv2_120d LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1536 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L977 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.28% Top 5 Accuracy: 93.51% - Name: mobilenetv2_140 In Collection: MobileNet V2 Metadata: FLOPs: 770196784 Parameters: 6110000 File Size: 24673555 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Inverted Residual Block - Max Pooling - ReLU6 - Residual Connection - Softmax Tasks: - Image Classification 
Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 16x GPUs ID: mobilenetv2_140 LR: 0.045 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1536 Image Size: '224' Weight Decay: 4.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L962 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.51% Top 5 Accuracy: 93.0% -->
pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/mobilenet-v2.mdx", "repo_id": "pytorch-image-models", "token_count": 3404 }
262
# Quickstart This quickstart is intended for developers who are ready to dive into the code and see an example of how to integrate `timm` into their model training workflow. First, you'll need to install `timm`. For more information on installation, see [Installation](installation). ```bash pip install timm ``` ## Load a Pretrained Model Pretrained models can be loaded using [`create_model`]. Here, we load the pretrained `mobilenetv3_large_100` model. ```py >>> import timm >>> m = timm.create_model('mobilenetv3_large_100', pretrained=True) >>> m.eval() ``` <Tip> Note: The returned PyTorch model is set to train mode by default, so you must call .eval() on it if you plan to use it for inference. </Tip> ## List Models with Pretrained Weights To list models packaged with `timm`, you can use [`list_models`]. If you specify `pretrained=True`, this function will only return model names that have associated pretrained weights available. ```py >>> import timm >>> from pprint import pprint >>> model_names = timm.list_models(pretrained=True) >>> pprint(model_names) [ 'adv_inception_v3', 'cspdarknet53', 'cspresnext50', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'densenetblur121d', 'dla34', 'dla46_c', ] ``` You can also list models with a specific pattern in their name. ```py >>> import timm >>> from pprint import pprint >>> model_names = timm.list_models('*resne*t*') >>> pprint(model_names) [ 'cspresnet50', 'cspresnet50d', 'cspresnet50w', 'cspresnext50', ... ] ``` ## Fine-Tune a Pretrained Model You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('mobilenetv3_large_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To fine-tune on your own dataset, you have to write a PyTorch training loop or adapt `timm`'s [training script](training_script) to use your dataset. 
## Use a Pretrained Model for Feature Extraction Without modifying the network, one can call model.forward_features(input) on any model instead of the usual model(input). This will bypass the head classifier and global pooling for networks. For a more in depth guide to using `timm` for feature extraction, see [Feature Extraction](feature_extraction). ```py >>> import timm >>> import torch >>> x = torch.randn(1, 3, 224, 224) >>> model = timm.create_model('mobilenetv3_large_100', pretrained=True) >>> features = model.forward_features(x) >>> print(features.shape) torch.Size([1, 960, 7, 7]) ``` ## Image Augmentation To transform images into valid inputs for a model, you can use [`timm.data.create_transform`], providing the desired `input_size` that the model expects. This will return a generic transform that uses reasonable defaults. ```py >>> timm.data.create_transform((3, 224, 224)) Compose( Resize(size=256, interpolation=bilinear, max_size=None, antialias=None) CenterCrop(size=(224, 224)) ToTensor() Normalize(mean=tensor([0.4850, 0.4560, 0.4060]), std=tensor([0.2290, 0.2240, 0.2250])) ) ``` Pretrained models have specific transforms that were applied to images fed into them while training. If you use the wrong transform on your image, the model won't understand what it's seeing! To figure out which transformations were used for a given pretrained model, we can start by taking a look at its `pretrained_cfg` ```py >>> model.pretrained_cfg {'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth', 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'first_conv': 'conv_stem', 'classifier': 'classifier', 'architecture': 'mobilenetv3_large_100'} ``` We can then resolve only the data related configuration by using [`timm.data.resolve_data_config`]. 
```py >>> timm.data.resolve_data_config(model.pretrained_cfg) {'input_size': (3, 224, 224), 'interpolation': 'bicubic', 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'crop_pct': 0.875} ``` We can pass this data config to [`timm.data.create_transform`] to initialize the model's associated transform. ```py >>> data_cfg = timm.data.resolve_data_config(model.pretrained_cfg) >>> transform = timm.data.create_transform(**data_cfg) >>> transform Compose( Resize(size=256, interpolation=bicubic, max_size=None, antialias=None) CenterCrop(size=(224, 224)) ToTensor() Normalize(mean=tensor([0.4850, 0.4560, 0.4060]), std=tensor([0.2290, 0.2240, 0.2250])) ) ``` <Tip> Note: Here, the pretrained model's config happens to be the same as the generic config we made earlier. This is not always the case. So, it's safer to use the data config to create the transform as we did here instead of using the generic transform. </Tip> ## Using Pretrained Models for Inference Here, we will put together the above sections and use a pretrained model for inference. First we'll need an image to do inference on. Here we load a picture of a leaf from the web: ```py >>> import requests >>> from PIL import Image >>> from io import BytesIO >>> url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/timm/cat.jpg' >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` Here's the image we loaded: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/timm/cat.jpg" alt="An Image from a link" width="300"/> Now, we'll create our model and transforms again. This time, we make sure to set our model in evaluation mode. ```py >>> model = timm.create_model('mobilenetv3_large_100', pretrained=True).eval() >>> transform = timm.data.create_transform( **timm.data.resolve_data_config(model.pretrained_cfg) ) ``` We can prepare this image for the model by passing it to the transform. 
```py >>> image_tensor = transform(image) >>> image_tensor.shape torch.Size([3, 224, 224]) ``` Now we can pass that image to the model to get the predictions. We use `unsqueeze(0)` in this case, as the model is expecting a batch dimension. ```py >>> output = model(image_tensor.unsqueeze(0)) >>> output.shape torch.Size([1, 1000]) ``` To get the predicted probabilities, we apply softmax to the output. This leaves us with a tensor of shape `(num_classes,)`. ```py >>> probabilities = torch.nn.functional.softmax(output[0], dim=0) >>> probabilities.shape torch.Size([1000]) ``` Now we'll find the top 5 predicted class indexes and values using `torch.topk`. ```py >>> values, indices = torch.topk(probabilities, 5) >>> indices tensor([281, 282, 285, 673, 670]) ``` If we check the imagenet labels for the top index, we can see what the model predicted... ```py >>> IMAGENET_1k_URL = 'https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt' >>> IMAGENET_1k_LABELS = requests.get(IMAGENET_1k_URL).text.strip().split('\n') >>> [{'label': IMAGENET_1k_LABELS[idx], 'value': val.item()} for val, idx in zip(values, indices)] [{'label': 'tabby, tabby_cat', 'value': 0.5101025700569153}, {'label': 'tiger_cat', 'value': 0.22490699589252472}, {'label': 'Egyptian_cat', 'value': 0.1835290789604187}, {'label': 'mouse, computer_mouse', 'value': 0.006752475164830685}, {'label': 'motor_scooter, scooter', 'value': 0.004942195490002632}] ```
pytorch-image-models/hfdocs/source/quickstart.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/quickstart.mdx", "repo_id": "pytorch-image-models", "token_count": 2581 }
263
"""Variable‑size Mixup / CutMix utilities for NaFlex data loaders. This module provides: * `mix_batch_variable_size` – pixel‑level Mixup/CutMix that operates on a list of images whose spatial sizes differ, mixing only their central overlap so no resizing is required. * `pairwise_mixup_target` – builds soft‑label targets that exactly match the per‑sample pixel provenance produced by the mixer. * `NaFlexMixup` – a callable functor that wraps the two helpers and stores all augmentation hyper‑parameters in one place, making it easy to plug into different dataset wrappers. Hacked together by / Copyright 2025, Ross Wightman, Hugging Face """ import math import random from typing import Dict, List, Tuple, Union import torch def mix_batch_variable_size( imgs: List[torch.Tensor], *, mixup_alpha: float = 0.8, cutmix_alpha: float = 1.0, switch_prob: float = 0.5, local_shuffle: int = 4, ) -> Tuple[List[torch.Tensor], List[float], Dict[int, int]]: """Apply Mixup or CutMix on a batch of variable-sized images. Sorts images by aspect ratio and pairs neighboring samples. Only the mutual central overlap region of each pair is mixed. Args: imgs: List of transformed images shaped (C, H, W). mixup_alpha: Beta distribution alpha for Mixup. Set to 0 to disable. cutmix_alpha: Beta distribution alpha for CutMix. Set to 0 to disable. switch_prob: Probability of using CutMix when both modes are enabled. local_shuffle: Size of local windows for shuffling after aspect sorting. 
Returns: Tuple of (mixed_imgs, lam_list, pair_to) where: - mixed_imgs: List of mixed images - lam_list: Per-sample lambda values representing mixing degree - pair_to: Mapping i -> j of which sample was mixed with which """ if len(imgs) < 2: raise ValueError("Need at least two images to perform Mixup/CutMix.") # Decide augmentation mode and raw λ if mixup_alpha > 0.0 and cutmix_alpha > 0.0: use_cutmix = torch.rand(()).item() < switch_prob alpha = cutmix_alpha if use_cutmix else mixup_alpha elif mixup_alpha > 0.0: use_cutmix = False alpha = mixup_alpha elif cutmix_alpha > 0.0: use_cutmix = True alpha = cutmix_alpha else: raise ValueError("Both mixup_alpha and cutmix_alpha are zero – nothing to do.") lam_raw = torch.distributions.Beta(alpha, alpha).sample().item() lam_raw = max(0.0, min(1.0, lam_raw)) # numerical safety # Pair images by nearest aspect ratio order = sorted(range(len(imgs)), key=lambda i: imgs[i].shape[2] / imgs[i].shape[1]) if local_shuffle > 1: for start in range(0, len(order), local_shuffle): random.shuffle(order[start:start + local_shuffle]) pair_to: Dict[int, int] = {} for a, b in zip(order[::2], order[1::2]): pair_to[a] = b pair_to[b] = a odd_one = order[-1] if len(imgs) % 2 else None mixed_imgs: List[torch.Tensor] = [None] * len(imgs) lam_list: List[float] = [1.0] * len(imgs) for i in range(len(imgs)): if i == odd_one: mixed_imgs[i] = imgs[i] continue j = pair_to[i] xi, xj = imgs[i], imgs[j] _, hi, wi = xi.shape _, hj, wj = xj.shape dest_area = hi * wi # Central overlap common to both images oh, ow = min(hi, hj), min(wi, wj) overlap_area = oh * ow top_i, left_i = (hi - oh) // 2, (wi - ow) // 2 top_j, left_j = (hj - oh) // 2, (wj - ow) // 2 xi = xi.clone() if use_cutmix: # CutMix: random rectangle inside the overlap cut_ratio = math.sqrt(1.0 - lam_raw) ch, cw = int(oh * cut_ratio), int(ow * cut_ratio) cut_area = ch * cw y_off = random.randint(0, oh - ch) x_off = random.randint(0, ow - cw) yl_i, xl_i = top_i + y_off, left_i + x_off yl_j, xl_j = 
top_j + y_off, left_j + x_off xi[:, yl_i: yl_i + ch, xl_i: xl_i + cw] = xj[:, yl_j: yl_j + ch, xl_j: xl_j + cw] mixed_imgs[i] = xi corrected_lam = 1.0 - cut_area / float(dest_area) lam_list[i] = corrected_lam else: # Mixup: blend the entire overlap region patch_i = xi[:, top_i:top_i + oh, left_i:left_i + ow] patch_j = xj[:, top_j:top_j + oh, left_j:left_j + ow] blended = patch_i.mul(lam_raw).add_(patch_j, alpha=1.0 - lam_raw) xi[:, top_i:top_i + oh, left_i:left_i + ow] = blended mixed_imgs[i] = xi corrected_lam = (dest_area - overlap_area) / dest_area + lam_raw * overlap_area / dest_area lam_list[i] = corrected_lam return mixed_imgs, lam_list, pair_to def smoothed_sparse_target( targets: torch.Tensor, *, num_classes: int, smoothing: float = 0.0, ) -> torch.Tensor: off_val = smoothing / num_classes on_val = 1.0 - smoothing + off_val y_onehot = torch.full( (targets.size(0), num_classes), off_val, dtype=torch.float32, device=targets.device ) y_onehot.scatter_(1, targets.unsqueeze(1), on_val) return y_onehot def pairwise_mixup_target( targets: torch.Tensor, pair_to: Dict[int, int], lam_list: List[float], *, num_classes: int, smoothing: float = 0.0, ) -> torch.Tensor: """Create soft targets that match the pixel‑level mixing performed. Args: targets: (B,) tensor of integer class indices. pair_to: Mapping of sample index to its mixed partner as returned by mix_batch_variable_size(). lam_list: Per‑sample fractions of own pixels, also from the mixer. num_classes: Total number of classes in the dataset. smoothing: Label‑smoothing value in the range [0, 1). Returns: Tensor of shape (B, num_classes) whose rows sum to 1. 
""" y_onehot = smoothed_sparse_target(targets, num_classes=num_classes, smoothing=smoothing) targets = y_onehot.clone() for i, j in pair_to.items(): lam = lam_list[i] targets[i].mul_(lam).add_(y_onehot[j], alpha=1.0 - lam) return targets class NaFlexMixup: """Callable wrapper that combines mixing and target generation.""" def __init__( self, *, num_classes: int, mixup_alpha: float = 0.8, cutmix_alpha: float = 1.0, switch_prob: float = 0.5, prob: float = 1.0, local_shuffle: int = 4, label_smoothing: float = 0.0, ) -> None: """Configure the augmentation. Args: num_classes: Total number of classes. mixup_alpha: Beta α for Mixup. 0 disables Mixup. cutmix_alpha: Beta α for CutMix. 0 disables CutMix. switch_prob: Probability of selecting CutMix when both modes are enabled. prob: Probability of applying any mixing per batch. local_shuffle: Window size used to shuffle images after aspect sorting so pairings vary between epochs. smoothing: Label‑smoothing value. 0 disables smoothing. """ self.num_classes = num_classes self.mixup_alpha = mixup_alpha self.cutmix_alpha = cutmix_alpha self.switch_prob = switch_prob self.prob = prob self.local_shuffle = local_shuffle self.smoothing = label_smoothing def __call__( self, imgs: List[torch.Tensor], targets: torch.Tensor, ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: """Apply the augmentation and generate matching targets. Args: imgs: List of already transformed images shaped (C, H, W). targets: Hard labels with shape (B,). Returns: mixed_imgs: List of mixed images in the same order and shapes as the input. targets: Soft‑label tensor shaped (B, num_classes) suitable for cross‑entropy with soft targets. 
""" if not isinstance(targets, torch.Tensor): targets = torch.tensor(targets) if random.random() > self.prob: targets = smoothed_sparse_target(targets, num_classes=self.num_classes, smoothing=self.smoothing) return imgs, targets.unbind(0) mixed_imgs, lam_list, pair_to = mix_batch_variable_size( imgs, mixup_alpha=self.mixup_alpha, cutmix_alpha=self.cutmix_alpha, switch_prob=self.switch_prob, local_shuffle=self.local_shuffle, ) targets = pairwise_mixup_target( targets, pair_to, lam_list, num_classes=self.num_classes, smoothing=self.smoothing, ) return mixed_imgs, targets.unbind(0)
pytorch-image-models/timm/data/naflex_mixup.py/0
{ "file_path": "pytorch-image-models/timm/data/naflex_mixup.py", "repo_id": "pytorch-image-models", "token_count": 4066 }
264
from multiprocessing import Value


class SharedCount:
    """A small integer counter backed by shared memory.

    Wraps a ``multiprocessing.Value`` so the count (e.g. the current epoch) set
    in the parent process is visible to dataloader worker processes.
    """

    def __init__(self, epoch: int = 0):
        # 'i' -> a C signed int living in shared memory.
        self.shared_epoch = Value('i', epoch)

    def _get_value(self):
        """Read the current shared integer."""
        return self.shared_epoch.value

    def _set_value(self, epoch):
        """Write a new value into the shared integer."""
        self.shared_epoch.value = epoch

    # Expose the shared integer as a plain attribute-style accessor.
    value = property(_get_value, _set_value)
pytorch-image-models/timm/data/readers/shared_count.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/shared_count.py", "repo_id": "pytorch-image-models", "token_count": 122 }
265
""" CBAM (sort-of) Attention Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on some tasks, especially fine-grained it seems. I may end up removing this impl. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn import torch.nn.functional as F from .conv_bn_act import ConvNormAct from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible class ChannelAttn(nn.Module): """ Original CBAM channel attention module, currently avg + max pool variant only. """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(ChannelAttn, self).__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) self.act = act_layer(inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) return x * self.gate(x_avg + x_max) class LightChannelAttn(ChannelAttn): """An experimental 'lightweight' that sums avg + max pool first """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightChannelAttn, self).__init__( channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) def forward(self, x): x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) x_attn = self.fc2(self.act(self.fc1(x_pool))) return x * F.sigmoid(x_attn) class SpatialAttn(nn.Module): """ Original CBAM spatial attention module """ def __init__(self, kernel_size=7, 
gate_layer='sigmoid'): super(SpatialAttn, self).__init__() self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class LightSpatialAttn(nn.Module): """An experimental 'lightweight' variant that sums avg_pool and max_pool results. """ def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(LightSpatialAttn, self).__init__() self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class CbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(CbamModule, self).__init__() self.channel = ChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x class LightCbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightCbamModule, self).__init__() self.channel = LightChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = LightSpatialAttn(spatial_kernel_size) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x
pytorch-image-models/timm/layers/cbam.py/0
{ "file_path": "pytorch-image-models/timm/layers/cbam.py", "repo_id": "pytorch-image-models", "token_count": 2016 }
266
from enum import Enum
from typing import Union

import torch


class Format(str, Enum):
    """Canonical tensor layout identifiers (str-valued for easy comparison)."""
    NCHW = 'NCHW'
    NHWC = 'NHWC'
    NCL = 'NCL'
    NLC = 'NLC'


# Layouts may be given either as a Format member or its string name.
FormatT = Union[str, Format]


def get_spatial_dim(fmt: FormatT):
    """Return the tuple of dim indices that carry spatial extent for `fmt`."""
    fmt = Format(fmt)
    if fmt is Format.NLC:
        return (1,)
    if fmt is Format.NCL:
        return (2,)
    if fmt is Format.NHWC:
        return (1, 2)
    # NCHW default
    return (2, 3)


def get_channel_dim(fmt: FormatT):
    """Return the dim index that carries channels for `fmt`."""
    fmt = Format(fmt)
    if fmt is Format.NHWC:
        return 3
    if fmt is Format.NLC:
        return 2
    # NCHW / NCL both keep channels at dim 1
    return 1


def nchw_to(x: torch.Tensor, fmt: Format):
    """Convert a NCHW tensor to the requested layout (no-op for NCHW)."""
    if fmt == Format.NHWC:
        return x.permute(0, 2, 3, 1)
    if fmt == Format.NLC:
        return x.flatten(2).transpose(1, 2)
    if fmt == Format.NCL:
        return x.flatten(2)
    return x


def nhwc_to(x: torch.Tensor, fmt: Format):
    """Convert a NHWC tensor to the requested layout (no-op for NHWC)."""
    if fmt == Format.NCHW:
        return x.permute(0, 3, 1, 2)
    if fmt == Format.NLC:
        return x.flatten(1, 2)
    if fmt == Format.NCL:
        return x.flatten(1, 2).transpose(1, 2)
    return x
pytorch-image-models/timm/layers/format.py/0
{ "file_path": "pytorch-image-models/timm/layers/format.py", "repo_id": "pytorch-image-models", "token_count": 572 }
267
""" MLP module w/ dropout and configurable activation layer Hacked together by / Copyright 2020 Ross Wightman """ from functools import partial from torch import nn as nn from .grn import GlobalResponseNorm from .helpers import to_2tuple class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks NOTE: When use_conv=True, expects 2D NCHW tensors, otherwise N*C expected. """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GluMlp(nn.Module): """ MLP w/ GLU style gating See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 NOTE: When use_conv=True, expects 2D NCHW tensors, otherwise N*C expected. 
""" def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, norm_layer=None, bias=True, drop=0., use_conv=False, gate_last=True, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features assert hidden_features % 2 == 0 bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.chunk_dim = 1 if use_conv else -1 self.gate_last = gate_last # use second half of width for gate self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features // 2) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features // 2, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 if self.fc1.bias is not None: nn.init.ones_(self.fc1.bias[self.fc1.bias.shape[0] // 2:]) nn.init.normal_(self.fc1.weight[self.fc1.weight.shape[0] // 2:], std=1e-6) def forward(self, x): x = self.fc1(x) x1, x2 = x.chunk(2, dim=self.chunk_dim) x = x1 * self.act(x2) if self.gate_last else self.act(x1) * x2 x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x SwiGLUPacked = partial(GluMlp, act_layer=nn.SiLU, gate_last=False) class SwiGLU(nn.Module): """ SwiGLU NOTE: GluMLP above can implement SwiGLU, but this impl has split fc1 and better matches some other common impl which makes mapping checkpoints simpler. 
""" def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1_g = nn.Linear(in_features, hidden_features, bias=bias[0]) self.fc1_x = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 if self.fc1_g.bias is not None: nn.init.ones_(self.fc1_g.bias) nn.init.normal_(self.fc1_g.weight, std=1e-6) def forward(self, x): x_gate = self.fc1_g(x) x = self.fc1_x(x) x = self.act(x_gate) * x x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GatedMlp(nn.Module): """ MLP as used in gMLP """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, gate_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if gate_layer is not None: assert hidden_features % 2 == 0 self.gate = gate_layer(hidden_features) hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
else: self.gate = nn.Identity() self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.gate(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class ConvMlp(nn.Module): """ MLP using 1x1 convs that keeps spatial dims (for 2D NCHW tensors) """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() self.act = act_layer() self.drop = nn.Dropout(drop) self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) def forward(self, x): x = self.fc1(x) x = self.norm(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) return x class GlobalResponseNormMlp(nn.Module): """ MLP w/ Global Response Norm (see grn.py), nn.Linear or 1x1 Conv2d NOTE: Intended for '2D' NCHW (use_conv=True) or NHWC (use_conv=False, channels-last) tensor layouts """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.grn = GlobalResponseNorm(hidden_features, channels_last=not use_conv) self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) 
self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.grn(x) x = self.fc2(x) x = self.drop2(x) return x
pytorch-image-models/timm/layers/mlp.py/0
{ "file_path": "pytorch-image-models/timm/layers/mlp.py", "repo_id": "pytorch-image-models", "token_count": 4398 }
268
""" Split BatchNorm A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through a separate BN layer. The first split is passed through the parent BN layers with weight/bias keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' namespace. This allows easily removing the auxiliary BN layers after training to efficiently achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, 'Disentangled Learning via An Auxiliary BN' Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn class SplitBatchNorm2d(torch.nn.BatchNorm2d): def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, num_splits=2): super().__init__(num_features, eps, momentum, affine, track_running_stats) assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' self.num_splits = num_splits self.aux_bn = nn.ModuleList([ nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) def forward(self, input: torch.Tensor): if self.training: # aux BN only relevant while training split_size = input.shape[0] // self.num_splits assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" split_input = input.split(split_size) x = [super().forward(split_input[0])] for i, a in enumerate(self.aux_bn): x.append(a(split_input[i + 1])) return torch.cat(x, dim=0) else: return super().forward(input) def convert_splitbn_model(module, num_splits=2): """ Recursively traverse module and its children to replace all instances of ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. 
Args: module (torch.nn.Module): input module num_splits: number of separate batchnorm layers to split input across Example:: >>> # model is an instance of torch.nn.Module >>> model = timm.models.convert_splitbn_model(model, num_splits=2) """ mod = module if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): return module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): mod = SplitBatchNorm2d( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, num_splits=num_splits) mod.running_mean = module.running_mean mod.running_var = module.running_var mod.num_batches_tracked = module.num_batches_tracked if module.affine: mod.weight.data = module.weight.data.clone().detach() mod.bias.data = module.bias.data.clone().detach() for aux in mod.aux_bn: aux.running_mean = module.running_mean.clone() aux.running_var = module.running_var.clone() aux.num_batches_tracked = module.num_batches_tracked.clone() if module.affine: aux.weight.data = module.weight.data.clone().detach() aux.bias.data = module.bias.data.clone().detach() for name, child in module.named_children(): mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) del module return mod
pytorch-image-models/timm/layers/split_batchnorm.py/0
{ "file_path": "pytorch-image-models/timm/layers/split_batchnorm.py", "repo_id": "pytorch-image-models", "token_count": 1394 }
269
import os
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from urllib.parse import urlsplit

from torch import nn

from timm.layers import set_layer_config

from ._helpers import load_checkpoint
from ._hub import load_model_config_from_hf, load_model_config_from_path
from ._pretrained import PretrainedCfg
from ._registry import is_model, model_entrypoint, split_model_name_tag

__all__ = ['parse_model_name', 'safe_model_name', 'create_model']


def parse_model_name(model_name: str) -> Tuple[Optional[str], str]:
    """Parse source and name from potentially prefixed model name.

    Returns:
        Tuple of (source, name) where source is 'hf-hub', 'local-dir', or None
        for a plain registry model name.
    """
    if model_name.startswith('hf_hub'):
        # NOTE for backwards compat, deprecate hf_hub use
        model_name = model_name.replace('hf_hub', 'hf-hub')
    parsed = urlsplit(model_name)
    assert parsed.scheme in ('', 'hf-hub', 'local-dir')
    if parsed.scheme == 'hf-hub':
        # FIXME may use fragment as revision, currently `@` in URI path
        return parsed.scheme, parsed.path
    elif parsed.scheme == 'local-dir':
        return parsed.scheme, parsed.path
    else:
        # plain name; strip any path-like prefix
        model_name = os.path.split(parsed.path)[-1]
        return None, model_name


def safe_model_name(model_name: str, remove_source: bool = True) -> str:
    """Return a filename / path safe model name."""
    def make_safe(name: str) -> str:
        # replace every non-alphanumeric char with '_' and drop trailing '_'
        return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_')

    if remove_source:
        model_name = parse_model_name(model_name)[-1]
    return make_safe(model_name)


def create_model(
        model_name: str,
        pretrained: bool = False,
        pretrained_cfg: Optional[Union[str, Dict[str, Any], PretrainedCfg]] = None,
        pretrained_cfg_overlay: Optional[Dict[str, Any]] = None,
        checkpoint_path: Optional[Union[str, Path]] = None,
        cache_dir: Optional[Union[str, Path]] = None,
        scriptable: Optional[bool] = None,
        exportable: Optional[bool] = None,
        no_jit: Optional[bool] = None,
        **kwargs: Any,
) -> nn.Module:
    """Create a model.

    Lookup model's entrypoint function and pass relevant args to create a new model.

    Tip:
        **kwargs will be passed through entrypoint fn to ``timm.models.build_model_with_cfg()``
        and then the model class __init__(). kwargs values set to None are pruned before passing.

    Args:
        model_name: Name of model to instantiate.
        pretrained: If set to `True`, load pretrained ImageNet-1k weights.
        pretrained_cfg: Pass in an external pretrained_cfg for model.
        pretrained_cfg_overlay: Replace key-values in base pretrained_cfg with these.
        checkpoint_path: Path of checkpoint to load _after_ the model is initialized.
        cache_dir: Override model cache dir for Hugging Face Hub and Torch checkpoints.
        scriptable: Set layer config so that model is jit scriptable (not working for all models yet).
        exportable: Set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet).
        no_jit: Set layer config so that model doesn't utilize jit scripted layers (so far activations only).

    Keyword Args:
        drop_rate (float): Classifier dropout rate for training.
        drop_path_rate (float): Stochastic depth drop rate for training.
        global_pool (str): Classifier global pooling type.

    Example:

    ```py
    >>> from timm import create_model

    >>> # Create a MobileNetV3-Large model with no pretrained weights.
    >>> model = create_model('mobilenetv3_large_100')

    >>> # Create a MobileNetV3-Large model with pretrained weights.
    >>> model = create_model('mobilenetv3_large_100', pretrained=True)
    >>> model.num_classes
    1000

    >>> # Create a MobileNetV3-Large model with pretrained weights and a new head with 10 classes.
    >>> model = create_model('mobilenetv3_large_100', pretrained=True, num_classes=10)
    >>> model.num_classes
    10

    >>> # Create a Dinov2 small model with pretrained weights and save weights in a custom directory.
    >>> model = create_model('vit_small_patch14_dinov2.lvd142m', pretrained=True, cache_dir="/data/my-models")
    >>> # Data will be stored at `/data/my-models/models--timm--vit_small_patch14_dinov2.lvd142m/`
    ```
    """
    # Parameters that aren't supported by all models or are intended to only override model defaults if set
    # should default to None in command line args/cfg. Remove them if they are present and not set so that
    # non-supporting models don't break and default args remain in effect.
    kwargs = {k: v for k, v in kwargs.items() if v is not None}

    model_source, model_id = parse_model_name(model_name)
    if model_source:
        assert not pretrained_cfg, 'pretrained_cfg should not be set when sourcing model from Hugging Face Hub.'
        if model_source == 'hf-hub':
            # For model names specified in the form `hf-hub:path/architecture_name@revision`,
            # load model weights + pretrained_cfg from Hugging Face hub.
            pretrained_cfg, model_name, model_args = load_model_config_from_hf(
                model_id,
                cache_dir=cache_dir,
            )
        elif model_source == 'local-dir':
            pretrained_cfg, model_name, model_args = load_model_config_from_path(
                model_id,
            )
        else:
            assert False, f'Unknown model_source {model_source}'
        if model_args:
            # config-provided args are defaults only; explicit kwargs win
            for k, v in model_args.items():
                kwargs.setdefault(k, v)
    else:
        model_name, pretrained_tag = split_model_name_tag(model_id)
        if pretrained_tag and not pretrained_cfg:
            # a valid pretrained_cfg argument takes priority over tag in model name
            pretrained_cfg = pretrained_tag

    if not is_model(model_name):
        raise RuntimeError('Unknown model (%s)' % model_name)

    create_fn = model_entrypoint(model_name)
    # layer config (scriptable/exportable/no_jit) is applied only during construction
    with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit):
        model = create_fn(
            pretrained=pretrained,
            pretrained_cfg=pretrained_cfg,
            pretrained_cfg_overlay=pretrained_cfg_overlay,
            cache_dir=cache_dir,
            **kwargs,
        )

    if checkpoint_path:
        # explicit checkpoint overrides any weights loaded above
        load_checkpoint(model, checkpoint_path)

    return model
pytorch-image-models/timm/models/_factory.py/0
{ "file_path": "pytorch-image-models/timm/models/_factory.py", "repo_id": "pytorch-image-models", "token_count": 2427 }
270
""" Bring-Your-Own-Blocks Network A flexible network w/ dataclass based config for stacking those NN blocks. This model is currently used to implement the following networks: GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 RepVGG - repvgg_* Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT MobileOne - mobileone_* Paper: `MobileOne: An Improved One millisecond Mobile Backbone` - https://arxiv.org/abs/2206.04040 Code and weights: https://github.com/apple/ml-mobileone, licensed MIT In all cases the models have been modified to fit within the design of ByobNet. I've remapped the original weights and verified accuracies. For GPU Efficient nets, I used the original names for the blocks since they were for the most part the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. A significant number of different network archs can be implemented here, including variants of the above nets that include attention. Hacked together by / copyright Ross Wightman, 2021. 
""" import math from dataclasses import dataclass, field, replace from functools import partial from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import ( ClassifierHead, NormMlpClassifierHead, ConvNormAct, BatchNormAct2d, EvoNorm2dS0a, AttentionPool2d, RotAttentionPool2d, DropPath, AvgPool2dSame, create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple, ) from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] @dataclass class ByoBlockCfg: """Block configuration for Bring-Your-Own-Blocks. Defines configuration for a single block or stage of blocks. """ type: Union[str, nn.Module] d: int # block depth (number of block repeats in stage) c: int # number of output channels for each block in stage s: int = 2 # stride of stage (first block) gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 br: float = 1. # bottleneck-ratio of blocks in stage # NOTE: these config items override the model cfgs that are applied to all blocks by default attn_layer: Optional[str] = None attn_kwargs: Optional[Dict[str, Any]] = None self_attn_layer: Optional[str] = None self_attn_kwargs: Optional[Dict[str, Any]] = None block_kwargs: Optional[Dict[str, Any]] = None @dataclass class ByoModelCfg: """Model configuration for Bring-Your-Own-Blocks network. Defines overall architecture configuration. """ blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] 
downsample: str = 'conv1x1' stem_type: str = '3x3' stem_pool: Optional[str] = 'maxpool' stem_chs: Union[int, List[int], Tuple[int, ...]] = 32 width_factor: float = 1.0 num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 zero_init_last: bool = True # zero init last weight (usually bn) in residual path fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation # layer config act_layer: str = 'relu' norm_layer: str = 'batchnorm' aa_layer: str = '' # Head config head_hidden_size: Optional[int] = None # feat dim of MLP head or AttentionPool output head_type: str = 'classifier' # Block config # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there attn_layer: Optional[str] = None attn_kwargs: dict = field(default_factory=lambda: dict()) self_attn_layer: Optional[str] = None self_attn_kwargs: dict = field(default_factory=lambda: dict()) block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) def _rep_vgg_bcfg(d: Tuple[int, ...] = (4, 6, 16, 1), wf: Tuple[float, ...] = (1., 1., 1., 1.), groups: int = 0) -> \ Tuple[ByoBlockCfg, ...]: """Create RepVGG block configuration. Args: d: Depth (number of blocks) per stage. wf: Width factor per stage. groups: Number of groups for grouped convolution. Returns: Tuple of block configurations. """ c = (64, 128, 256, 512) group_size = 0 if groups > 0: group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) return bcfg def _mobileone_bcfg( d: Tuple[int, ...] = (2, 8, 10, 1), wf: Tuple[float, ...] = (1., 1., 1., 1.), se_blocks: Tuple[int, ...] = (), num_conv_branches: int = 1 ) -> List[List[ByoBlockCfg]]: """Create MobileOne block configuration. Args: d: Depth (number of blocks) per stage. wf: Width factor per stage. se_blocks: Number of SE blocks per stage. 
num_conv_branches: Number of conv branches. Returns: List of block configurations per stage. """ c = (64, 128, 256, 512) prev_c = min(64, c[0] * wf[0]) se_blocks = se_blocks or (0,) * len(d) bcfg = [] for d, c, w, se in zip(d, c, wf, se_blocks): scfg = [] for i in range(d): out_c = c * w bk = dict(num_conv_branches=num_conv_branches) ak = {} if i >= d - se: ak['attn_layer'] = 'se' scfg += [ByoBlockCfg(type='one', d=1, c=prev_c, gs=1, block_kwargs=bk, **ak)] # depthwise block scfg += [ByoBlockCfg( type='one', d=1, c=out_c, gs=0, block_kwargs=dict(kernel_size=1, **bk), **ak)] # pointwise block prev_c = out_c bcfg += [scfg] return bcfg def interleave_blocks( types: Tuple[str, str], d: int, every: Union[int, List[int]] = 1, first: bool = False, **kwargs, ) -> Tuple[ByoBlockCfg, ...]: """Interleave 2 block types in stack. Args: types: Two block type names to interleave. d: Total depth of blocks. every: Interval for alternating blocks. first: Whether to start with alternate block. **kwargs: Additional block arguments. Returns: Tuple of interleaved block configurations. """ assert len(types) == 2 if isinstance(every, int): every = list(range(0 if first else every, d, every + 1)) if not every: every = [d - 1] set(every) blocks = [] for i in range(d): block_type = types[1] if i in every else types[0] blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] return tuple(blocks) def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: """Expand block config into individual block instances. Args: stage_blocks_cfg: Block configuration(s) for a stage. Returns: List of individual block configurations. 
""" if not isinstance(stage_blocks_cfg, Sequence): stage_blocks_cfg = (stage_blocks_cfg,) block_cfgs = [] for i, cfg in enumerate(stage_blocks_cfg): block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] return block_cfgs def num_groups(group_size: Optional[int], channels: int) -> int: """Calculate number of groups for grouped convolution. Args: group_size: Size of each group (1 for depthwise). channels: Number of channels. Returns: Number of groups. """ if not group_size: # 0 or None return 1 # normal conv with 1 group else: # NOTE group_size == 1 -> depthwise conv assert channels % group_size == 0 return channels // group_size @dataclass class LayerFn: """Container for layer factory functions.""" conv_norm_act: Callable = ConvNormAct norm_act: Callable = BatchNormAct2d act: Callable = nn.ReLU attn: Optional[Callable] = None self_attn: Optional[Callable] = None class DownsampleAvg(nn.Module): """Average pool downsampling module. AvgPool Downsampling as in 'D' ResNet variants. """ def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: int = 1, apply_act: bool = False, layers: Optional[LayerFn] = None, ): """Initialize DownsampleAvg. Args: in_chs: Number of input channels. out_chs: Number of output channels. stride: Stride for downsampling. dilation: Dilation rate. apply_act: Whether to apply activation. layers: Layer factory functions. """ super(DownsampleAvg, self).__init__() layers = layers or LayerFn() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Output tensor. 
""" return self.conv(self.pool(x)) def create_shortcut( downsample_type: str, in_chs: int, out_chs: int, stride: int, dilation: Tuple[int, int], layers: LayerFn, **kwargs, ) -> Optional[nn.Module]: """Create shortcut connection for residual blocks. Args: downsample_type: Type of downsampling ('avg', 'conv1x1', or ''). in_chs: Input channels. out_chs: Output channels. stride: Stride for downsampling. dilation: Dilation rates. layers: Layer factory functions. **kwargs: Additional arguments. Returns: Shortcut module or None. """ assert downsample_type in ('avg', 'conv1x1', '') if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: if not downsample_type: return None # no shortcut elif downsample_type == 'avg': return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) else: return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) else: return nn.Identity() # identity shortcut class BasicBlock(nn.Module): """ ResNet Basic Block - kxk + kxk """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), group_size: Optional[int] = None, bottle_ratio: float = 1.0, downsample: str = 'avg', attn_last: bool = True, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(BasicBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act( mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_layer=drop_block, 
apply_act=False, ) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.attn(x) x = self.conv2_kxk(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class BottleneckBlock(nn.Module): """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1 """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1., group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = False, linear_out: bool = False, extra_conv: bool = False, bottle_in: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(BottleneckBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.conv2_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) if extra_conv: self.conv2b_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups) else: self.conv2b_kxk = nn.Identity() self.attn = nn.Identity() if attn_last 
or layers.attn is None else layers.attn(mid_chs) self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv3_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.conv2b_kxk(x) x = self.attn(x) x = self.conv3_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class DarkBlock(nn.Module): """ DarkNet-like (1x1 + 3x3 w/ stride) block The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models. This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats). If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1) for more optimal compute. 
""" def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = True, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(DarkBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_kxk = layers.conv_norm_act( mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, ) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_kxk.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_kxk.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.attn(x) x = self.conv2_kxk(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class EdgeBlock(nn.Module): """ EdgeResidual-like (3x3 + 1x1) block A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed. 
Very similar to the EfficientNet Edge-Residual block but this block it ends with activations, is intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs. FIXME is there a more common 3x3 + 1x1 conv block to name this after? """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = 'avg', attn_last: bool = False, linear_out: bool = False, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(EdgeBlock, self).__init__() layers = layers or LayerFn() mid_chs = make_divisible(out_chs * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_kxk = layers.conv_norm_act( in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv2_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2_1x1.bn.weight) for attn in (self.attn, self.attn_last): if hasattr(attn, 'reset_parameters'): attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_kxk(x) x = self.attn(x) x = self.conv2_1x1(x) x = self.attn_last(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) class RepVggBlock(nn.Module): """ RepVGG Block. Adapted from impl at https://github.com/DingXiaoH/RepVGG """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, group_size: Optional[int] = None, downsample: str = '', layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., inference_mode: bool = False ): super(RepVggBlock, self).__init__() self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d( in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True, ) else: self.reparam_conv = None use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None self.conv_kxk = layers.conv_norm_act( in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, apply_act=False, ) self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): # NOTE this init overrides that base model init with specific changes for the block type for m in self.modules(): if isinstance(m, nn.BatchNorm2d): nn.init.normal_(m.weight, .1, .1) nn.init.normal_(m.bias, 0, .1) if hasattr(self.attn, 'reset_parameters'): self.attn.reset_parameters() def forward(self, x): if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) if self.identity is None: x = self.conv_1x1(x) + self.conv_kxk(x) else: identity = self.identity(x) x = self.conv_1x1(x) + self.conv_kxk(x) x = self.drop_path(x) # not in the paper / official impl, experimental x += identity x = self.attn(x) # no attn in the paper / official impl, experimental return self.act(x) def reparameterize(self): """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. """ if self.reparam_conv is not None: return kernel, bias = self._get_kernel_bias() self.reparam_conv = nn.Conv2d( in_channels=self.conv_kxk.conv.in_channels, out_channels=self.conv_kxk.conv.out_channels, kernel_size=self.conv_kxk.conv.kernel_size, stride=self.conv_kxk.conv.stride, padding=self.conv_kxk.conv.padding, dilation=self.conv_kxk.conv.dilation, groups=self.conv_kxk.conv.groups, bias=True, ) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias # Delete un-used branches for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_1x1') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to obtain re-parameterized kernel and bias. 
Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 """ # get weights and bias of scale branch kernel_1x1 = 0 bias_1x1 = 0 if self.conv_1x1 is not None: kernel_1x1, bias_1x1 = self._fuse_bn_tensor(self.conv_1x1) # Pad scale branch kernel to match conv branch kernel size. pad = self.conv_kxk.conv.kernel_size[0] // 2 kernel_1x1 = torch.nn.functional.pad(kernel_1x1, [pad, pad, pad, pad]) # get weights and bias of skip branch kernel_identity = 0 bias_identity = 0 if self.identity is not None: kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) # get weights and bias of conv branches kernel_conv, bias_conv = self._fuse_bn_tensor(self.conv_kxk) kernel_final = kernel_conv + kernel_1x1 + kernel_identity bias_final = bias_conv + bias_1x1 + bias_identity return kernel_final, bias_final def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to fuse batchnorm layer with preceding conv layer. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 """ if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk.conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk.conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk.conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std class MobileOneBlock(nn.Module): """ MobileOne building block. 
This block has a multi-branched architecture at train-time and plain-CNN style architecture at inference time For more details, please refer to our paper: `An Improved One millisecond Mobile Backbone` - https://arxiv.org/pdf/2206.04040.pdf """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1.0, # unused group_size: Optional[int] = None, downsample: str = '', # unused inference_mode: bool = False, num_conv_branches: int = 1, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ) -> None: """ Construct a MobileOneBlock module. """ super(MobileOneBlock, self).__init__() self.num_conv_branches = num_conv_branches self.groups = groups = num_groups(group_size, in_chs) layers = layers or LayerFn() if inference_mode: self.reparam_conv = nn.Conv2d( in_channels=in_chs, out_channels=out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, groups=groups, bias=True) else: self.reparam_conv = None # Re-parameterizable skip connection use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None # Re-parameterizable conv branches convs = [] for _ in range(self.num_conv_branches): convs.append(layers.conv_norm_act( in_chs, out_chs, kernel_size=kernel_size, stride=stride, groups=groups, apply_act=False)) self.conv_kxk = nn.ModuleList(convs) # Re-parameterizable scale branch self.conv_scale = None if kernel_size > 1: self.conv_scale = layers.conv_norm_act( in_chs, out_chs, kernel_size=1, stride=stride, groups=groups, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) self.act = layers.act(inplace=True) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Apply forward pass. """ # Inference mode forward pass. 
if self.reparam_conv is not None: return self.act(self.attn(self.reparam_conv(x))) # Multi-branched train-time forward pass. # Skip branch output identity_out = 0 if self.identity is not None: identity_out = self.identity(x) # Scale branch output scale_out = 0 if self.conv_scale is not None: scale_out = self.conv_scale(x) # Other branches out = scale_out for ck in self.conv_kxk: out += ck(x) out = self.drop_path(out) out += identity_out return self.act(self.attn(out)) def reparameterize(self): """ Following works like `RepVGG: Making VGG-style ConvNets Great Again` - https://arxiv.org/pdf/2101.03697.pdf. We re-parameterize multi-branched architecture used at training time to obtain a plain CNN-like structure for inference. """ if self.reparam_conv is not None: return kernel, bias = self._get_kernel_bias() self.reparam_conv = nn.Conv2d( in_channels=self.conv_kxk[0].conv.in_channels, out_channels=self.conv_kxk[0].conv.out_channels, kernel_size=self.conv_kxk[0].conv.kernel_size, stride=self.conv_kxk[0].conv.stride, padding=self.conv_kxk[0].conv.padding, dilation=self.conv_kxk[0].conv.dilation, groups=self.conv_kxk[0].conv.groups, bias=True) self.reparam_conv.weight.data = kernel self.reparam_conv.bias.data = bias # Delete un-used branches for name, para in self.named_parameters(): if 'reparam_conv' in name: continue para.detach_() self.__delattr__('conv_kxk') self.__delattr__('conv_scale') self.__delattr__('identity') self.__delattr__('drop_path') def _get_kernel_bias(self) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to obtain re-parameterized kernel and bias. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L83 """ # get weights and bias of scale branch kernel_scale = 0 bias_scale = 0 if self.conv_scale is not None: kernel_scale, bias_scale = self._fuse_bn_tensor(self.conv_scale) # Pad scale branch kernel to match conv branch kernel size. 
pad = self.conv_kxk[0].conv.kernel_size[0] // 2 kernel_scale = torch.nn.functional.pad(kernel_scale, [pad, pad, pad, pad]) # get weights and bias of skip branch kernel_identity = 0 bias_identity = 0 if self.identity is not None: kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) # get weights and bias of conv branches kernel_conv = 0 bias_conv = 0 for ix in range(self.num_conv_branches): _kernel, _bias = self._fuse_bn_tensor(self.conv_kxk[ix]) kernel_conv += _kernel bias_conv += _bias kernel_final = kernel_conv + kernel_scale + kernel_identity bias_final = bias_conv + bias_scale + bias_identity return kernel_final, bias_final def _fuse_bn_tensor(self, branch) -> Tuple[torch.Tensor, torch.Tensor]: """ Method to fuse batchnorm layer with preceding conv layer. Reference: https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py#L95 """ if isinstance(branch, ConvNormAct): kernel = branch.conv.weight running_mean = branch.bn.running_mean running_var = branch.bn.running_var gamma = branch.bn.weight beta = branch.bn.bias eps = branch.bn.eps else: assert isinstance(branch, nn.BatchNorm2d) if not hasattr(self, 'id_tensor'): in_chs = self.conv_kxk[0].conv.in_channels input_dim = in_chs // self.groups kernel_size = self.conv_kxk[0].conv.kernel_size kernel_value = torch.zeros_like(self.conv_kxk[0].conv.weight) for i in range(in_chs): kernel_value[i, i % input_dim, kernel_size[0] // 2, kernel_size[1] // 2] = 1 self.id_tensor = kernel_value kernel = self.id_tensor running_mean = branch.running_mean running_var = branch.running_var gamma = branch.weight beta = branch.bias eps = branch.eps std = (running_var + eps).sqrt() t = (gamma / std).reshape(-1, 1, 1, 1) return kernel * t, beta - running_mean * gamma / std class SelfAttnBlock(nn.Module): """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 """ def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, stride: int = 1, dilation: Tuple[int, int] = (1, 1), bottle_ratio: float = 1., 
group_size: Optional[int] = None, downsample: str = 'avg', extra_conv: bool = False, linear_out: bool = False, bottle_in: bool = False, post_attn_na: bool = True, feat_size: Optional[Tuple[int, int]] = None, layers: LayerFn = None, drop_block: Callable = None, drop_path_rate: float = 0., ): super(SelfAttnBlock, self).__init__() assert layers is not None mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) groups = num_groups(group_size, mid_chs) self.shortcut = create_shortcut( downsample, in_chs, out_chs, stride=stride, dilation=dilation, apply_act=False, layers=layers, ) self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) if extra_conv: self.conv2_kxk = layers.conv_norm_act( mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, ) stride = 1 # striding done via conv if enabled else: self.conv2_kxk = nn.Identity() opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) # FIXME need to dilate self attn to have dilated network support, moop moop self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() self.act = nn.Identity() if linear_out else layers.act(inplace=True) def init_weights(self, zero_init_last: bool = False): if zero_init_last and self.shortcut is not None and getattr(self.conv3_1x1.bn, 'weight', None) is not None: nn.init.zeros_(self.conv3_1x1.bn.weight) if hasattr(self.self_attn, 'reset_parameters'): self.self_attn.reset_parameters() def forward(self, x): shortcut = x x = self.conv1_1x1(x) x = self.conv2_kxk(x) x = self.self_attn(x) x = self.post_attn(x) x = self.conv3_1x1(x) x = self.drop_path(x) if self.shortcut is not None: x = x + self.shortcut(shortcut) return self.act(x) _block_registry = dict( basic=BasicBlock, bottle=BottleneckBlock, dark=DarkBlock, edge=EdgeBlock, rep=RepVggBlock, one=MobileOneBlock, self_attn=SelfAttnBlock, ) def register_block(block_type: str, block_fn: nn.Module): _block_registry[block_type] = block_fn def create_block(block: Union[str, nn.Module], **kwargs): if isinstance(block, (nn.Module, partial)): return block(**kwargs) assert block in _block_registry, f'Unknown block type ({block}' return _block_registry[block](**kwargs) class Stem(nn.Sequential): def __init__( self, in_chs: int, out_chs: Union[int, List[int], Tuple[int, ...]], kernel_size: int = 3, stride: int = 4, pool: str = 'maxpool', num_rep: int = 3, num_act: Optional[int] = None, chs_decay: float = 0.5, layers: LayerFn = None, ): super().__init__() assert stride in (2, 4) layers = layers or LayerFn() if isinstance(out_chs, (list, tuple)): num_rep = len(out_chs) stem_chs = out_chs else: stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] self.stride = stride self.feature_info = [] # track intermediate features prev_feat = '' stem_strides = [2] + [1] * (num_rep - 1) if stride == 4 and not pool: # set last conv in stack to be strided if stride == 4 and no pooling layer stem_strides[-1] = 2 num_act = num_rep if num_act is None else num_act # if num_act < num_rep, first convs in stack won't have bn + act stem_norm_acts 
= [False] * (num_rep - num_act) + [True] * num_act prev_chs = in_chs curr_stride = 1 last_feat_idx = -1 for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): layer_fn = layers.conv_norm_act if na else create_conv2d conv_name = f'conv{i + 1}' if i > 0 and s > 1: last_feat_idx = i - 1 self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) prev_chs = ch curr_stride *= s prev_feat = conv_name if pool: pool = pool.lower() assert pool in ('max', 'maxpool', 'avg', 'avgpool', 'max2', 'avg2') last_feat_idx = i self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) if pool == 'max2': self.add_module('pool', nn.MaxPool2d(2)) elif pool == 'avg2': self.add_module('pool', nn.AvgPool2d(2)) elif 'max' in pool: self.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) elif 'avg' in pool: self.add_module('pool', nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False)) curr_stride *= 2 prev_feat = 'pool' self.last_feat_idx = last_feat_idx if last_feat_idx >= 0 else None self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat, stage=0)) assert curr_stride == stride def forward_intermediates(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: intermediate: Optional[torch.Tensor] = None for i, m in enumerate(self): x = m(x) if self.last_feat_idx is not None and i == self.last_feat_idx: intermediate = x return x, intermediate def create_byob_stem( in_chs: int, out_chs: int, stem_type: str = '', pool_type: str = '', feat_prefix: str = 'stem', layers: LayerFn = None, ): layers = layers or LayerFn() assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', 'one', '7x7', '3x3') if 'quad' in stem_type: # based on NFNet stem, stack of 4 3x3 convs num_act = 2 if 'quad2' in stem_type else None stem = Stem(in_chs, out_chs, 
def reduce_feat_size(feat_size, stride=2):
    """Scale a spatial feature size down by an integer stride.

    Args:
        feat_size: Iterable of spatial dims (e.g. (H, W)), or None.
        stride: Integer downscale factor (floor division per dim).

    Returns:
        Tuple of reduced dims, or None when feat_size is None.
    """
    if feat_size is None:
        return None
    return tuple(dim // stride for dim in feat_size)


def override_kwargs(block_kwargs, model_kwargs):
    """ Override model level attn/self-attn/block kwargs w/ block level

    NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs
    for the block if set to anything that isn't None, i.e. an empty block_kwargs dict will remove
    kwargs set at model level for that block
    """
    if block_kwargs is None:
        chosen = model_kwargs
    else:
        chosen = block_kwargs
    # make sure None isn't returned
    if not chosen:
        return {}
    return chosen
def update_block_kwargs(
        block_kwargs: Dict[str, Any],
        block_cfg: ByoBlockCfg,
        model_cfg: ByoModelCfg,
):
    """Apply block-level attn/self-attn/kwargs overrides onto a block's kwargs in place.

    Mutates `block_kwargs`: rebuilds the 'layers' LayerFn with per-block attention
    layers resolved from block cfg (falling back to model cfg), then merges in any
    extra block kwargs (block cfg takes precedence over model cfg, not merged).
    """
    layer_fns = block_kwargs['layers']

    # override attn layer / args with block local config
    attn_set = block_cfg.attn_layer is not None
    if attn_set or block_cfg.attn_kwargs is not None:
        # override attn layer config
        if attn_set and not block_cfg.attn_layer:
            # empty string for attn_layer type will disable attn for this block
            attn_layer = None
        else:
            attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
            attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
            attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
        layer_fns = replace(layer_fns, attn=attn_layer)

    # override self-attn layer / args with block local cfg
    self_attn_set = block_cfg.self_attn_layer is not None
    if self_attn_set or block_cfg.self_attn_kwargs is not None:
        # override attn layer config
        if self_attn_set and not block_cfg.self_attn_layer:  # attn_layer == ''
            # empty string for self_attn_layer type will disable attn for this block
            self_attn_layer = None
        else:
            self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
            self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
            self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
                if self_attn_layer is not None else None
        layer_fns = replace(layer_fns, self_attn=self_attn_layer)
    block_kwargs['layers'] = layer_fns

    # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set
    block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))


def create_byob_stages(
        cfg: ByoModelCfg,
        drop_path_rate: float,
        output_stride: int,
        stem_feat: Dict[str, Any],
        feat_size: Optional[int] = None,
        layers: Optional[LayerFn] = None,
        block_kwargs_fn: Optional[Callable] = update_block_kwargs,
):
    """Build the sequence of network stages from the model cfg's block definitions.

    Args:
        cfg: Full model config; `cfg.blocks` defines the per-stage block cfgs.
        drop_path_rate: Max stochastic-depth rate, linearly scheduled over all blocks.
        output_stride: Target overall network stride; strides beyond it become dilation.
        stem_feat: Feature-info dict of the stem output (num_chs/reduction/module/stage).
        feat_size: Optional spatial feature size, tracked for self-attn blocks.
        layers: LayerFn bundle of layer factories.
        block_kwargs_fn: Hook that applies per-block overrides to each block's kwargs.

    Returns:
        Tuple of (nn.Sequential of stages, feature-info list, final feat_size).
    """
    layers = layers or LayerFn()
    feature_info = []
    block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
    depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
    # per-block drop-path rates: linear ramp over the whole net, split per stage
    dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
    dilation = 1
    net_stride = stem_feat['reduction']
    prev_chs = stem_feat['num_chs']
    prev_feat = stem_feat
    stages = []
    for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
        stride = stage_block_cfgs[0].s
        if stride != 1 and prev_feat:
            feature_info.append(prev_feat)
        if net_stride >= output_stride and stride > 1:
            # hit the output-stride cap: convert further striding into dilation
            dilation *= stride
            stride = 1
        net_stride *= stride
        first_dilation = 1 if dilation in (1, 2) else 2

        blocks = []
        for block_idx, block_cfg in enumerate(stage_block_cfgs):
            out_chs = make_divisible(block_cfg.c * cfg.width_factor)
            group_size = block_cfg.gs
            if isinstance(group_size, Callable):
                # group size may be a fn of (channels, block index)
                group_size = group_size(out_chs, block_idx)
            block_kwargs = dict(  # Blocks used in this model must accept these arguments
                in_chs=prev_chs,
                out_chs=out_chs,
                stride=stride if block_idx == 0 else 1,
                dilation=(first_dilation, dilation),
                group_size=group_size,
                bottle_ratio=block_cfg.br,
                downsample=cfg.downsample,
                drop_path_rate=dpr[stage_idx][block_idx],
                layers=layers,
            )
            if block_cfg.type in ('self_attn',):
                # add feat_size arg for blocks that support/need it
                block_kwargs['feat_size'] = feat_size
            block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
            blocks += [create_block(block_cfg.type, **block_kwargs)]
            first_dilation = dilation
            prev_chs = out_chs
            if stride > 1 and block_idx == 0:
                # spatial size shrinks only at the first (strided) block of a stage
                feat_size = reduce_feat_size(feat_size, stride)

        stages += [nn.Sequential(*blocks)]
        prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}', stage=stage_idx + 1)

    feature_info.append(prev_feat)
    return nn.Sequential(*stages), feature_info, feat_size
def get_layer_fns(cfg: ByoModelCfg, allow_aa: bool = True):
    """Resolve cfg strings into a LayerFn bundle of layer factory callables.

    Args:
        cfg: Model config supplying act/norm/attn layer names and kwargs.
        allow_aa: Permit anti-aliasing layer in the conv_norm_act factory.

    Returns:
        LayerFn with conv_norm_act, norm_act, act, attn, self_attn factories.
    """
    act = get_act_layer(cfg.act_layer)
    norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act)
    if cfg.aa_layer and allow_aa:
        conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act, aa_layer=cfg.aa_layer)
    else:
        conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act)
    attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
    self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
    layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
    return layer_fn


class ByobNet(nn.Module):
    """Bring-your-own-blocks Network.

    A flexible network backbone that allows building model stem + blocks via
    dataclass cfg definition w/ factory functions for module instantiation.

    Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).
    """

    def __init__(
            self,
            cfg: ByoModelCfg,
            num_classes: int = 1000,
            in_chans: int = 3,
            global_pool: Optional[str] = None,
            output_stride: int = 32,
            img_size: Optional[Union[int, Tuple[int, int]]] = None,
            drop_rate: float = 0.,
            drop_path_rate: float = 0.,
            zero_init_last: bool = True,
            **kwargs,
    ):
        """
        Args:
            cfg: Model architecture configuration.
            num_classes: Number of classifier classes.
            in_chans: Number of input channels.
            global_pool: Global pooling type.
            output_stride: Output stride of network, one of (8, 16, 32).
            img_size: Image size for fixed image size models (i.e. self-attn).
            drop_rate: Classifier dropout rate.
            drop_path_rate: Stochastic depth drop-path rate.
            zero_init_last: Zero-init last weight of residual path.
            **kwargs: Extra kwargs overlayed onto cfg.
        """
        super().__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        self.grad_checkpointing = False
        cfg = replace(cfg, **kwargs)  # overlay kwargs onto cfg
        stem_layers = get_layer_fns(cfg, allow_aa=False)  # keep aa off for stem-layers
        stage_layers = get_layer_fns(cfg)
        if cfg.fixed_input_size:
            assert img_size is not None, 'img_size argument is required for fixed input size model'
        feat_size = to_2tuple(img_size) if img_size is not None else None

        self.feature_info = []
        # stem channels may be a per-conv tuple or a single int derived from first block
        if isinstance(cfg.stem_chs, (list, tuple)):
            stem_chs = [int(round(c * cfg.width_factor)) for c in cfg.stem_chs]
        else:
            stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
        self.stem, stem_feat = create_byob_stem(
            in_chs=in_chans,
            out_chs=stem_chs,
            stem_type=cfg.stem_type,
            pool_type=cfg.stem_pool,
            layers=stem_layers,
        )
        self.feature_info.extend(stem_feat[:-1])
        feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])

        self.stages, stage_feat, feat_size = create_byob_stages(
            cfg,
            drop_path_rate,
            output_stride,
            stem_feat[-1],
            layers=stage_layers,
            feat_size=feat_size,
        )
        self.feature_info.extend(stage_feat[:-1])
        reduction = stage_feat[-1]['reduction']

        prev_chs = stage_feat[-1]['num_chs']
        if cfg.num_features:
            # optional 1x1 pre-classifier conv to expand features
            self.num_features = int(round(cfg.width_factor * cfg.num_features))
            self.final_conv = stage_layers.conv_norm_act(prev_chs, self.num_features, 1)
        else:
            self.num_features = prev_chs
            self.final_conv = nn.Identity()
        self.feature_info += [
            dict(num_chs=self.num_features, reduction=reduction, module='final_conv', stage=len(self.stages))]
        self.stage_ends = [f['stage'] for f in self.feature_info]
        self.head_hidden_size = self.num_features

        assert cfg.head_type in ('', 'classifier', 'mlp', 'attn_abs', 'attn_rot')
        if cfg.head_type == 'mlp':
            if global_pool is None:
                global_pool = 'avg'
            self.head = NormMlpClassifierHead(
                self.num_features,
                num_classes,
                hidden_size=cfg.head_hidden_size,
                pool_type=global_pool,
                norm_layer=cfg.norm_layer,
                act_layer=cfg.act_layer,
                drop_rate=self.drop_rate,
            )
            self.head_hidden_size = self.head.hidden_size
        elif cfg.head_type == 'attn_abs':
            # CLIP-style attention pooling w/ absolute position embedding
            if global_pool is None:
                global_pool = 'token'
            assert global_pool in ('', 'token')
            self.head = AttentionPool2d(
                self.num_features,
                embed_dim=cfg.head_hidden_size,
                out_features=num_classes,
                feat_size=feat_size,
                pool_type=global_pool,
                drop_rate=self.drop_rate,
                qkv_separate=True,
            )
            self.head_hidden_size = self.head.embed_dim
        elif cfg.head_type == 'attn_rot':
            # attention pooling w/ rotary position embedding
            if global_pool is None:
                global_pool = 'token'
            assert global_pool in ('', 'token')
            self.head = RotAttentionPool2d(
                self.num_features,
                embed_dim=cfg.head_hidden_size,
                out_features=num_classes,
                ref_feat_size=feat_size,
                pool_type=global_pool,
                drop_rate=self.drop_rate,
                qkv_separate=True,
            )
            self.head_hidden_size = self.head.embed_dim
        else:
            # default classifier head
            if global_pool is None:
                global_pool = 'avg'
            assert cfg.head_hidden_size is None
            self.head = ClassifierHead(
                self.num_features,
                num_classes,
                pool_type=global_pool,
                drop_rate=self.drop_rate,
            )
        self.global_pool = global_pool

        # init weights
        named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)

    @torch.jit.ignore
    def group_matcher(self, coarse: bool = False) -> Dict[str, Any]:
        """Group matcher for parameter groups.

        Args:
            coarse: Whether to use coarse grouping.

        Returns:
            Dictionary mapping group names to patterns.
        """
        matcher = dict(
            stem=r'^stem',
            blocks=[
                (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None),
                (r'^final_conv', (99999,))
            ]
        )
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable: bool = True) -> None:
        """Enable or disable gradient checkpointing.

        Args:
            enable: Whether to enable gradient checkpointing.
        """
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        """Get classifier module.

        Returns:
            Classifier module.
        """
        return self.head.fc

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
        """Reset classifier.

        Args:
            num_classes: Number of classes for new classifier.
            global_pool: Global pooling type.
        """
        self.num_classes = num_classes
        self.head.reset(num_classes, global_pool)

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            norm: bool = False,
            stop_early: bool = False,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
            exclude_final_conv: bool = False,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        """ Forward features that returns intermediates.

        Args:
            x: Input image tensor
            indices: Take last n blocks if int, all if None, select matching indices if sequence
            norm: Apply norm layer to compatible intermediates
            stop_early: Stop iterating over blocks when last desired intermediate hit
            output_fmt: Shape of intermediate feature outputs
            intermediates_only: Only return intermediate features
            exclude_final_conv: Exclude final_conv from last intermediate
        Returns:

        """
        # NOTE(review): `norm` is accepted but not used in this implementation — confirm intended.
        assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
        intermediates = []
        take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
        take_indices = [self.stage_ends[i] for i in take_indices]
        max_index = self.stage_ends[max_index]

        # forward pass
        feat_idx = 0  # stem is index 0
        if hasattr(self.stem, 'forward_intermediates'):
            # returns last intermediate features in stem (before final stride in stride > 2 stems)
            x, x_inter = self.stem.forward_intermediates(x)
        else:
            x, x_inter = self.stem(x), None
        if feat_idx in take_indices:
            intermediates.append(x if x_inter is None else x_inter)
        last_idx = self.stage_ends[-1]
        if torch.jit.is_scripting() or not stop_early:  # can't slice blocks in torchscript
            stages = self.stages
        else:
            stages = self.stages[:max_index]
        for stage in stages:
            feat_idx += 1
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint_seq(stage, x)
            else:
                x = stage(x)
            if not exclude_final_conv and feat_idx == last_idx:
                # default feature_info for this model uses final_conv as the last feature output (if present)
                x = self.final_conv(x)
            if feat_idx in take_indices:
                intermediates.append(x)

        if intermediates_only:
            return intermediates

        if exclude_final_conv and feat_idx == last_idx:
            x = self.final_conv(x)

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
    ) -> List[int]:
        """Prune layers not required for specified intermediates.

        Args:
            indices: Indices of intermediate layers to keep.
            prune_norm: Whether to prune normalization layer.
            prune_head: Whether to prune the classifier head.

        Returns:
            List of indices that were kept.
        """
        take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
        max_index = self.stage_ends[max_index]
        self.stages = self.stages[:max_index]  # truncate blocks w/ stem as idx 0
        if max_index < self.stage_ends[-1]:
            self.final_conv = nn.Identity()
        if prune_head:
            self.reset_classifier(0, '')
        return take_indices

    def forward_features(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass through feature extraction.

        Args:
            x: Input tensor.

        Returns:
            Feature tensor.
        """
        x = self.stem(x)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.stages, x)
        else:
            x = self.stages(x)
        x = self.final_conv(x)
        return x

    def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
        """Forward pass through head.

        Args:
            x: Input features.
            pre_logits: Return features before final linear layer.

        Returns:
            Classification logits or features.
        """
        return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass.

        Args:
            x: Input tensor.

        Returns:
            Output logits.
        """
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _init_weights(module: nn.Module, name: str = '', zero_init_last: bool = False) -> None:
    """Initialize weights.

    Args:
        module: Module to initialize.
        name: Module name.
        zero_init_last: Zero-initialize last layer.
    """
    if isinstance(module, nn.Conv2d):
        # fan-out style normal init for convs
        fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
        fan_out //= module.groups
        module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Linear):
        nn.init.normal_(module.weight, mean=0.0, std=0.01)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif isinstance(module, nn.BatchNorm2d):
        nn.init.ones_(module.weight)
        nn.init.zeros_(module.bias)
    elif hasattr(module, 'init_weights'):
        # blocks may define their own init (e.g. zero-init of last residual weight)
        module.init_weights(zero_init_last=zero_init_last)
""" if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.BatchNorm2d): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights(zero_init_last=zero_init_last) model_cfgs = dict( gernet_l=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), ), stem_chs=32, stem_pool=None, num_features=2560, ), gernet_m=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), ), stem_chs=32, stem_pool=None, num_features=2560, ), gernet_s=ByoModelCfg( blocks=( ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), ), stem_chs=13, stem_pool=None, num_features=1920, ), repvgg_a0=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(0.75, 0.75, 0.75, 2.5)), stem_type='rep', stem_chs=48, ), repvgg_a1=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1, 1, 1, 2.5)), stem_type='rep', stem_chs=64, ), repvgg_a2=ByoModelCfg( 
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), stem_type='rep', stem_chs=64, ), repvgg_b0=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), stem_type='rep', stem_chs=64, ), repvgg_b1=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), stem_type='rep', stem_chs=64, ), repvgg_b1g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_b2=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), stem_type='rep', stem_chs=64, ), repvgg_b2g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_b3=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), stem_type='rep', stem_chs=64, ), repvgg_b3g4=ByoModelCfg( blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), stem_type='rep', stem_chs=64, ), repvgg_d2se=ByoModelCfg( blocks=_rep_vgg_bcfg(d=(8, 14, 24, 1), wf=(2.5, 2.5, 2.5, 5.)), stem_type='rep', stem_chs=64, attn_layer='se', attn_kwargs=dict(rd_ratio=0.0625, rd_divisor=1), ), # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks # DW convs in last block, 2048 pre-FC, silu act resnet51q=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), ), stem_chs=128, stem_type='quad2', stem_pool=None, num_features=2048, act_layer='silu', ), # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act resnet61q=ByoModelCfg( blocks=( ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), 
), stem_chs=128, stem_type='quad', stem_pool=None, num_features=2048, act_layer='silu', block_kwargs=dict(extra_conv=True), ), # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, # and a tiered stem w/ maxpool resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', ), gcresnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca', ), seresnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='se', ), eca_resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='eca', ), bat_resnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', 
d=2, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='bat', attn_kwargs=dict(block_size=8) ), # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool resnet32ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=0, act_layer='silu', ), # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool resnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', ), # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat # and a tiered stem w/ no maxpool gcresnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='gca', ), seresnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', 
attn_layer='se', ), eca_resnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', num_features=1280, act_layer='silu', attn_layer='eca', ), gcresnet50t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='', attn_layer='gca', ), gcresnext50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='gca', ), # experimental models, closer to a RegNetZ than a ResNet. 
Similar to EfficientNets but w/ groups instead of DW regnetz_b16=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_c16=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d32=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d8=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=1792, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_e8=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4), 
ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4), ), stem_chs=64, stem_type='tiered', stem_pool='', downsample='', num_features=2048, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), # experimental EvoNorm configs regnetz_b16_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_c16_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), regnetz_d8_evos=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), ), stem_chs=64, stem_type='deep', stem_pool='', downsample='', num_features=1792, act_layer='silu', norm_layer=partial(EvoNorm2dS0a, group_size=16), attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), ), mobileone_s0=ByoModelCfg( 
blocks=_mobileone_bcfg(wf=(0.75, 1.0, 1.0, 2.), num_conv_branches=4), stem_type='one', stem_chs=48, ), mobileone_s1=ByoModelCfg( blocks=_mobileone_bcfg(wf=(1.5, 1.5, 2.0, 2.5)), stem_type='one', stem_chs=64, ), mobileone_s2=ByoModelCfg( blocks=_mobileone_bcfg(wf=(1.5, 2.0, 2.5, 4.0)), stem_type='one', stem_chs=64, ), mobileone_s3=ByoModelCfg( blocks=_mobileone_bcfg(wf=(2.0, 2.5, 3.0, 4.0)), stem_type='one', stem_chs=64, ), mobileone_s4=ByoModelCfg( blocks=_mobileone_bcfg(wf=(3.0, 3.5, 3.5, 4.0), se_blocks=(0, 0, 5, 1)), stem_type='one', stem_chs=64, ), resnet50_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet101_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=23, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), ), stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet50x4_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=4, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=6, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=10, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=6, c=2048, s=2, br=0.25), ), width_factor=1.25, stem_chs=(32, 32, 64), stem_type='', stem_pool='avg2', downsample='avg', aa_layer='avg', head_type='attn_abs', ), resnet50x16_clip=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=6, c=256, s=1, br=0.25), ByoBlockCfg(type='bottle', d=8, c=512, s=2, br=0.25), ByoBlockCfg(type='bottle', d=18, c=1024, s=2, br=0.25), ByoBlockCfg(type='bottle', d=8, c=2048, s=2, br=0.25), ), width_factor=1.5, stem_chs=(32, 32, 64), 
# register global-average-pool variants of the CLIP ResNet configs
for k in ('resnet50_clip', 'resnet101_clip', 'resnet50x4_clip', 'resnet50x16_clip', 'resnet50x64_clip'):
    model_cfgs[k + '_gap'] = replace(model_cfgs[k], head_type='classifier')


def _convert_openai_clip(
        state_dict: Dict[str, torch.Tensor],
        model: ByobNet,
        prefix: str = 'visual.',
) -> Dict[str, torch.Tensor]:
    """Remap an OpenAI CLIP visual-tower state_dict onto ByobNet module names.

    Keys not starting with `prefix` are dropped; attnpool keys are dropped
    when the target model has no attention-pool head.
    """
    model_has_attn_pool = isinstance(model.head, (RotAttentionPool2d, AttentionPool2d))
    import re

    def _stage_sub(m):
        # 'layerN.M.<type>K' -> 'stages.{N-1}.M.<conv slot K>.<type>'
        stage_idx = int(m.group(1)) - 1
        layer_idx, layer_type, layer_id = int(m.group(2)), m.group(3), int(m.group(4))
        prefix_str = f'stages.{stage_idx}.{layer_idx}.'
        id_map = {1: 'conv1_1x1.', 2: 'conv2_kxk.', 3: 'conv3_1x1.'}
        suffix_str = id_map[layer_id] + layer_type
        return prefix_str + suffix_str

    def _down_sub(m):
        # downsample submodule 0 is the conv, anything else the bn
        stage_idx = int(m.group(1)) - 1
        layer_idx, layer_id = int(m.group(2)), int(m.group(3))
        return f'stages.{stage_idx}.{layer_idx}.shortcut.' + ('conv.conv' if layer_id == 0 else 'conv.bn')

    out_dict = {}
    for k, v in state_dict.items():
        if not k.startswith(prefix):
            continue
        k = re.sub(rf'{prefix}conv([0-9])', r'stem.conv\1.conv', k)
        k = re.sub(rf'{prefix}bn([0-9])', r'stem.conv\1.bn', k)
        k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.([a-z]+)([0-9])', _stage_sub, k)
        k = re.sub(rf'{prefix}layer([0-9])\.([0-9]+)\.downsample\.([0-9])', _down_sub, k)
        if k.startswith(f'{prefix}attnpool'):
            if not model_has_attn_pool:
                continue  # drop attn pool if not using in model
            k = k.replace(prefix + 'attnpool', 'head')  # 'attn_pool')
            k = k.replace('positional_embedding', 'pos_embed')
            k = k.replace('q_proj', 'q')
            k = k.replace('k_proj', 'k')
            k = k.replace('v_proj', 'v')
            k = k.replace('c_proj', 'proj')
        out_dict[k] = v
    return out_dict


def checkpoint_filter_fn(
        state_dict: Dict[str, torch.Tensor],
        model: ByobNet
):
    """Filter/remap pretrained checkpoints; detects OpenAI CLIP format by key."""
    if 'visual.conv1.weight' in state_dict:
        state_dict = _convert_openai_clip(state_dict, model)
    return state_dict


def _create_byobnet(variant: str, pretrained: bool = False, **kwargs) -> ByobNet:
    """Create a ByobNet model.

    Args:
        variant: Model variant name.
        pretrained: Load pretrained weights.
        **kwargs: Additional model arguments.

    Returns:
        ByobNet model instance.
    """
    return build_model_with_cfg(
        ByobNet, variant, pretrained,
        model_cfg=model_cfgs[variant],
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(flatten_sequential=True),
        **kwargs,
    )


def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Create default configuration dictionary.

    Args:
        url: Model weight URL.
        **kwargs: Additional configuration options.

    Returns:
        Configuration dictionary.
    """
    return {
        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'stem.conv', 'classifier': 'head.fc',
        **kwargs
    }
""" return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs } def _cfgr(url: str = '', **kwargs) -> Dict[str, Any]: """Create RepVGG configuration dictionary. Args: url: Model weight URL. **kwargs: Additional configuration options. Returns: Configuration dictionary. """ return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # GPU-Efficient (ResNet) weights 'gernet_s.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_m.idstcv_in1k': _cfg(hf_hub_id='timm/'), 'gernet_l.idstcv_in1k': _cfg(hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), # RepVGG weights 'repvgg_a0.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a1.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_a2.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b0.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b1g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b2g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 
'repvgg_b3.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_b3g4.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit'), 'repvgg_d2se.rvgg_in1k': _cfg( hf_hub_id='timm/', first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv'), license='mit', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, ), # experimental ResNet configs 'resnet51q.ra2_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet61q.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), # ResNeXt-26 models with different attention in Bottleneck blocks 'resnext26ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_resnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'bat_resnext26ts.ch_in1k': _cfgr( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', min_input_size=(3, 256, 256)), # ResNet-32 / 33 models with different attention in Bottleneck blocks 'resnet32ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'seresnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_resnet33ts.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnet50t.ra2_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'gcresnext50ts.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), # custom `timm` specific RegNetZ inspired models w/ different sizing from paper 'regnetz_b16.ra3_in1k': _cfgr( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.94, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'regnetz_c16.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_d32.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_e8.ra3_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'regnetz_b16_evos.untrained': _cfgr( first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.95, test_input_size=(3, 288, 288)), 'regnetz_c16_evos.ch_in1k': _cfgr( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth', first_conv='stem.conv', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320)), 'regnetz_d8_evos.ch_in1k': _cfgr( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'mobileone_s0.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.875, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s1.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s2.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s3.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), 'mobileone_s4.apple_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.9, first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'), ), # original attention pool head variants 'resnet50_clip.openai': _cfgr( hf_hub_id='timm/', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj', ), 'resnet101_clip.openai': _cfgr( hf_hub_id='timm/', num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj', ), 'resnet50x4_clip.openai': _cfgr( hf_hub_id='timm/', num_classes=640, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 288, 288), pool_size=(9, 9), classifier='head.proj', ), 'resnet50x16_clip.openai': _cfgr( hf_hub_id='timm/', num_classes=768, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 384, 384), pool_size=(12, 12), classifier='head.proj', ), 'resnet50x64_clip.openai': _cfgr( hf_hub_id='timm/', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 448, 448), pool_size=(14, 14), classifier='head.proj', ), 'resnet50_clip.cc12m': _cfgr( 
hf_hub_id='timm/', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj', ), 'resnet50_clip.yfcc15m': _cfgr( hf_hub_id='timm/', num_classes=1024, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj', ), 'resnet101_clip.yfcc15m': _cfgr( hf_hub_id='timm/', num_classes=512, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, fixed_input_size=True, input_size=(3, 224, 224), pool_size=(7, 7), classifier='head.proj', ), # avg-pool w/ optional standard classifier head variants 'resnet50_clip_gap.openai': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7), ), 'resnet101_clip_gap.openai': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7), ), 'resnet50x4_clip_gap.openai': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 288, 288), pool_size=(9, 9), ), 'resnet50x16_clip_gap.openai': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), ), 'resnet50x64_clip_gap.openai': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 448, 448), pool_size=(14, 14), ), 'resnet50_clip_gap.cc12m': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7), ), 'resnet50_clip_gap.yfcc15m': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7), ), 'resnet101_clip_gap.yfcc15m': _cfgr( hf_hub_id='timm/', num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 224, 224), pool_size=(7, 7), ), 'resnet50_mlp.untrained': _cfgr( input_size=(3, 256, 256), 
pool_size=(8, 8), ), 'test_byobnet.r160_in1k': _cfgr( hf_hub_id='timm/', first_conv='stem.conv', input_size=(3, 160, 160), crop_pct=0.95, pool_size=(5, 5), ), }) @register_model def gernet_l(pretrained=False, **kwargs) -> ByobNet: """ GEResNet-Large (GENet-Large from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) @register_model def gernet_m(pretrained=False, **kwargs) -> ByobNet: """ GEResNet-Medium (GENet-Normal from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) @register_model def gernet_s(pretrained=False, **kwargs) -> ByobNet: """ EResNet-Small (GENet-Small from official impl) `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 """ return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) @register_model def repvgg_a0(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A0 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a0', pretrained=pretrained, **kwargs) @register_model def repvgg_a1(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A1 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a1', pretrained=pretrained, **kwargs) @register_model def repvgg_a2(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-A2 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) @register_model def repvgg_b0(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B0 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) @register_model def repvgg_b1(pretrained=False, 
**kwargs) -> ByobNet: """ RepVGG-B1 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) @register_model def repvgg_b1g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B1g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b2(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B2 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) @register_model def repvgg_b2g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B2g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) @register_model def repvgg_b3(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B3 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) @register_model def repvgg_b3g4(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-B3g4 `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) @register_model def repvgg_d2se(pretrained=False, **kwargs) -> ByobNet: """ RepVGG-D2se `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 """ return _create_byobnet('repvgg_d2se', pretrained=pretrained, **kwargs) @register_model def resnet51q(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) @register_model def resnet61q(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) @register_model def resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return 
_create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) @register_model def gcresnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) @register_model def seresnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) @register_model def eca_resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) @register_model def bat_resnext26ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) @register_model def resnet32ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) @register_model def resnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) @register_model def seresnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) @register_model def eca_resnet33ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) @register_model def gcresnet50t(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) @register_model def gcresnext50ts(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) @register_model def regnetz_b16(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs) @register_model def regnetz_c16(pretrained=False, **kwargs) -> ByobNet: """ """ return 
_create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs) @register_model def regnetz_d32(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs) @register_model def regnetz_d8(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs) @register_model def regnetz_e8(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs) @register_model def regnetz_b16_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_c16_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs) @register_model def regnetz_d8_evos(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs) @register_model def mobileone_s0(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s0', pretrained=pretrained, **kwargs) @register_model def mobileone_s1(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s1', pretrained=pretrained, **kwargs) @register_model def mobileone_s2(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s2', pretrained=pretrained, **kwargs) @register_model def mobileone_s3(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s3', pretrained=pretrained, **kwargs) @register_model def mobileone_s4(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs) @register_model def resnet50_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50 CLIP image tower """ return _create_byobnet('resnet50_clip', pretrained=pretrained, **kwargs) @register_model def 
resnet101_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-101 CLIP image tower """ return _create_byobnet('resnet101_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x4_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x4 CLIP image tower """ return _create_byobnet('resnet50x4_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x16_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x16 CLIP image tower """ return _create_byobnet('resnet50x16_clip', pretrained=pretrained, **kwargs) @register_model def resnet50x64_clip(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x64 CLIP image tower """ return _create_byobnet('resnet50x64_clip', pretrained=pretrained, **kwargs) @register_model def resnet50_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet101_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-101 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet101_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x4_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x4 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50x4_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x16_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x16 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50x16_clip_gap', pretrained=pretrained, **kwargs) @register_model def resnet50x64_clip_gap(pretrained=False, **kwargs) -> ByobNet: """ OpenAI Modified ResNet-50x64 CLIP image tower w/ avg pool (no attention pool) """ return _create_byobnet('resnet50x64_clip_gap', 
pretrained=pretrained, **kwargs) @register_model def resnet50_mlp(pretrained=False, **kwargs) -> ByobNet: """ """ return _create_byobnet('resnet50_mlp', pretrained=pretrained, **kwargs) @register_model def test_byobnet(pretrained=False, **kwargs) -> ByobNet: """ Minimal test ResNet (BYOB based) model. """ return _create_byobnet('test_byobnet', pretrained=pretrained, **kwargs)
pytorch-image-models/timm/models/byobnet.py/0
{ "file_path": "pytorch-image-models/timm/models/byobnet.py", "repo_id": "pytorch-image-models", "token_count": 55216 }
271
""" The EfficientNet Family in PyTorch An implementation of EfficienNet that covers variety of related models with efficient architectures: * EfficientNet-V2 - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 * EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 * MixNet (Small, Medium, and Large) - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 * MNasNet B1, A1 (SE), Small - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 * FBNet-C - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 * Single-Path NAS Pixel1 - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 * TinyNet - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 - Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch * And likely more... The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing the models and weights open source! 
Hacked together by / Copyright 2019, Ross Wightman """ from functools import partial from typing import Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, LayerType, \ GroupNormAct, LayerNormAct2d, EvoNorm2dS0 from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks, feature_take_indices from ._manipulate import checkpoint_seq, checkpoint from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['EfficientNet', 'EfficientNetFeatures'] class EfficientNet(nn.Module): """EfficientNet model architecture. 
A flexible and performant PyTorch implementation of efficient network architectures, including: * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 * EfficientNet B0-B8, L2 * EfficientNet-EdgeTPU * EfficientNet-CondConv * MixNet S, M, L, XL * MnasNet A1, B1, and small * MobileNet-V2 * FBNet C * Single-Path NAS Pixel1 * TinyNet References: - EfficientNet: https://arxiv.org/abs/1905.11946 - EfficientNetV2: https://arxiv.org/abs/2104.00298 - MixNet: https://arxiv.org/abs/1907.09595 - MnasNet: https://arxiv.org/abs/1807.11626 """ def __init__( self, block_args: BlockArgs, num_classes: int = 1000, num_features: int = 1280, in_chans: int = 3, stem_size: int = 32, stem_kernel_size: int = 3, fix_stem: bool = False, output_stride: int = 32, pad_type: str = '', act_layer: Optional[LayerType] = None, norm_layer: Optional[LayerType] = None, aa_layer: Optional[LayerType] = None, se_layer: Optional[LayerType] = None, round_chs_fn: Callable = round_channels, drop_rate: float = 0., drop_path_rate: float = 0., global_pool: str = 'avg' ) -> None: """Initialize EfficientNet model. Args: block_args: Arguments for building blocks. num_classes: Number of classifier classes. num_features: Number of features for penultimate layer. in_chans: Number of input channels. stem_size: Number of output channels in stem. stem_kernel_size: Kernel size for stem convolution. fix_stem: If True, don't scale stem channels. output_stride: Output stride of network. pad_type: Padding type. act_layer: Activation layer class. norm_layer: Normalization layer class. aa_layer: Anti-aliasing layer class. se_layer: Squeeze-and-excitation layer class. round_chs_fn: Channel rounding function. drop_rate: Dropout rate for classifier. drop_path_rate: Drop path rate for stochastic depth. global_pool: Global pooling type. 
""" super(EfficientNet, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features self.stage_ends = [f['stage'] for f in self.feature_info] head_chs = builder.in_chs # Head + Pooling if num_features > 0: self.conv_head = create_conv2d(head_chs, num_features, 1, padding=pad_type) self.bn2 = norm_act_layer(num_features, inplace=True) self.num_features = self.head_hidden_size = num_features else: self.conv_head = nn.Identity() self.bn2 = nn.Identity() self.num_features = self.head_hidden_size = head_chs self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) efficientnet_init_weights(self) def as_sequential(self) -> nn.Sequential: """Convert model to sequential for feature extraction.""" layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) layers.extend([self.conv_head, self.bn2, self.global_pool]) layers.extend([nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Union[str, List]]: """Create regex patterns for parameter groups. Args: coarse: Use coarse (stage-level) grouping. Returns: Dictionary mapping group names to regex patterns. 
""" return dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head|bn2', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing. Args: enable: Whether to enable gradient checkpointing. """ self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: """Get the classifier module.""" return self.classifier def reset_classifier(self, num_classes: int, global_pool: str = 'avg') -> None: """Reset the classifier head. Args: num_classes: Number of classes for new classifier. global_pool: Global pooling type. """ self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, extra_blocks: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """Forward features that returns intermediates. Args: x: Input image tensor. indices: Take last n blocks if int, all if None, select matching indices if sequence. norm: Apply norm layer to compatible intermediates. stop_early: Stop iterating over blocks when last desired intermediate hit. output_fmt: Shape of intermediate feature outputs. intermediates_only: Only return intermediate features. extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info. Returns: List of intermediate features or tuple of (final features, intermediates). """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
intermediates = [] if extra_blocks: take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) else: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] # forward pass feat_idx = 0 # stem is index 0 x = self.conv_stem(x) x = self.bn1(x) if feat_idx in take_indices: intermediates.append(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index] for feat_idx, blk in enumerate(blocks, start=1): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(blk, x) else: x = blk(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == self.stage_ends[-1]: x = self.conv_head(x) x = self.bn2(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, extra_blocks: bool = False, ) -> List[int]: """Prune layers not required for specified intermediates. Args: indices: Indices of intermediate layers to keep. prune_norm: Whether to prune normalization layers. prune_head: Whether to prune the classifier head. extra_blocks: Include all blocks in indexing. Returns: List of indices that were kept. 
""" if extra_blocks: take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices) else: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.blocks = self.blocks[:max_index] # truncate blocks w/ stem as idx 0 if prune_norm or max_index < len(self.blocks): self.conv_head = nn.Identity() self.bn2 = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: """Forward pass through feature extraction layers.""" x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) x = self.conv_head(x) x = self.bn2(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through classifier head. Args: x: Feature tensor. pre_logits: Return features before final classifier. Returns: Output tensor. """ x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass.""" x = self.forward_features(x) x = self.forward_head(x) return x class EfficientNetFeatures(nn.Module): """ EfficientNet Feature Extractor A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation and object detection models. """ def __init__( self, block_args: BlockArgs, out_indices: Tuple[int, ...] 
 = (0, 1, 2, 3, 4),
            feature_location: str = 'bottleneck',
            in_chans: int = 3,
            stem_size: int = 32,
            stem_kernel_size: int = 3,
            fix_stem: bool = False,
            output_stride: int = 32,
            pad_type: str = '',
            act_layer: Optional[LayerType] = None,
            norm_layer: Optional[LayerType] = None,
            aa_layer: Optional[LayerType] = None,
            se_layer: Optional[LayerType] = None,
            round_chs_fn: Callable = round_channels,
            drop_rate: float = 0.,
            drop_path_rate: float = 0.,
    ):
        super(EfficientNetFeatures, self).__init__()
        act_layer = act_layer or nn.ReLU
        norm_layer = norm_layer or nn.BatchNorm2d
        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
        se_layer = se_layer or SqueezeExcite
        self.drop_rate = drop_rate
        self.grad_checkpointing = False

        # Stem
        if not fix_stem:
            # Stem width scales with the channel multiplier unless pinned.
            stem_size = round_chs_fn(stem_size)
        self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type)
        self.bn1 = norm_act_layer(stem_size, inplace=True)

        # Middle stages (IR/ER/DS Blocks)
        builder = EfficientNetBuilder(
            output_stride=output_stride,
            pad_type=pad_type,
            round_chs_fn=round_chs_fn,
            act_layer=act_layer,
            norm_layer=norm_layer,
            aa_layer=aa_layer,
            se_layer=se_layer,
            drop_path_rate=drop_path_rate,
            feature_location=feature_location,
        )
        self.blocks = nn.Sequential(*builder(stem_size, block_args))
        self.feature_info = FeatureInfo(builder.features, out_indices)
        # Map stage index -> output position for selecting features in forward().
        self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}

        efficientnet_init_weights(self)

        # Register feature extraction hooks with FeatureHooks helper
        self.feature_hooks = None
        if feature_location != 'bottleneck':
            # Non-bottleneck locations (e.g. 'expansion') need hooks on inner modules.
            hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
            self.feature_hooks = FeatureHooks(hooks, self.named_modules())

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable: bool = True) -> None:
        """Enable or disable gradient checkpointing.

        Args:
            enable: Whether to enable gradient checkpointing.
        """
        self.grad_checkpointing = enable

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Forward pass, returning the selected intermediate feature maps."""
        x = self.conv_stem(x)
        x = self.bn1(x)
        if self.feature_hooks is None:
            # Bottleneck mode: collect outputs directly while stepping through blocks.
            features = []
            if 0 in self._stage_out_idx:
                features.append(x)  # add stem out
            for i, b in enumerate(self.blocks):
                if self.grad_checkpointing and not torch.jit.is_scripting():
                    x = checkpoint(b, x)
                else:
                    x = b(x)
                if i + 1 in self._stage_out_idx:
                    features.append(x)
            return features
        else:
            # Hook mode: run all blocks, then read captured outputs from the hooks.
            self.blocks(x)
            out = self.feature_hooks.get_output(x.device)
            return list(out.values())


def _create_effnet(variant, pretrained=False, **kwargs):
    """Build an EfficientNet(-Features) model via build_model_with_cfg.

    `features_only=True` in kwargs selects feature-extraction form: either the
    generic feature-wrapper path ('cfg') or the EfficientNetFeatures class ('cls').
    """
    features_mode = ''
    model_cls = EfficientNet
    kwargs_filter = None
    if kwargs.pop('features_only', False):
        if 'feature_cfg' in kwargs or 'feature_cls' in kwargs:
            features_mode = 'cfg'
        else:
            # EfficientNetFeatures does not accept classifier-related kwargs.
            kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool')
            model_cls = EfficientNetFeatures
            features_mode = 'cls'

    model = build_model_with_cfg(
        model_cls,
        variant,
        pretrained,
        features_only=features_mode == 'cfg',
        pretrained_strict=features_mode != 'cls',  # feature model drops head weights
        kwargs_filter=kwargs_filter,
        **kwargs,
    )
    if features_mode == 'cls':
        model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg)
    return model


def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a mnasnet-a1 model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
    Paper: https://arxiv.org/pdf/1807.11626.pdf.

    Args:
      channel_multiplier: multiplier to number of channels per layer.
""" arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_noskip'], # stage 1, 112x112 in ['ir_r2_k3_s2_e6_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25'], # stage 3, 28x28 in ['ir_r4_k3_s2_e6_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112_se0.25'], # stage 5, 14x14in ['ir_r3_k5_s2_e6_c160_se0.25'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-b1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_c16_noskip'], # stage 1, 112x112 in ['ir_r3_k3_s2_e3_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40'], # stage 3, 28x28 in ['ir_r3_k5_s2_e6_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c96'], # stage 5, 14x14in ['ir_r4_k5_s2_e6_c192'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320_noskip'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-b1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ ['ds_r1_k3_s1_c8'], ['ir_r1_k3_s2_e3_c16'], ['ir_r2_k3_s2_e6_c16'], ['ir_r4_k5_s2_e6_c32_se0.25'], ['ir_r3_k3_s1_e6_c32_se0.25'], ['ir_r3_k5_s2_e6_c88_se0.25'], ['ir_r1_k3_s1_e6_c144'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=8, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v1( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, fix_stem_head=False, head_conv=False, pretrained=False, **kwargs ): """ Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py Paper: https://arxiv.org/abs/1801.04381 """ arch_def = [ ['dsa_r1_k3_s1_c64'], ['dsa_r2_k3_s2_c128'], ['dsa_r2_k3_s2_c256'], ['dsa_r6_k3_s2_c512'], ['dsa_r2_k3_s2_c1024'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) head_features = (1024 if fix_stem_head else max(1024, round_chs_fn(1024))) if head_conv else 0 model_kwargs = dict( block_args=decode_arch_def( arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head, group_size=group_size, ), num_features=head_features, stem_size=32, fix_stem=fix_stem_head, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu6'), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v2( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, fix_stem_head=False, pretrained=False, **kwargs ): """ Generate MobileNet-V2 network Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py Paper: https://arxiv.org/abs/1801.04381 """ arch_def = [ ['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24'], 
        ['ir_r3_k3_s2_e6_c32'],
        ['ir_r4_k3_s2_e6_c64'],
        ['ir_r3_k3_s1_e6_c96'],
        ['ir_r3_k3_s2_e6_c160'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
    model_kwargs = dict(
        block_args=decode_arch_def(
            arch_def,
            depth_multiplier=depth_multiplier,
            fix_first_last=fix_stem_head,
            group_size=group_size,
        ),
        # Head width is fixed at 1280 or scaled up (never down) with the channel multiplier.
        num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)),
        stem_size=32,
        fix_stem=fix_stem_head,
        round_chs_fn=round_chs_fn,
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'relu6'),
        **kwargs
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """ FBNet-C

        Paper: https://arxiv.org/abs/1812.03443
        Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py

        NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper,
        it was used to confirm some building block details
    """
    arch_def = [
        ['ir_r1_k3_s1_e1_c16'],
        ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'],
        ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'],
        ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'],
        ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'],
        ['ir_r4_k5_s2_e6_c184'],
        ['ir_r1_k3_s1_e6_c352'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        stem_size=16,
        num_features=1984,  # paper suggests this, but is not 100% clear
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates the Single-Path NAS model from search targeted for Pixel1 phone.

    Paper: https://arxiv.org/abs/1904.02877

    Args:
      channel_multiplier: multiplier to number of channels per layer.
    """
    arch_def = [
        # stage 0, 112x112 in
        ['ds_r1_k3_s1_c16_noskip'],
        # stage 1, 112x112 in
        ['ir_r3_k3_s2_e3_c24'],
        # stage 2, 56x56 in
        ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],
        # stage 3, 28x28 in
        ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],
        # stage 4, 14x14in
        ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],
        # stage 5, 14x14in
        ['ir_r4_k5_s2_e6_c192'],
        # stage 6, 7x7 in
        ['ir_r1_k3_s1_e6_c320_noskip']
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        stem_size=32,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnet(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8,
        group_size=None, pretrained=False, **kwargs
):
    """Creates an EfficientNet model.
    Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
    Paper: https://arxiv.org/abs/1905.11946

    EfficientNet params
    name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
    'efficientnet-b0': (1.0, 1.0, 224, 0.2),
    'efficientnet-b1': (1.0, 1.1, 240, 0.2),
    'efficientnet-b2': (1.1, 1.2, 260, 0.3),
    'efficientnet-b3': (1.2, 1.4, 300, 0.3),
    'efficientnet-b4': (1.4, 1.8, 380, 0.4),
    'efficientnet-b5': (1.6, 2.2, 456, 0.4),
    'efficientnet-b6': (1.8, 2.6, 528, 0.5),
    'efficientnet-b7': (2.0, 3.1, 600, 0.5),
    'efficientnet-b8': (2.2, 3.6, 672, 0.5),
    'efficientnet-l2': (4.3, 5.3, 800, 0.5),

    Args:
      channel_multiplier: multiplier to number of channels per layer
      depth_multiplier: multiplier to number of repeats per stage
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'],
        ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'],
        ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25'],
        ['ir_r4_k5_s2_e6_c192_se0.25'],
        ['ir_r1_k3_s1_e6_c320_se0.25'],
    ]
    round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=round_chs_fn(1280),
        stem_size=32,
        round_chs_fn=round_chs_fn,
        act_layer=resolve_act_layer(kwargs, 'swish'),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnet_edge(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
    """ Creates an EfficientNet-EdgeTPU model

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu
    """
    arch_def = [
        # NOTE `fc` is present to override a mismatch between stem channels and in chs not
        # present in other models
        ['er_r1_k3_s1_e4_c24_fc24_noskip'],
        ['er_r2_k3_s2_e8_c32'],
        ['er_r4_k3_s2_e8_c48'],
        ['ir_r5_k5_s2_e8_c96'],
        ['ir_r4_k5_s1_e8_c144'],
        ['ir_r2_k5_s2_e8_c192'],
    ]
    round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=round_chs_fn(1280),
        stem_size=32,
        round_chs_fn=round_chs_fn,
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'relu'),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnet_condconv(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs
):
    """Creates an EfficientNet-CondConv model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16_se0.25'],
        ['ir_r2_k3_s2_e6_c24_se0.25'],
        ['ir_r2_k5_s2_e6_c40_se0.25'],
        ['ir_r3_k3_s2_e6_c80_se0.25'],
        ['ir_r3_k5_s1_e6_c112_se0.25_cc4'],
        ['ir_r4_k5_s2_e6_c192_se0.25_cc4'],
        ['ir_r1_k3_s1_e6_c320_se0.25_cc4'],
    ]
    # NOTE unlike official impl, this one uses `cc<x>` option where x is the base number of experts for each stage and
    # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers
    round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier),
        num_features=round_chs_fn(1280),
        stem_size=32,
        round_chs_fn=round_chs_fn,
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'swish'),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Creates an EfficientNet-Lite model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite
    Paper: https://arxiv.org/abs/1905.11946

    EfficientNet params
    name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
      'efficientnet-lite0': (1.0, 1.0, 224, 0.2),
      'efficientnet-lite1': (1.0, 1.1, 240, 0.2),
      'efficientnet-lite2': (1.1, 1.2, 260, 0.3),
      'efficientnet-lite3': (1.2, 1.4, 280, 0.3),
      'efficientnet-lite4': (1.4, 1.8, 300, 0.3),

    Args:
      channel_multiplier: multiplier to number of channels per layer
      depth_multiplier: multiplier to number of repeats per stage
    """
    # Lite variants drop SE blocks and use ReLU6 (quantization friendly), fixed stem/head.
    arch_def = [
        ['ds_r1_k3_s1_e1_c16'],
        ['ir_r2_k3_s2_e6_c24'],
        ['ir_r2_k5_s2_e6_c40'],
        ['ir_r3_k3_s2_e6_c80'],
        ['ir_r3_k5_s1_e6_c112'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),
        num_features=1280,
        stem_size=32,
        fix_stem=True,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        act_layer=resolve_act_layer(kwargs, 'relu6'),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnetv2_base(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
    """ Creates an EfficientNet-V2 base model

    Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
    Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
    """
    arch_def = [
        ['cn_r1_k3_s1_e1_c16_skip'],
        ['er_r2_k3_s2_e4_c32'],
        ['er_r2_k3_s2_e4_c48'],
        ['ir_r3_k3_s2_e4_c96_se0.25'],
        ['ir_r5_k3_s1_e6_c112_se0.25'],
        ['ir_r8_k3_s2_e6_c192_se0.25'],
    ]
    round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=round_chs_fn(1280),
        stem_size=32,
        round_chs_fn=round_chs_fn,
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'silu'),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnetv2_s(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs
):
    """ Creates an EfficientNet-V2 Small model

    Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
    Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298

    NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model,
        before ref the impl was released.
    """
    arch_def = [
        ['cn_r2_k3_s1_e1_c24_skip'],
        ['er_r4_k3_s2_e4_c48'],
        ['er_r4_k3_s2_e4_c64'],
        ['ir_r6_k3_s2_e4_c128_se0.25'],
        ['ir_r9_k3_s1_e6_c160_se0.25'],
        ['ir_r15_k3_s2_e6_c256_se0.25'],
    ]
    num_features = 1280
    if rw:
        # my original variant, based on paper figure differs from the official release
        arch_def[0] = ['er_r2_k3_s1_e1_c24']
        arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25']
        num_features = 1792

    round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=round_chs_fn(num_features),
        stem_size=24,
        round_chs_fn=round_chs_fn,
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'silu'),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnetv2_m(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
    """ Creates an EfficientNet-V2 Medium model

    Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
    Paper:
    `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
    """
    arch_def = [
        ['cn_r3_k3_s1_e1_c24_skip'],
        ['er_r5_k3_s2_e4_c48'],
        ['er_r5_k3_s2_e4_c80'],
        ['ir_r7_k3_s2_e4_c160_se0.25'],
        ['ir_r14_k3_s1_e6_c176_se0.25'],
        ['ir_r18_k3_s2_e6_c304_se0.25'],
        ['ir_r5_k3_s1_e6_c512_se0.25'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=1280,
        stem_size=24,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'silu'),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnetv2_l(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
    """ Creates an EfficientNet-V2 Large model

    Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
    Paper:
    `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
    """
    arch_def = [
        ['cn_r4_k3_s1_e1_c32_skip'],
        ['er_r7_k3_s2_e4_c64'],
        ['er_r7_k3_s2_e4_c96'],
        ['ir_r10_k3_s2_e4_c192_se0.25'],
        ['ir_r19_k3_s1_e6_c224_se0.25'],
        ['ir_r25_k3_s2_e6_c384_se0.25'],
        ['ir_r7_k3_s1_e6_c640_se0.25'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=1280,
        stem_size=32,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'silu'),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnetv2_xl(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs
):
    """ Creates an EfficientNet-V2 Xtra-Large model

    Ref impl: https://github.com/google/automl/tree/master/efficientnetv2
    Paper:
    `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298
    """
    arch_def = [
        ['cn_r4_k3_s1_e1_c32_skip'],
        ['er_r8_k3_s2_e4_c64'],
        ['er_r8_k3_s2_e4_c96'],
        ['ir_r16_k3_s2_e4_c192_se0.25'],
        ['ir_r24_k3_s1_e6_c256_se0.25'],
        ['ir_r32_k3_s2_e6_c512_se0.25'],
        ['ir_r8_k3_s1_e6_c640_se0.25'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=1280,
        stem_size=32,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        act_layer=resolve_act_layer(kwargs, 'silu'),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_efficientnet_x(
        variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8,
        group_size=None, version=1, pretrained=False, **kwargs
):
    """Creates an EfficientNet model.

    Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
    Paper: https://arxiv.org/abs/1905.11946

    EfficientNet params
    name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
    'efficientnet-x-b0': (1.0, 1.0, 224, 0.2),
    'efficientnet-x-b1': (1.0, 1.1, 240, 0.2),
    'efficientnet-x-b2': (1.1, 1.2, 260, 0.3),
    'efficientnet-x-b3': (1.2, 1.4, 300, 0.3),
    'efficientnet-x-b4': (1.4, 1.8, 380, 0.4),
    'efficientnet-x-b5': (1.6, 2.2, 456, 0.4),
    'efficientnet-x-b6': (1.8, 2.6, 528, 0.5),
    'efficientnet-x-b7': (2.0, 3.1, 600, 0.5),
    'efficientnet-x-b8': (2.2, 3.6, 672, 0.5),
    'efficientnet-l2': (4.3, 5.3, 800, 0.5),

    Args:
      channel_multiplier: multiplier to number of channels per layer
      depth_multiplier: multiplier to number of repeats per stage
    """
    # NOTE: the triple-quoted string below is a dead expression-statement kept as a
    # reference copy of the official block-args definitions; it has no runtime effect.
    """
    if version == 1:
        blocks_args = [
            'r1_k3_s11_e1_i32_o16_se0.25_d1_a0',
            'r2_k3_s22_e6_i16_o24_se0.25_f1_d2_a1',
            'r2_k5_s22_e6_i24_o40_se0.25_f1_a1',
            'r3_k3_s22_e6_i40_o80_se0.25_a0',
            'r3_k5_s11_e6_i80_o112_se0.25_a0',
            'r4_k5_s22_e6_i112_o192_se0.25_a0',
            'r1_k3_s11_e6_i192_o320_se0.25_a0',
        ]
    elif version == 2:
        blocks_args = [
            'r1_k3_s11_e1_i32_o16_se0.25_d1_a0',
            'r2_k3_s22_e4_i16_o24_se0.25_f1_d2_a1',
            'r2_k5_s22_e4_i24_o40_se0.25_f1_a1',
            'r3_k3_s22_e4_i40_o80_se0.25_a0',
            'r3_k5_s11_e6_i80_o112_se0.25_a0',
            'r4_k5_s22_e6_i112_o192_se0.25_a0',
            'r1_k3_s11_e6_i192_o320_se0.25_a0',
        ]
    """
    if version == 1:
        arch_def = [
            ['ds_r1_k3_s1_e1_c16_se0.25_d1'],
            ['er_r2_k3_s2_e6_c24_se0.25_nre'],
            ['er_r2_k5_s2_e6_c40_se0.25_nre'],
            ['ir_r3_k3_s2_e6_c80_se0.25'],
            ['ir_r3_k5_s1_e6_c112_se0.25'],
            ['ir_r4_k5_s2_e6_c192_se0.25'],
            ['ir_r1_k3_s1_e6_c320_se0.25'],
        ]
    else:
        # v2 uses lower expansion (e4) in the early stages.
        arch_def = [
            ['ds_r1_k3_s1_e1_c16_se0.25_d1'],
            ['er_r2_k3_s2_e4_c24_se0.25_nre'],
            ['er_r2_k5_s2_e4_c40_se0.25_nre'],
            ['ir_r3_k3_s2_e4_c80_se0.25'],
            ['ir_r3_k5_s1_e6_c112_se0.25'],
            ['ir_r4_k5_s2_e6_c192_se0.25'],
            ['ir_r1_k3_s1_e6_c320_se0.25'],
        ]
    round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor)
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size),
        num_features=round_chs_fn(1280),
        stem_size=32,
        round_chs_fn=round_chs_fn,
        act_layer=resolve_act_layer(kwargs, 'silu'),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs,
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a MixNet Small model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595
    """
    arch_def = [
        # stage 0, 112x112 in
        ['ds_r1_k3_s1_e1_c16'],  # relu
        # stage 1, 112x112 in
        ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'],  # relu
        # stage 2, 56x56 in
        ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],  # swish
        # stage 3, 28x28 in
        ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'],  # swish
        # stage 4, 14x14in
        ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],  # swish
        # stage 5, 14x14in
        ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],  # swish
        # 7x7
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        num_features=1536,
        stem_size=16,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a MixNet Medium-Large model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595
    """
    arch_def = [
        # stage 0, 112x112 in
        ['ds_r1_k3_s1_e1_c24'],  # relu
        # stage 1, 112x112 in
        ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'],  # relu
        # stage 2, 56x56 in
        ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],  # swish
        # stage 3, 28x28 in
        ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'],  # swish
        # stage 4, 14x14in
        ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],  # swish
        # stage 5, 14x14in
        ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],  # swish
        # 7x7
    ]
    model_kwargs = dict(
        # depth_trunc='round' matches the official MixNet repeat scaling behavior.
        block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'),
        num_features=1536,
        stem_size=24,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
        **kwargs
    )
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model


def _gen_tinynet(variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Creates a TinyNet model.
""" arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=max(1280, round_channels(1280, model_width, 8, None)), stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=model_width), act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_edgetpu(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Based on definitions in: https://github.com/tensorflow/models/tree/d2427a562f401c9af118e47af2f030a0a5599f55/official/projects/edgetpu/vision """ if 'edgetpu_v2' in variant: stem_size = 64 stem_kernel_size = 5 group_size = 64 num_features = 1280 act_layer = resolve_act_layer(kwargs, 'relu') def _arch_def(chs: List[int], group_size: int): return [ # stage 0, 112x112 in [f'cn_r1_k1_s1_c{chs[0]}'], # NOTE with expansion==1, official impl block ends just 1x1 pwl # stage 1, 112x112 in [f'er_r1_k3_s2_e8_c{chs[1]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[1]}'], # stage 2, 56x56 in [ f'er_r1_k3_s2_e8_c{chs[2]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}', f'er_r1_k3_s1_e4_c{chs[2]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}', ], # stage 3, 28x28 in [f'er_r1_k3_s2_e8_c{chs[3]}', f'ir_r3_k3_s1_e4_c{chs[3]}'], # stage 4, 14x14in [f'ir_r1_k3_s1_e8_c{chs[4]}', f'ir_r3_k3_s1_e4_c{chs[4]}'], # stage 5, 14x14in [f'ir_r1_k3_s2_e8_c{chs[5]}', f'ir_r3_k3_s1_e4_c{chs[5]}'], # stage 6, 7x7 in [f'ir_r1_k3_s1_e8_c{chs[6]}'], ] if 'edgetpu_v2_xs' in variant: stem_size = 32 stem_kernel_size = 3 channels = [16, 32, 48, 96, 144, 160, 192] elif 'edgetpu_v2_s' in variant: 
channels = [24, 48, 64, 128, 160, 192, 256] elif 'edgetpu_v2_m' in variant: channels = [32, 64, 80, 160, 192, 240, 320] num_features = 1344 elif 'edgetpu_v2_l' in variant: stem_kernel_size = 7 group_size = 128 channels = [32, 64, 96, 192, 240, 256, 384] num_features = 1408 else: assert False arch_def = _arch_def(channels, group_size) else: # v1 stem_size = 32 stem_kernel_size = 3 num_features = 1280 act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in ['cn_r1_k1_s1_c16'], # stage 1, 112x112 in ['er_r1_k3_s2_e8_c32', 'er_r3_k3_s1_e4_c32'], # stage 2, 56x56 in ['er_r1_k3_s2_e8_c48', 'er_r3_k3_s1_e4_c48'], # stage 3, 28x28 in ['ir_r1_k3_s2_e8_c96', 'ir_r3_k3_s1_e4_c96'], # stage 4, 14x14in ['ir_r1_k3_s1_e8_c96_noskip', 'ir_r3_k3_s1_e4_c96'], # stage 5, 14x14in ['ir_r1_k5_s2_e8_c160', 'ir_r3_k5_s1_e4_c160'], # stage 6, 7x7 in ['ir_r1_k3_s1_e8_c192'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=num_features, stem_size=stem_size, stem_kernel_size=stem_kernel_size, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_test_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Minimal test EfficientNet generator. """ arch_def = [ ['cn_r1_k3_s1_e1_c16_skip'], ['er_r1_k3_s2_e4_c24'], ['er_r1_k3_s2_e4_c32'], ['ir_r1_k3_s2_e4_c48_se0.25'], ['ir_r1_k3_s2_e4_c64_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) 
model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_chs_fn(256), stem_size=24, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'mnasnet_050.untrained': _cfg(), 'mnasnet_075.untrained': _cfg(), 'mnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', hf_hub_id='timm/'), 'mnasnet_140.untrained': _cfg(), 'semnasnet_050.untrained': _cfg(), 'semnasnet_075.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth', hf_hub_id='timm/'), 'semnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', hf_hub_id='timm/'), 'semnasnet_140.untrained': _cfg(), 'mnasnet_small.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth', hf_hub_id='timm/'), 'mobilenetv1_100.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95, ), 'mobilenetv1_100h.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, test_input_size=(3, 256, 256), test_crop_pct=0.95, ), 'mobilenetv1_125.ra4_e3600_r224_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, 
        std=IMAGENET_INCEPTION_STD,
        crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0,
    ),

    'mobilenetv2_035.untrained': _cfg(),
    'mobilenetv2_050.lamb_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth',
        hf_hub_id='timm/',
        interpolation='bicubic',
    ),
    'mobilenetv2_075.untrained': _cfg(),
    'mobilenetv2_100.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth',
        hf_hub_id='timm/'),
    'mobilenetv2_110d.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth',
        hf_hub_id='timm/'),
    'mobilenetv2_120d.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth',
        hf_hub_id='timm/'),
    'mobilenetv2_140.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth',
        hf_hub_id='timm/'),

    'fbnetc_100.rmsp_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth',
        hf_hub_id='timm/',
        interpolation='bilinear'),
    'spnasnet_100.rmsp_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth',
        hf_hub_id='timm/',
        interpolation='bilinear'),

    # NOTE experimenting with alternate attention
    'efficientnet_b0.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth',
        hf_hub_id='timm/'),
    'efficientnet_b0.ra4_e3600_r224_in1k': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=1.0),
    'efficientnet_b1.ra4_e3600_r240_in1k': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 240, 240),
        crop_pct=0.9, pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0),
    'efficientnet_b1.ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth',
        hf_hub_id='timm/',
        test_input_size=(3, 256, 256), test_crop_pct=1.0),
    'efficientnet_b2.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth',
        hf_hub_id='timm/',
        input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), test_crop_pct=1.0),
    'efficientnet_b3.ra2_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth',
        hf_hub_id='timm/',
        input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0),
    'efficientnet_b4.ra2_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth',
        hf_hub_id='timm/',
        input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0),
    'efficientnet_b5.sw_in12k_ft_in1k': _cfg(
        hf_hub_id='timm/',
        input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'),
    'efficientnet_b5.sw_in12k': _cfg(
        hf_hub_id='timm/',
        input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821),
    'efficientnet_b6.untrained': _cfg(
        url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
    'efficientnet_b7.untrained': _cfg(
        url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
    'efficientnet_b8.untrained': _cfg(
        url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),
    'efficientnet_l2.untrained': _cfg(
        url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961),

    # FIXME experimental
    'efficientnet_b0_gn.untrained': _cfg(),
    'efficientnet_b0_g8_gn.untrained': _cfg(),
    'efficientnet_b0_g16_evos.untrained': _cfg(),
    'efficientnet_b3_gn.untrained': _cfg(
        input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0),
    'efficientnet_b3_g8_gn.untrained': _cfg(
        input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0),
    'efficientnet_blur_b0.untrained': _cfg(),

    # EfficientNet-Edge variants
    'efficientnet_es.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth',
        hf_hub_id='timm/'),
    'efficientnet_em.ra2_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth',
        hf_hub_id='timm/',
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
    'efficientnet_el.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth',
        hf_hub_id='timm/',
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
    'efficientnet_es_pruned.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth',
        hf_hub_id='timm/'),
    'efficientnet_el_pruned.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth',
        hf_hub_id='timm/',
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),

    'efficientnet_cc_b0_4e.untrained': _cfg(),
    'efficientnet_cc_b0_8e.untrained': _cfg(),
    'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),

    'efficientnet_lite0.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth',
        hf_hub_id='timm/'),
    'efficientnet_lite1.untrained': _cfg(
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
    'efficientnet_lite2.untrained': _cfg(
        input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
    'efficientnet_lite3.untrained': _cfg(
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
    'efficientnet_lite4.untrained': _cfg(
        input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),

    'efficientnet_b1_pruned.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth',
        hf_hub_id='timm/',
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882,
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'efficientnet_b2_pruned.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth',
        hf_hub_id='timm/',
        input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890,
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'efficientnet_b3_pruned.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth',
        hf_hub_id='timm/',
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904,
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),

    # EfficientNetV2 'RW' (timm-specific) variants
    'efficientnetv2_rw_t.ra2_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth',
        hf_hub_id='timm/',
        input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0),
    'gc_efficientnetv2_rw_t.agc_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth',
        hf_hub_id='timm/',
        input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0),
    'efficientnetv2_rw_s.ra2_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth',
        hf_hub_id='timm/',
        input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0),
    'efficientnetv2_rw_m.agc_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth',
        hf_hub_id='timm/',
        input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0),

    'efficientnetv2_s.untrained': _cfg(
        input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0),
    'efficientnetv2_m.untrained': _cfg(
        input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0),
    'efficientnetv2_l.untrained': _cfg(
        input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
    'efficientnetv2_xl.untrained': _cfg(
        input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0),

    # TF-ported EfficientNet weights, 'ns' tag
    'tf_efficientnet_b0.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth',
        hf_hub_id='timm/',
        input_size=(3, 224, 224)),
    'tf_efficientnet_b1.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth',
        hf_hub_id='timm/',
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
    'tf_efficientnet_b2.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth',
        hf_hub_id='timm/',
        input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
    'tf_efficientnet_b3.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth',
        hf_hub_id='timm/',
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
    'tf_efficientnet_b4.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth',
        hf_hub_id='timm/',
        input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
    'tf_efficientnet_b5.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth',
        hf_hub_id='timm/',
        input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
    'tf_efficientnet_b6.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth',
        hf_hub_id='timm/',
        input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
    'tf_efficientnet_b7.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth',
        hf_hub_id='timm/',
        input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
    'tf_efficientnet_l2.ns_jft_in1k_475': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth',
        hf_hub_id='timm/',
        input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936),
    'tf_efficientnet_l2.ns_jft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth',
        hf_hub_id='timm/',
        input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96),

    # TF-ported EfficientNet weights, 'ap' tag (Inception mean/std)
    'tf_efficientnet_b0.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 224, 224)),
    'tf_efficientnet_b1.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
    'tf_efficientnet_b2.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
    'tf_efficientnet_b3.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
    'tf_efficientnet_b4.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
    'tf_efficientnet_b5.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
    'tf_efficientnet_b6.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
    'tf_efficientnet_b7.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
    'tf_efficientnet_b8.ap_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),

    'tf_efficientnet_b5.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth',
        hf_hub_id='timm/',
        input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
    'tf_efficientnet_b7.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth',
        hf_hub_id='timm/',
        input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),
    'tf_efficientnet_b8.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth',
        hf_hub_id='timm/',
        input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954),

    'tf_efficientnet_b0.aa_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth',
        hf_hub_id='timm/',
        input_size=(3, 224, 224)),
    'tf_efficientnet_b1.aa_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth',
        hf_hub_id='timm/',
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
    'tf_efficientnet_b2.aa_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth',
        hf_hub_id='timm/',
        input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
    'tf_efficientnet_b3.aa_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth',
        hf_hub_id='timm/',
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
    'tf_efficientnet_b4.aa_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth',
        hf_hub_id='timm/',
        input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
    'tf_efficientnet_b5.aa_in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth',
        hf_hub_id='timm/',
        input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
    'tf_efficientnet_b6.aa_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth',
        hf_hub_id='timm/',
        input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942),
    'tf_efficientnet_b7.aa_in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth',
        hf_hub_id='timm/',
        input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949),

    'tf_efficientnet_b0.in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth',
        hf_hub_id='timm/',
        input_size=(3, 224, 224)),
    'tf_efficientnet_b1.in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth',
        hf_hub_id='timm/',
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
    'tf_efficientnet_b2.in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth',
        hf_hub_id='timm/',
        input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890),
    'tf_efficientnet_b3.in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth',
        hf_hub_id='timm/',
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),
    'tf_efficientnet_b4.in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth',
        hf_hub_id='timm/',
        input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922),
    'tf_efficientnet_b5.in1k': _cfg(
        url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth',
        hf_hub_id='timm/',
        input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),

    'tf_efficientnet_es.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 224, 224), ),
    'tf_efficientnet_em.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),
    'tf_efficientnet_el.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904),

    'tf_efficientnet_cc_b0_4e.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'tf_efficientnet_cc_b0_8e.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
    'tf_efficientnet_cc_b1_8e.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth',
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882),

    'tf_efficientnet_lite0.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        interpolation='bicubic',  # should be bilinear but bicubic better match for TF bilinear at low res
    ),
    'tf_efficientnet_lite1.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882,
        interpolation='bicubic',  # should be bilinear but bicubic better match for TF bilinear at low res
    ),
    'tf_efficientnet_lite2.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890,
        interpolation='bicubic',  # should be bilinear but bicubic better match for TF bilinear at low res
    ),
    'tf_efficientnet_lite3.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904,
        interpolation='bilinear'),
    'tf_efficientnet_lite4.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920,
        interpolation='bilinear'),

    # TF-ported EfficientNetV2 weights (0.5 mean/std normalization)
    'tf_efficientnetv2_s.in21k_ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
    'tf_efficientnetv2_m.in21k_ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_l.in21k_ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_s.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
    'tf_efficientnetv2_m.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_l.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_s.in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        num_classes=21843, input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
    'tf_efficientnetv2_m.in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_l.in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_xl.in21k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth',
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'),
    'tf_efficientnetv2_b0.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth',
        hf_hub_id='timm/',
        input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)),
    'tf_efficientnetv2_b1.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth',
        hf_hub_id='timm/',
        input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882),
    'tf_efficientnetv2_b2.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth',
        hf_hub_id='timm/',
        input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890),
    'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'),
    'tf_efficientnetv2_b3.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth',
        hf_hub_id='timm/',
        input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904),
    'tf_efficientnetv2_b3.in21k': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        num_classes=21843, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904),

    # MixNet
    'mixnet_s.ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth',
        hf_hub_id='timm/'),
    'mixnet_m.ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth',
        hf_hub_id='timm/'),
    'mixnet_l.ft_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth',
        hf_hub_id='timm/'),
    'mixnet_xl.ra_in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth',
        hf_hub_id='timm/'),
    'mixnet_xxl.untrained': _cfg(),

    'tf_mixnet_s.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth',
        hf_hub_id='timm/'),
    'tf_mixnet_m.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth',
        hf_hub_id='timm/'),
    'tf_mixnet_l.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth',
        hf_hub_id='timm/'),

    # TinyNet — input sizes are scaled-down 224 (see per-entry comments)
    "tinynet_a.in1k": _cfg(
        input_size=(3, 192, 192), pool_size=(6, 6),  # int(224 * 0.86)
        url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth',
        hf_hub_id='timm/'),
    "tinynet_b.in1k": _cfg(
        input_size=(3, 188, 188), pool_size=(6, 6),  # int(224 * 0.84)
        url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth',
        hf_hub_id='timm/'),
    "tinynet_c.in1k": _cfg(
        input_size=(3, 184, 184), pool_size=(6, 6),  # int(224 * 0.825)
        url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth',
        hf_hub_id='timm/'),
    "tinynet_d.in1k": _cfg(
        input_size=(3, 152, 152), pool_size=(5, 5),  # int(224 * 0.68)
        url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth',
        hf_hub_id='timm/'),
    "tinynet_e.in1k": _cfg(
        input_size=(3, 106, 106), pool_size=(4, 4),  # int(224 * 0.475)
        url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth',
        hf_hub_id='timm/'),

    # MobileNet-EdgeTPU
    'mobilenet_edgetpu_100.untrained': _cfg(
        # hf_hub_id='timm/',
        input_size=(3, 224, 224), crop_pct=0.9),
    'mobilenet_edgetpu_v2_xs.untrained': _cfg(
        # hf_hub_id='timm/',
        input_size=(3, 224, 224), crop_pct=0.9),
    'mobilenet_edgetpu_v2_s.untrained': _cfg(
        # hf_hub_id='timm/',
        input_size=(3, 224, 224), crop_pct=0.9),
    'mobilenet_edgetpu_v2_m.ra4_e3600_r224_in1k': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD,
        crop_pct=0.9, test_input_size=(3, 256, 256), test_crop_pct=0.95,
    ),
    'mobilenet_edgetpu_v2_l.untrained': _cfg(
        # hf_hub_id='timm/',
        input_size=(3, 224, 224), crop_pct=0.9),

    # minimal test models
    "test_efficientnet.r160_in1k": _cfg(
        hf_hub_id='timm/',
        input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
    "test_efficientnet_ln.r160_in1k": _cfg(
        hf_hub_id='timm/',
        input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
    "test_efficientnet_gn.r160_in1k": _cfg(
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
    "test_efficientnet_evos.r160_in1k": _cfg(
        hf_hub_id='timm/',
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
        input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.95),
})


@register_model
def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet B1, depth multiplier of 0.5. """
    model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs)
    return model


@register_model
def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet B1, depth multiplier of 0.75. """
    model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs)
    return model


@register_model
def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet B1, depth multiplier of 1.0. """
    model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet B1, depth multiplier of 1.4 """
    model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs)
    return model


@register_model
def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """
    model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs)
    return model


@register_model
def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """
    model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs)
    return model


@register_model
def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """
    model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """
    model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs)
    return model


@register_model
def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet:
    """ MNASNet Small, depth multiplier of 1.0.
    """
    model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv1_100(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V1 """
    model = _gen_mobilenet_v1('mobilenetv1_100', 1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv1_100h(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V1 """
    model = _gen_mobilenet_v1('mobilenetv1_100h', 1.0, head_conv=True, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv1_125(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V1 """
    model = _gen_mobilenet_v1('mobilenetv1_125', 1.25, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V2 w/ 0.35 channel multiplier """
    model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V2 w/ 0.5 channel multiplier """
    model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V2 w/ 0.75 channel multiplier """
    model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V2 w/ 1.0 channel multiplier """
    model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V2 w/ 1.4 channel multiplier """
    model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers"""
    model = _gen_mobilenet_v2(
        'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """
    model = _gen_mobilenet_v2(
        'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs)
    return model


@register_model
def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet:
    """ FBNet-C """
    if pretrained:
        # pretrained model trained with non-default BN epsilon
        kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
    model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet:
    """ Single-Path NAS Pixel1"""
    model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B0 """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    model = _gen_efficientnet(
        'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B1 """
    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
    model = _gen_efficientnet(
        'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B2 """
    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
    model = _gen_efficientnet(
        'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
    return model


@register_model
def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet:
    """ EfficientNet-B3 """
    # NOTE for train, drop_rate should be 0.3,
drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B8 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2.""" # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_l2', channel_multiplier=4.3, 
depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model # FIXME experimental group cong / GroupNorm / EvoNorm experiments @register_model def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 + GroupNorm""" model = _gen_efficientnet( 'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ group conv + GroupNorm""" model = _gen_efficientnet( 'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ group 16 conv + EvoNorm""" model = _gen_efficientnet( 'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16, pretrained=pretrained, **kwargs) #norm_layer=partial(EvoNorm2dS0, group_size=16), return model @register_model def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 w/ GroupNorm """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 w/ grouped conv + BN""" # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_blur_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ BlurPool """ # NOTE for train, drop_rate 
should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_blur_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs ) return model @register_model def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small. """ model = _gen_efficientnet_edge( 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" model = _gen_efficientnet_edge( 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Medium. """ model = _gen_efficientnet_edge( 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large. """ model = _gen_efficientnet_edge( 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large pruned. 
For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" model = _gen_efficientnet_edge( 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B1 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, 
**kwargs) return model @register_model def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') variant = 'efficientnet_b1_pruned' model = _gen_efficientnet( variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 Pruned. 
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) return model @register_model def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, se_layer='gc', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small (RW variant). NOTE: This is my initial (pre official code release) w/ some differences. See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding """ model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium (RW variant). 
""" model = _gen_efficientnetv2_s( 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. """ model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. """ model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. """ model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. """ model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B8. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet( 'tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_edge( 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite3', channel_multiplier=1.2, 
depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnet_lite( 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B0. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B1. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B2. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B3. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_x_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_x( 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_x_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ model = _gen_efficientnet_x( 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_h_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ model = _gen_efficientnet_x( 'efficientnet_b5', channel_multiplier=1.92, depth_multiplier=2.2, version=2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. """ model = _gen_mixnet_s( 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. """ model = _gen_mixnet_m( 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. """ model = _gen_mixnet_m( 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Extra-Large model. Not a paper spec, experimental def by RW w/ depth scaling. 
""" model = _gen_mixnet_m( 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Double Extra Large model. Not a paper spec, experimental def by RW w/ depth scaling. """ model = _gen_mixnet_m( 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_s( 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m( 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. 
Tensorflow compatible variant """ kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT) kwargs.setdefault('pad_type', 'same') model = _gen_mixnet_m( 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tinynet_a(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) return model @register_model def tinynet_b(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) return model @register_model def tinynet_c(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) return model @register_model def tinynet_d(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) return model @register_model def tinynet_e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_100(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v1 100. """ model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_100', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_xs(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v2 Extra Small. """ model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_xs', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_s(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v2 Small. """ model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_s', pretrained=pretrained, **kwargs) return model @register_model def mobilenet_edgetpu_v2_m(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet-EdgeTPU-v2 Medium. 
    """
    model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_m', pretrained=pretrained, **kwargs)
    return model


@register_model
def mobilenet_edgetpu_v2_l(pretrained=False, **kwargs) -> EfficientNet:
    """ MobileNet-EdgeTPU-v2 Large. """
    model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_l', pretrained=pretrained, **kwargs)
    return model


@register_model
def test_efficientnet(pretrained=False, **kwargs) -> EfficientNet:
    """ Minimal EfficientNet variant for fast unit / smoke testing. """
    model = _gen_test_efficientnet('test_efficientnet', pretrained=pretrained, **kwargs)
    return model


@register_model
def test_efficientnet_gn(pretrained=False, **kwargs) -> EfficientNet:
    """ Minimal test EfficientNet w/ GroupNorm (group_size=8) as the default norm layer. """
    # kwargs.pop allows a caller-supplied norm_layer to override the default
    model = _gen_test_efficientnet(
        'test_efficientnet_gn', pretrained=pretrained,
        norm_layer=kwargs.pop('norm_layer', partial(GroupNormAct, group_size=8)), **kwargs
    )
    return model


@register_model
def test_efficientnet_ln(pretrained=False, **kwargs) -> EfficientNet:
    """ Minimal test EfficientNet w/ LayerNorm (2d) as the default norm layer. """
    model = _gen_test_efficientnet(
        'test_efficientnet_ln', pretrained=pretrained,
        norm_layer=kwargs.pop('norm_layer', LayerNormAct2d), **kwargs
    )
    return model


@register_model
def test_efficientnet_evos(pretrained=False, **kwargs) -> EfficientNet:
    """ Minimal test EfficientNet w/ EvoNorm-S0 (group_size=8) as the default norm layer. """
    model = _gen_test_efficientnet(
        'test_efficientnet_evos', pretrained=pretrained,
        norm_layer=kwargs.pop('norm_layer', partial(EvoNorm2dS0, group_size=8)), **kwargs
    )
    return model


# Map legacy standalone model names to the current `model.pretrained_tag` form so
# old checkpoints / configs keep resolving (emits a deprecation warning on use).
register_model_deprecations(__name__, {
    # AdvProp-trained weights
    'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k',
    'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k',
    'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k',
    'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k',
    'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k',
    'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k',
    'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k',
    'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k',
    'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k',
    # NoisyStudent (JFT) weights
    'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k',
    'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k',
    'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k',
    'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k',
    'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k',
    'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k',
    'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k',
    'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k',
    'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475',
    'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k',
    # EfficientNet-V2 ImageNet-21k pretrain / fine-tune weights
    'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k',
    'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k',
    'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k',
    'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k',
    'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k',
    'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k',
    'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k',
    'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k',
    # Misc renamed variants
    'efficientnet_b2a': 'efficientnet_b2',
    'efficientnet_b3a': 'efficientnet_b3',
    'mnasnet_a1': 'semnasnet_100',
    'mnasnet_b1': 'mnasnet_100',
})
pytorch-image-models/timm/models/efficientnet.py/0
{ "file_path": "pytorch-image-models/timm/models/efficientnet.py", "repo_id": "pytorch-image-models", "token_count": 59231 }
272
import math from copy import deepcopy from functools import partial from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, ClNormMlpClassifierHead, LayerScale, \ get_norm_layer, get_act_layer, init_weight_jax, init_weight_vit, to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint from ._registry import generate_default_cfgs, register_model def window_partition(x, window_size: Tuple[int, int]): """ Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows def window_unpartition(windows: torch.Tensor, window_size: Tuple[int, int], hw: Tuple[int, int]): """ Window unpartition into original sequences and removing padding. Args: x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C]. 
""" H, W = hw B = windows.shape[0] // (H * W // window_size[0] // window_size[1]) x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x def _calc_pad(H: int, W: int, window_size: Tuple[int, int]) -> Tuple[int, int, int, int]: pad_h = (window_size[0] - H % window_size[0]) % window_size[0] pad_w = (window_size[1] - W % window_size[1]) % window_size[1] Hp, Wp = H + pad_h, W + pad_w return Hp, Wp, pad_h, pad_w class MultiScaleAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, dim_out: int, num_heads: int, q_pool: nn.Module = None, ): super().__init__() self.dim = dim self.dim_out = dim_out self.num_heads = num_heads head_dim = dim_out // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q_pool = q_pool self.qkv = nn.Linear(dim, dim_out * 3) self.proj = nn.Linear(dim_out, dim_out) def forward(self, x: torch.Tensor) -> torch.Tensor: B, H, W, _ = x.shape # qkv with shape (B, H * W, 3, nHead, C) qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1) # q, k, v with shape (B, H * W, nheads, C) q, k, v = torch.unbind(qkv, 2) # Q pooling (for downsample at stage changes) if self.q_pool is not None: q = q.reshape(B, H, W, -1).permute(0, 3, 1, 2) # to BCHW for pool q = self.q_pool(q).permute(0, 2, 3, 1) H, W = q.shape[1:3] # downsampled shape q = q.reshape(B, H * W, self.num_heads, -1) # Torch's SDPA expects [B, nheads, H*W, C] so we transpose q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = q @ k.transpose(-1, -2) attn = attn.softmax(dim=-1) x = attn @ v # Transpose back x = x.transpose(1, 2).reshape(B, H, W, -1) x = self.proj(x) return x class MultiScaleBlock(nn.Module): def __init__( self, dim: int, dim_out: int, num_heads: int, mlp_ratio: float = 4.0, q_stride: Optional[Tuple[int, 
int]] = None, norm_layer: Union[nn.Module, str] = "LayerNorm", act_layer: Union[nn.Module, str] = "GELU", window_size: int = 0, init_values: Optional[float] = None, drop_path: float = 0.0, ): super().__init__() norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) self.window_size = to_2tuple(window_size) self.is_windowed = any(self.window_size) self.dim = dim self.dim_out = dim_out self.q_stride = q_stride if dim != dim_out: self.proj = nn.Linear(dim, dim_out) else: self.proj = nn.Identity() self.pool = None if self.q_stride: # note make a different instance for this Module so that it's not shared with attn module self.pool = nn.MaxPool2d( kernel_size=q_stride, stride=q_stride, ceil_mode=False, ) self.norm1 = norm_layer(dim) self.attn = MultiScaleAttention( dim, dim_out, num_heads=num_heads, q_pool=deepcopy(self.pool), ) self.ls1 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim_out) self.mlp = Mlp( dim_out, int(dim_out * mlp_ratio), act_layer=act_layer, ) self.ls2 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x # B, H, W, C x = self.norm1(x) # Skip connection if self.dim != self.dim_out: shortcut = self.proj(x) if self.pool is not None: shortcut = shortcut.permute(0, 3, 1, 2) shortcut = self.pool(shortcut).permute(0, 2, 3, 1) # Window partition window_size = self.window_size H, W = x.shape[1:3] Hp, Wp = H, W # keep torchscript happy if self.is_windowed: Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size) x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) x = window_partition(x, window_size) # Window Attention + Q Pooling (if stage change) x = self.attn(x) if self.q_stride is not None: # Shapes have changed due to Q pooling window_size = 
(self.window_size[0] // self.q_stride[0], self.window_size[1] // self.q_stride[1]) H, W = shortcut.shape[1:3] Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size) # Reverse window partition if self.is_windowed: x = window_unpartition(x, window_size, (Hp, Wp)) x = x[:, :H, :W, :].contiguous() # unpad x = shortcut + self.drop_path1(self.ls1(x)) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class HieraPatchEmbed(nn.Module): """ Image to Patch Embedding. """ def __init__( self, kernel_size: Tuple[int, ...] = (7, 7), stride: Tuple[int, ...] = (4, 4), padding: Tuple[int, ...] = (3, 3), in_chans: int = 3, embed_dim: int = 768, ): """ Args: kernel_size (Tuple): kernel size of the projection layer. stride (Tuple): stride of the projection layer. padding (Tuple): padding size of the projection layer. in_chans (int): Number of input image channels. embed_dim (int): embed_dim (int): Patch embedding dimension. """ super().__init__() self.proj = nn.Conv2d( in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding ) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) # B C H W -> B H W C x = x.permute(0, 2, 3, 1) return x class HieraDet(nn.Module): """ Reference: https://arxiv.org/abs/2306.00989 """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, # initial embed dim num_heads: int = 1, # initial number of heads patch_kernel: Tuple[int, ...] = (7, 7), patch_stride: Tuple[int, ...] = (4, 4), patch_padding: Tuple[int, ...] = (3, 3), patch_size: Optional[Tuple[int, ...]] = None, q_pool: int = 3, # number of q_pool stages q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage dim_mul: float = 2.0, # dim_mul factor at stage shift head_mul: float = 2.0, # head_mul factor at stage shift global_pos_size: Tuple[int, int] = (7, 7), # window size per stage, when not using global att. 
window_spec: Tuple[int, ...] = ( 8, 4, 14, 7, ), # global attn in these blocks global_att_blocks: Tuple[int, ...] = ( 12, 16, 20, ), init_values: Optional[float] = None, weight_init: str = '', fix_init: bool = True, head_init_scale: float = 0.001, drop_rate: float = 0.0, drop_path_rate: float = 0.0, # stochastic depth norm_layer: Union[nn.Module, str] = "LayerNorm", act_layer: Union[nn.Module, str] = "GELU", ): super().__init__() norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) assert len(stages) == len(window_spec) self.grad_checkpointing = False self.num_classes = num_classes self.window_spec = window_spec self.output_fmt = 'NHWC' depth = sum(stages) self.q_stride = q_stride self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] assert 0 <= q_pool <= len(self.stage_ends[:-1]) self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool] if patch_size is not None: # use a non-overlapping vit style patch embed self.patch_embed = PatchEmbed( img_size=None, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, output_fmt='NHWC', dynamic_img_pad=True, ) else: self.patch_embed = HieraPatchEmbed( kernel_size=patch_kernel, stride=patch_stride, padding=patch_padding, in_chans=in_chans, embed_dim=embed_dim, ) # Which blocks have global att? 
self.global_att_blocks = global_att_blocks # Windowed positional embedding (https://arxiv.org/abs/2311.05613) self.global_pos_size = global_pos_size self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.global_pos_size)) self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule cur_stage = 0 self.blocks = nn.Sequential() self.feature_info = [] for i in range(depth): dim_out = embed_dim # lags by a block, so first block of # next stage uses an initial window size # of previous stage and final window size of current stage window_size = self.window_spec[cur_stage] if self.global_att_blocks is not None: window_size = 0 if i in self.global_att_blocks else window_size if i - 1 in self.stage_ends: dim_out = int(embed_dim * dim_mul) num_heads = int(num_heads * head_mul) cur_stage += 1 block = MultiScaleBlock( dim=embed_dim, dim_out=dim_out, num_heads=num_heads, drop_path=dpr[i], q_stride=self.q_stride if i in self.q_pool_blocks else None, window_size=window_size, norm_layer=norm_layer, act_layer=act_layer, init_values=init_values, ) embed_dim = dim_out self.blocks.append(block) if i in self.stage_ends: self.feature_info += [ dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')] self.num_features = self.head_hidden_size = embed_dim self.head = ClNormMlpClassifierHead( embed_dim, num_classes, pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer, ) # Initialize everything if self.pos_embed is not None: nn.init.trunc_normal_(self.pos_embed, std=0.02) if self.pos_embed_window is not None: nn.init.trunc_normal_(self.pos_embed_window, std=0.02) if weight_init != 'skip': init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit init_fn = partial(init_fn, classifier_name='head.fc') named_apply(init_fn, self) if fix_init: self.fix_init_weight() if 
isinstance(self.head, ClNormMlpClassifierHead) and isinstance(self.head.fc, nn.Linear): self.head.fc.weight.data.mul_(head_init_scale) self.head.fc.bias.data.mul_(head_init_scale) def _pos_embed(self, x: torch.Tensor) -> torch.Tensor: h, w = x.shape[1:3] window_embed = self.pos_embed_window pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic") tile_h = pos_embed.shape[-2] // window_embed.shape[-2] tile_w = pos_embed.shape[-1] // window_embed.shape[-1] pos_embed = pos_embed + window_embed.tile((tile_h, tile_w)) pos_embed = pos_embed.permute(0, 2, 3, 1) return x + pos_embed def fix_init_weight(self): def rescale(param, _layer_id): param.div_(math.sqrt(2.0 * _layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) @torch.jit.ignore def no_weight_decay(self): return ['pos_embed', 'pos_embed_window'] @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict: return dict( stem=r'^pos_embed|pos_embed_window|patch_embed', blocks=[(r'^blocks\.(\d+)', None)] ) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool, reset_other=reset_other) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = True, output_fmt: str = 'NCHW', intermediates_only: bool = False, coarse: bool = True, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. 
Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features coarse: Take coarse features (stage ends) if true, otherwise all block featrures Returns: """ assert not norm, 'normalization of features not supported' assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.' if coarse: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] else: take_indices, max_index = feature_take_indices(len(self.blocks), indices) x = self.patch_embed(x) x = self._pos_embed(x) intermediates = [] if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) if i in take_indices: x_out = x.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x intermediates.append(x_out) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, coarse: bool = True, ): """ Prune layers not required for specified intermediates. 
""" if coarse: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] else: take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_head: self.head.reset(0, reset_other=prune_norm) return take_indices def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) # BHWC x = self._pos_embed(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) return x def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) return x def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x # NOTE sam2 appears to use 1024x1024 for all models, but T, S, & B+ have windows that fit multiples of 224. def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 0, 'input_size': (3, 896, 896), 'pool_size': (28, 28), 'crop_pct': 1.0, 'interpolation': 'bicubic', 'min_input_size': (3, 224, 224), 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ "sam2_hiera_tiny.fb_r896": _cfg( # hf_hub_id='facebook/sam2-hiera-tiny', # hf_hub_filename='sam2_hiera_tiny.pt', hf_hub_id='timm/', ), "sam2_hiera_tiny.fb_r896_2pt1": _cfg( # hf_hub_id='facebook/sam2.1-hiera-tiny', # hf_hub_filename='sam2.1_hiera_tiny.pt', hf_hub_id='timm/', ), "sam2_hiera_small.fb_r896": _cfg( # hf_hub_id='facebook/sam2-hiera-small', # hf_hub_filename='sam2_hiera_small.pt', hf_hub_id='timm/', ), "sam2_hiera_small.fb_r896_2pt1": _cfg( # hf_hub_id='facebook/sam2.1-hiera-small', # hf_hub_filename='sam2.1_hiera_small.pt', hf_hub_id='timm/', ), "sam2_hiera_base_plus.fb_r896": _cfg( # hf_hub_id='facebook/sam2-hiera-base-plus', # 
hf_hub_filename='sam2_hiera_base_plus.pt', hf_hub_id='timm/', ), "sam2_hiera_base_plus.fb_r896_2pt1": _cfg( # hf_hub_id='facebook/sam2.1-hiera-base-plus', # hf_hub_filename='sam2.1_hiera_base_plus.pt', hf_hub_id='timm/', ), "sam2_hiera_large.fb_r1024": _cfg( # hf_hub_id='facebook/sam2-hiera-large', # hf_hub_filename='sam2_hiera_large.pt', hf_hub_id='timm/', min_input_size=(3, 256, 256), input_size=(3, 1024, 1024), pool_size=(32, 32), ), "sam2_hiera_large.fb_r1024_2pt1": _cfg( # hf_hub_id='facebook/sam2.1-hiera-large', # hf_hub_filename='sam2.1_hiera_large.pt', hf_hub_id='timm/', min_input_size=(3, 256, 256), input_size=(3, 1024, 1024), pool_size=(32, 32), ), "hieradet_small.untrained": _cfg( num_classes=1000, input_size=(3, 256, 256), pool_size=(8, 8), ), }) def checkpoint_filter_fn(state_dict, model=None, prefix=''): state_dict = state_dict.get('model', state_dict) output = {} for k, v in state_dict.items(): if k.startswith(prefix): k = k.replace(prefix, '') else: continue k = k.replace('mlp.layers.0', 'mlp.fc1') k = k.replace('mlp.layers.1', 'mlp.fc2') output[k] = v return output def _create_hiera_det(variant: str, pretrained: bool = False, **kwargs) -> HieraDet: out_indices = kwargs.pop('out_indices', 4) checkpoint_prefix = '' # if 'sam2' in variant: # # SAM2 pretrained weights have no classifier or final norm-layer (`head.norm`) # # This is workaround loading with num_classes=0 w/o removing norm-layer. # kwargs.setdefault('pretrained_strict', False) # checkpoint_prefix = 'image_encoder.trunk.' 
return build_model_with_cfg( HieraDet, variant, pretrained, pretrained_filter_fn=partial(checkpoint_filter_fn, prefix=checkpoint_prefix), feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) @register_model def sam2_hiera_tiny(pretrained=False, **kwargs): model_args = dict(stages=(1, 2, 7, 2), global_att_blocks=(5, 7, 9)) return _create_hiera_det('sam2_hiera_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def sam2_hiera_small(pretrained=False, **kwargs): model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13)) return _create_hiera_det('sam2_hiera_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def sam2_hiera_base_plus(pretrained=False, **kwargs): model_args = dict(embed_dim=112, num_heads=2, global_pos_size=(14, 14)) return _create_hiera_det('sam2_hiera_base_plus', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def sam2_hiera_large(pretrained=False, **kwargs): model_args = dict( embed_dim=144, num_heads=2, stages=(2, 6, 36, 4), global_att_blocks=(23, 33, 43), window_spec=(8, 4, 16, 8), ) return _create_hiera_det('sam2_hiera_large', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hieradet_small(pretrained=False, **kwargs): model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13), window_spec=(8, 4, 16, 8), init_values=1e-5) return _create_hiera_det('hieradet_small', pretrained=pretrained, **dict(model_args, **kwargs)) # @register_model # def hieradet_base(pretrained=False, **kwargs): # model_args = dict(window_spec=(8, 4, 16, 8)) # return _create_hiera_det('hieradet_base', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/hieradet_sam2.py/0
{ "file_path": "pytorch-image-models/timm/models/hieradet_sam2.py", "repo_id": "pytorch-image-models", "token_count": 11986 }
273
""" Multi-Scale Vision Transformer v2 @inproceedings{li2021improved, title={MViTv2: Improved multiscale vision transformers for classification and detection}, author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, booktitle={CVPR}, year={2022} } Code adapted from original Apache 2.0 licensed impl at https://github.com/facebookresearch/mvit Original copyright below. Modifications and timm support by / Copyright 2022, Ross Wightman """ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved. All Rights Reserved. import operator from collections import OrderedDict from dataclasses import dataclass from functools import partial, reduce from typing import Union, List, Tuple, Optional import torch from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, trunc_normal_tf_, get_norm_layer, to_2tuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import checkpoint from ._registry import register_model, generate_default_cfgs __all__ = ['MultiScaleVit', 'MultiScaleVitCfg'] # model_registry will add each entrypoint fn to this @dataclass class MultiScaleVitCfg: depths: Tuple[int, ...] = (2, 3, 16, 3) embed_dim: Union[int, Tuple[int, ...]] = 96 num_heads: Union[int, Tuple[int, ...]] = 1 mlp_ratio: float = 4. 
pool_first: bool = False expand_attn: bool = True qkv_bias: bool = True use_cls_token: bool = False use_abs_pos: bool = False residual_pooling: bool = True mode: str = 'conv' kernel_qkv: Tuple[int, int] = (3, 3) stride_q: Optional[Tuple[Tuple[int, int]]] = ((1, 1), (2, 2), (2, 2), (2, 2)) stride_kv: Optional[Tuple[Tuple[int, int]]] = None stride_kv_adaptive: Optional[Tuple[int, int]] = (4, 4) patch_kernel: Tuple[int, int] = (7, 7) patch_stride: Tuple[int, int] = (4, 4) patch_padding: Tuple[int, int] = (3, 3) pool_type: str = 'max' rel_pos_type: str = 'spatial' act_layer: Union[str, Tuple[str, str]] = 'gelu' norm_layer: Union[str, Tuple[str, str]] = 'layernorm' norm_eps: float = 1e-6 def __post_init__(self): num_stages = len(self.depths) if not isinstance(self.embed_dim, (tuple, list)): self.embed_dim = tuple(self.embed_dim * 2 ** i for i in range(num_stages)) assert len(self.embed_dim) == num_stages if not isinstance(self.num_heads, (tuple, list)): self.num_heads = tuple(self.num_heads * 2 ** i for i in range(num_stages)) assert len(self.num_heads) == num_stages if self.stride_kv_adaptive is not None and self.stride_kv is None: _stride_kv = self.stride_kv_adaptive pool_kv_stride = [] for i in range(num_stages): if min(self.stride_q[i]) > 1: _stride_kv = [ max(_stride_kv[d] // self.stride_q[i][d], 1) for d in range(len(_stride_kv)) ] pool_kv_stride.append(tuple(_stride_kv)) self.stride_kv = tuple(pool_kv_stride) def prod(iterable): return reduce(operator.mul, iterable, 1) class PatchEmbed(nn.Module): """ PatchEmbed. 
""" def __init__( self, dim_in=3, dim_out=768, kernel=(7, 7), stride=(4, 4), padding=(3, 3), ): super().__init__() self.proj = nn.Conv2d( dim_in, dim_out, kernel_size=kernel, stride=stride, padding=padding, ) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: x = self.proj(x) # B C H W -> B HW C return x.flatten(2).transpose(1, 2), x.shape[-2:] @register_notrace_function def reshape_pre_pool( x, feat_size: List[int], has_cls_token: bool = True ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: H, W = feat_size if has_cls_token: cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :] else: cls_tok = None x = x.reshape(-1, H, W, x.shape[-1]).permute(0, 3, 1, 2).contiguous() return x, cls_tok @register_notrace_function def reshape_post_pool( x, num_heads: int, cls_tok: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, List[int]]: feat_size = [x.shape[2], x.shape[3]] L_pooled = x.shape[2] * x.shape[3] x = x.reshape(-1, num_heads, x.shape[1], L_pooled).transpose(2, 3) if cls_tok is not None: x = torch.cat((cls_tok, x), dim=2) return x, feat_size @register_notrace_function def cal_rel_pos_type( attn: torch.Tensor, q: torch.Tensor, has_cls_token: bool, q_size: List[int], k_size: List[int], rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, ): """ Spatial Relative Positional Embeddings. """ sp_idx = 1 if has_cls_token else 0 q_h, q_w = q_size k_h, k_w = k_size # Scale up rel pos if shapes for q and k are different. 
q_h_ratio = max(k_h / q_h, 1.0) k_h_ratio = max(q_h / k_h, 1.0) dist_h = ( torch.arange(q_h, device=q.device).unsqueeze(-1) * q_h_ratio - torch.arange(k_h, device=q.device).unsqueeze(0) * k_h_ratio ) dist_h += (k_h - 1) * k_h_ratio q_w_ratio = max(k_w / q_w, 1.0) k_w_ratio = max(q_w / k_w, 1.0) dist_w = ( torch.arange(q_w, device=q.device).unsqueeze(-1) * q_w_ratio - torch.arange(k_w, device=q.device).unsqueeze(0) * k_w_ratio ) dist_w += (k_w - 1) * k_w_ratio rel_h = rel_pos_h[dist_h.long()] rel_w = rel_pos_w[dist_w.long()] B, n_head, q_N, dim = q.shape r_q = q[:, :, sp_idx:].reshape(B, n_head, q_h, q_w, dim) rel_h = torch.einsum("byhwc,hkc->byhwk", r_q, rel_h) rel_w = torch.einsum("byhwc,wkc->byhwk", r_q, rel_w) attn[:, :, sp_idx:, sp_idx:] = ( attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w) + rel_h.unsqueeze(-1) + rel_w.unsqueeze(-2) ).view(B, -1, q_h * q_w, k_h * k_w) return attn class MultiScaleAttentionPoolFirst(nn.Module): def __init__( self, dim, dim_out, feat_size, num_heads=8, qkv_bias=True, mode="conv", kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm, ): super().__init__() self.num_heads = num_heads self.dim_out = dim_out self.head_dim = dim_out // num_heads self.scale = self.head_dim ** -0.5 self.has_cls_token = has_cls_token padding_q = tuple([int(q // 2) for q in kernel_q]) padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) self.q = nn.Linear(dim, dim_out, bias=qkv_bias) self.k = nn.Linear(dim, dim_out, bias=qkv_bias) self.v = nn.Linear(dim, dim_out, bias=qkv_bias) self.proj = nn.Linear(dim_out, dim_out) # Skip pooling with kernel and stride size of (1, 1, 1). 
if prod(kernel_q) == 1 and prod(stride_q) == 1: kernel_q = None if prod(kernel_kv) == 1 and prod(stride_kv) == 1: kernel_kv = None self.mode = mode self.unshared = mode == 'conv_unshared' self.pool_q, self.pool_k, self.pool_v = None, None, None self.norm_q, self.norm_k, self.norm_v = None, None, None if mode in ("avg", "max"): pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d if kernel_q: self.pool_q = pool_op(kernel_q, stride_q, padding_q) if kernel_kv: self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) elif mode == "conv" or mode == "conv_unshared": dim_conv = dim // num_heads if mode == "conv" else dim if kernel_q: self.pool_q = nn.Conv2d( dim_conv, dim_conv, kernel_q, stride=stride_q, padding=padding_q, groups=dim_conv, bias=False, ) self.norm_q = norm_layer(dim_conv) if kernel_kv: self.pool_k = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_k = norm_layer(dim_conv) self.pool_v = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_v = norm_layer(dim_conv) else: raise NotImplementedError(f"Unsupported model {mode}") # relative pos embedding self.rel_pos_type = rel_pos_type if self.rel_pos_type == 'spatial': assert feat_size[0] == feat_size[1] size = feat_size[0] q_size = size // stride_q[1] if len(stride_q) > 0 else size kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size rel_sp_dim = 2 * max(q_size, kv_size) - 1 self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) trunc_normal_tf_(self.rel_pos_h, std=0.02) trunc_normal_tf_(self.rel_pos_w, std=0.02) self.residual_pooling = residual_pooling def forward(self, x, feat_size: List[int]): B, N, _ = x.shape fold_dim = 1 if self.unshared else self.num_heads x = x.reshape(B, N, fold_dim, -1).permute(0, 2, 1, 3) q = k 
= v = x if self.pool_q is not None: q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token) q = self.pool_q(q) q, q_size = reshape_post_pool(q, self.num_heads, q_tok) else: q_size = feat_size if self.norm_q is not None: q = self.norm_q(q) if self.pool_k is not None: k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token) k = self.pool_k(k) k, k_size = reshape_post_pool(k, self.num_heads, k_tok) else: k_size = feat_size if self.norm_k is not None: k = self.norm_k(k) if self.pool_v is not None: v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token) v = self.pool_v(v) v, v_size = reshape_post_pool(v, self.num_heads, v_tok) else: v_size = feat_size if self.norm_v is not None: v = self.norm_v(v) q_N = q_size[0] * q_size[1] + int(self.has_cls_token) q = q.transpose(1, 2).reshape(B, q_N, -1) q = self.q(q).reshape(B, q_N, self.num_heads, -1).transpose(1, 2) k_N = k_size[0] * k_size[1] + int(self.has_cls_token) k = k.transpose(1, 2).reshape(B, k_N, -1) k = self.k(k).reshape(B, k_N, self.num_heads, -1) v_N = v_size[0] * v_size[1] + int(self.has_cls_token) v = v.transpose(1, 2).reshape(B, v_N, -1) v = self.v(v).reshape(B, v_N, self.num_heads, -1).transpose(1, 2) attn = (q * self.scale) @ k if self.rel_pos_type == 'spatial': attn = cal_rel_pos_type( attn, q, self.has_cls_token, q_size, k_size, self.rel_pos_h, self.rel_pos_w, ) attn = attn.softmax(dim=-1) x = attn @ v if self.residual_pooling: x = x + q x = x.transpose(1, 2).reshape(B, -1, self.dim_out) x = self.proj(x) return x, q_size class MultiScaleAttention(nn.Module): def __init__( self, dim, dim_out, feat_size, num_heads=8, qkv_bias=True, mode="conv", kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm, ): super().__init__() self.num_heads = num_heads self.dim_out = dim_out self.head_dim = dim_out // num_heads self.scale = self.head_dim ** -0.5 self.has_cls_token = has_cls_token padding_q = 
tuple([int(q // 2) for q in kernel_q]) padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) self.proj = nn.Linear(dim_out, dim_out) # Skip pooling with kernel and stride size of (1, 1, 1). if prod(kernel_q) == 1 and prod(stride_q) == 1: kernel_q = None if prod(kernel_kv) == 1 and prod(stride_kv) == 1: kernel_kv = None self.mode = mode self.unshared = mode == 'conv_unshared' self.norm_q, self.norm_k, self.norm_v = None, None, None self.pool_q, self.pool_k, self.pool_v = None, None, None if mode in ("avg", "max"): pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d if kernel_q: self.pool_q = pool_op(kernel_q, stride_q, padding_q) if kernel_kv: self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) elif mode == "conv" or mode == "conv_unshared": dim_conv = dim_out // num_heads if mode == "conv" else dim_out if kernel_q: self.pool_q = nn.Conv2d( dim_conv, dim_conv, kernel_q, stride=stride_q, padding=padding_q, groups=dim_conv, bias=False, ) self.norm_q = norm_layer(dim_conv) if kernel_kv: self.pool_k = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_k = norm_layer(dim_conv) self.pool_v = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_v = norm_layer(dim_conv) else: raise NotImplementedError(f"Unsupported model {mode}") # relative pos embedding self.rel_pos_type = rel_pos_type if self.rel_pos_type == 'spatial': assert feat_size[0] == feat_size[1] size = feat_size[0] q_size = size // stride_q[1] if len(stride_q) > 0 else size kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size rel_sp_dim = 2 * max(q_size, kv_size) - 1 self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) trunc_normal_tf_(self.rel_pos_h, std=0.02) 
# NOTE(review): the enclosing attention module's __init__ starts before this chunk;
# the two statements below are its tail (rel_pos table init + residual flag).
        trunc_normal_tf_(self.rel_pos_w, std=0.02)

        self.residual_pooling = residual_pooling

    def forward(self, x, feat_size: List[int]):
        """Pooling attention forward.

        Computes q/k/v from a fused projection, optionally pools each with its
        own conv/pool module (MViTv2 'pooling attention'), applies decomposed
        relative position bias when enabled, and returns the attended tokens
        together with the (possibly downsampled) query feature size.
        """
        B, N, _ = x.shape
        # (B, N, 3*H*dh) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(dim=0)

        # Optional spatial pooling of q; q_size tracks the output feature size.
        if self.pool_q is not None:
            q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token)
            q = self.pool_q(q)
            q, q_size = reshape_post_pool(q, self.num_heads, q_tok)
        else:
            q_size = feat_size
        if self.norm_q is not None:
            q = self.norm_q(q)

        # Optional spatial pooling of k; k_size needed for rel-pos computation.
        if self.pool_k is not None:
            k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token)
            k = self.pool_k(k)
            k, k_size = reshape_post_pool(k, self.num_heads, k_tok)
        else:
            k_size = feat_size
        if self.norm_k is not None:
            k = self.norm_k(k)

        # Optional spatial pooling of v (its output size is not needed later).
        if self.pool_v is not None:
            v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token)
            v = self.pool_v(v)
            v, _ = reshape_post_pool(v, self.num_heads, v_tok)
        if self.norm_v is not None:
            v = self.norm_v(v)

        attn = (q * self.scale) @ k.transpose(-2, -1)
        if self.rel_pos_type == 'spatial':
            # Add decomposed (h, w) relative position bias to attention logits.
            attn = cal_rel_pos_type(
                attn,
                q,
                self.has_cls_token,
                q_size,
                k_size,
                self.rel_pos_h,
                self.rel_pos_w,
            )
        attn = attn.softmax(dim=-1)
        x = attn @ v
        if self.residual_pooling:
            # MViTv2 residual connection over the pooled query.
            x = x + q

        x = x.transpose(1, 2).reshape(B, -1, self.dim_out)
        x = self.proj(x)

        return x, q_size


class MultiScaleBlock(nn.Module):
    """MViTv2 transformer block: pooling attention + MLP with pooled/projected shortcuts."""

    def __init__(
            self,
            dim,
            dim_out,
            num_heads,
            feat_size,
            mlp_ratio=4.0,
            qkv_bias=True,
            drop_path=0.0,
            norm_layer=nn.LayerNorm,
            kernel_q=(1, 1),
            kernel_kv=(1, 1),
            stride_q=(1, 1),
            stride_kv=(1, 1),
            mode="conv",
            has_cls_token=True,
            expand_attn=False,
            pool_first=False,
            rel_pos_type='spatial',
            residual_pooling=True,
    ):
        super().__init__()
        proj_needed = dim != dim_out
        self.dim = dim
        self.dim_out = dim_out
        self.has_cls_token = has_cls_token

        self.norm1 = norm_layer(dim)

        # When dims change inside attention (expand_attn), shortcut needs a projection here.
        self.shortcut_proj_attn = nn.Linear(dim, dim_out) if proj_needed and expand_attn else None
        if stride_q and prod(stride_q) > 1:
            # Shortcut must be downsampled to match the pooled query resolution.
            kernel_skip = [s + 1 if s > 1 else s for s in stride_q]
            stride_skip = stride_q
            padding_skip = [int(skip // 2) for skip in kernel_skip]
            self.shortcut_pool_attn = nn.MaxPool2d(kernel_skip, stride_skip, padding_skip)
        else:
            self.shortcut_pool_attn = None

        att_dim = dim_out if expand_attn else dim
        attn_layer = MultiScaleAttentionPoolFirst if pool_first else MultiScaleAttention
        self.attn = attn_layer(
            dim,
            att_dim,
            num_heads=num_heads,
            feat_size=feat_size,
            qkv_bias=qkv_bias,
            kernel_q=kernel_q,
            kernel_kv=kernel_kv,
            stride_q=stride_q,
            stride_kv=stride_kv,
            norm_layer=norm_layer,
            has_cls_token=has_cls_token,
            mode=mode,
            rel_pos_type=rel_pos_type,
            residual_pooling=residual_pooling,
        )
        self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.norm2 = norm_layer(att_dim)
        mlp_dim_out = dim_out
        # When dims change in the MLP instead (not expand_attn), the MLP shortcut projects.
        self.shortcut_proj_mlp = nn.Linear(dim, dim_out) if proj_needed and not expand_attn else None
        self.mlp = Mlp(
            in_features=att_dim,
            hidden_features=int(att_dim * mlp_ratio),
            out_features=mlp_dim_out,
        )
        self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def _shortcut_pool(self, x, feat_size: List[int]):
        """Spatially pool the shortcut path (cls token, if any, bypasses pooling)."""
        if self.shortcut_pool_attn is None:
            return x
        if self.has_cls_token:
            cls_tok, x = x[:, :1, :], x[:, 1:, :]
        else:
            cls_tok = None
        B, L, C = x.shape
        H, W = feat_size
        x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous()
        x = self.shortcut_pool_attn(x)
        x = x.reshape(B, C, -1).transpose(1, 2)
        if cls_tok is not None:
            x = torch.cat((cls_tok, x), dim=1)
        return x

    def forward(self, x, feat_size: List[int]):
        x_norm = self.norm1(x)
        # NOTE as per the original impl, this seems odd, but shortcut uses un-normalized input if no proj
        x_shortcut = x if self.shortcut_proj_attn is None else self.shortcut_proj_attn(x_norm)
        x_shortcut = self._shortcut_pool(x_shortcut, feat_size)
        x, feat_size_new = self.attn(x_norm, feat_size)
        x = x_shortcut + self.drop_path1(x)

        x_norm = self.norm2(x)
        x_shortcut = x if self.shortcut_proj_mlp is None else self.shortcut_proj_mlp(x_norm)
        x = x_shortcut + self.drop_path2(self.mlp(x_norm))
        return x, feat_size_new


class MultiScaleVitStage(nn.Module):
    """A stage of MultiScaleBlocks; only the first block applies the query stride."""

    def __init__(
            self,
            dim,
            dim_out,
            depth,
            num_heads,
            feat_size,
            mlp_ratio=4.0,
            qkv_bias=True,
            mode="conv",
            kernel_q=(1, 1),
            kernel_kv=(1, 1),
            stride_q=(1, 1),
            stride_kv=(1, 1),
            has_cls_token=True,
            expand_attn=False,
            pool_first=False,
            rel_pos_type='spatial',
            residual_pooling=True,
            norm_layer=nn.LayerNorm,
            drop_path=0.0,
    ):
        super().__init__()
        self.grad_checkpointing = False

        self.blocks = nn.ModuleList()
        if expand_attn:
            # dim expands inside attention -> all blocks output dim_out
            out_dims = (dim_out,) * depth
        else:
            # dim expands in the last block's MLP
            out_dims = (dim,) * (depth - 1) + (dim_out,)

        for i in range(depth):
            attention_block = MultiScaleBlock(
                dim=dim,
                dim_out=out_dims[i],
                num_heads=num_heads,
                feat_size=feat_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                kernel_q=kernel_q,
                kernel_kv=kernel_kv,
                stride_q=stride_q if i == 0 else (1, 1),
                stride_kv=stride_kv,
                mode=mode,
                has_cls_token=has_cls_token,
                pool_first=pool_first,
                rel_pos_type=rel_pos_type,
                residual_pooling=residual_pooling,
                expand_attn=expand_attn,
                norm_layer=norm_layer,
                drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path,
            )
            dim = out_dims[i]
            self.blocks.append(attention_block)
            if i == 0:
                # First block downsamples; track resulting feature map size.
                feat_size = tuple([size // stride for size, stride in zip(feat_size, stride_q)])

        self.feat_size = feat_size

    def forward(self, x, feat_size: List[int]):
        for blk in self.blocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x, feat_size = checkpoint(blk, x, feat_size)
            else:
                x, feat_size = blk(x, feat_size)
        return x, feat_size


class MultiScaleVit(nn.Module):
    """
    Improved Multiscale Vision Transformers for Classification and Detection
    Yanghao Li*, Chao-Yuan Wu*, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik,
        Christoph Feichtenhofer*
    https://arxiv.org/abs/2112.01526

    Multiscale Vision Transformers
    Haoqi Fan*, Bo Xiong*, Karttikeya Mangalam*, Yanghao Li*, Zhicheng Yan, Jitendra Malik,
        Christoph Feichtenhofer*
    https://arxiv.org/abs/2104.11227
    """

    def __init__(
            self,
            cfg: MultiScaleVitCfg,
            img_size: Tuple[int, int] = (224, 224),
            in_chans: int = 3,
            global_pool: Optional[str] = None,
            num_classes: int = 1000,
            drop_path_rate: float = 0.,
            drop_rate: float = 0.,
    ):
        """
        Args:
            cfg: Architecture configuration (depths, dims, heads, pooling strides, etc.).
            img_size: Input image size (fixed; rel-pos tables are sized from it).
            in_chans: Number of input image channels.
            global_pool: 'token' or 'avg'; defaults based on cfg.use_cls_token.
            num_classes: Number of classifier classes.
            drop_path_rate: Stochastic depth rate (linearly distributed over blocks).
            drop_rate: Head dropout rate.
        """
        super().__init__()
        img_size = to_2tuple(img_size)
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        if global_pool is None:
            global_pool = 'token' if cfg.use_cls_token else 'avg'
        self.global_pool = global_pool
        self.depths = tuple(cfg.depths)
        self.expand_attn = cfg.expand_attn

        embed_dim = cfg.embed_dim[0]
        self.patch_embed = PatchEmbed(
            dim_in=in_chans,
            dim_out=embed_dim,
            kernel=cfg.patch_kernel,
            stride=cfg.patch_stride,
            padding=cfg.patch_padding,
        )
        patch_dims = (img_size[0] // cfg.patch_stride[0], img_size[1] // cfg.patch_stride[1])
        num_patches = prod(patch_dims)

        if cfg.use_cls_token:
            self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
            self.num_prefix_tokens = 1
            pos_embed_dim = num_patches + 1
        else:
            self.num_prefix_tokens = 0
            self.cls_token = None
            pos_embed_dim = num_patches

        if cfg.use_abs_pos:
            self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_dim, embed_dim))
        else:
            self.pos_embed = None

        num_stages = len(cfg.embed_dim)
        feat_size = patch_dims
        curr_stride = max(cfg.patch_stride)
        # Per-block stochastic depth rates, split per stage.
        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
        self.stages = nn.ModuleList()
        self.feature_info = []
        for i in range(num_stages):
            if cfg.expand_attn:
                dim_out = cfg.embed_dim[i]
            else:
                dim_out = cfg.embed_dim[min(i + 1, num_stages - 1)]
            stage = MultiScaleVitStage(
                dim=embed_dim,
                dim_out=dim_out,
                depth=cfg.depths[i],
                num_heads=cfg.num_heads[i],
                feat_size=feat_size,
                mlp_ratio=cfg.mlp_ratio,
                qkv_bias=cfg.qkv_bias,
                mode=cfg.mode,
                pool_first=cfg.pool_first,
                expand_attn=cfg.expand_attn,
                kernel_q=cfg.kernel_qkv,
                kernel_kv=cfg.kernel_qkv,
                stride_q=cfg.stride_q[i],
                stride_kv=cfg.stride_kv[i],
                has_cls_token=cfg.use_cls_token,
                rel_pos_type=cfg.rel_pos_type,
                residual_pooling=cfg.residual_pooling,
                norm_layer=norm_layer,
                drop_path=dpr[i],
            )
            curr_stride *= max(cfg.stride_q[i])
            self.feature_info += [dict(module=f'block.{i}', num_chs=dim_out, reduction=curr_stride)]
            embed_dim = dim_out
            feat_size = stage.feat_size
            self.stages.append(stage)

        self.num_features = self.head_hidden_size = embed_dim
        self.norm = norm_layer(embed_dim)
        self.head = nn.Sequential(OrderedDict([
            ('drop', nn.Dropout(self.drop_rate)),
            ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
        ]))

        if self.pos_embed is not None:
            trunc_normal_tf_(self.pos_embed, std=0.02)
        if self.cls_token is not None:
            trunc_normal_tf_(self.cls_token, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Linear layers: truncated-normal weights, zero bias.
        if isinstance(m, nn.Linear):
            trunc_normal_tf_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Exclude position/rel-pos tables and cls token from weight decay.
        return {k for k, _ in self.named_parameters()
                if any(n in k for n in ["pos_embed", "rel_pos_h", "rel_pos_w", "cls_token"])}

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        matcher = dict(
            stem=r'^patch_embed',  # stem and embed
            blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
        )
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        for s in self.stages:
            s.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        return self.head.fc

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
        self.num_classes = num_classes
        if global_pool is not None:
            self.global_pool = global_pool
        self.head = nn.Sequential(OrderedDict([
            ('drop', nn.Dropout(self.drop_rate)),
            ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
        ]))

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            norm: bool = False,
            stop_early: bool = False,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        """ Forward features that returns intermediates.

        Args:
            x: Input image tensor
            indices: Take last n blocks if int, all if None, select matching indices if sequence
            norm: Apply norm layer to all intermediates
            stop_early: Stop iterating over blocks when last desired intermediate hit
            output_fmt: Shape of intermediate feature outputs
            intermediates_only: Only return intermediate features
        Returns:

        """
        assert output_fmt in ('NCHW', 'NLC'), 'Output shape must be NCHW or NLC.'
        reshape = output_fmt == 'NCHW'
        intermediates = []
        take_indices, max_index = feature_take_indices(len(self.stages), indices)

        # FIXME slice block/pos_block if < max

        # forward pass
        x, feat_size = self.patch_embed(x)
        B = x.shape[0]
        if self.cls_token is not None:
            cls_tokens = self.cls_token.expand(B, -1, -1)
            x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed

        last_idx = len(self.stages) - 1
        for feat_idx, stage in enumerate(self.stages):
            x, feat_size = stage(x, feat_size)
            if feat_idx in take_indices:
                if norm and feat_idx == last_idx:
                    x_inter = self.norm(x)  # applying final norm last intermediate
                else:
                    x_inter = x
                if reshape:
                    if self.cls_token is not None:
                        # possible to allow return of class tokens, TBD
                        x_inter = x_inter[:, 1:]
                    x_inter = x_inter.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2)
                intermediates.append(x_inter)

        if intermediates_only:
            return intermediates

        if feat_idx == last_idx:
            x = self.norm(x)

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
    ):
        """ Prune layers not required for specified intermediates.
        """
        take_indices, max_index = feature_take_indices(len(self.stages), indices)
        # FIXME add stage pruning
        # self.stages = self.stages[:max_index]  # truncate blocks w/ stem as idx 0
        if prune_norm:
            self.norm = nn.Identity()
        if prune_head:
            self.reset_classifier(0, '')
        return take_indices

    def forward_features(self, x):
        x, feat_size = self.patch_embed(x)
        B, N, C = x.shape
        if self.cls_token is not None:
            cls_tokens = self.cls_token.expand(B, -1, -1)
            x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        for stage in self.stages:
            x, feat_size = stage(x, feat_size)
        x = self.norm(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        if self.global_pool:
            if self.global_pool == 'avg':
                # Mean over spatial tokens, excluding any prefix (cls) tokens.
                x = x[:, self.num_prefix_tokens:].mean(1)
            else:
                x = x[:, 0]
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def checkpoint_filter_fn(state_dict, model):
    """Remap original FB MViTv2 checkpoints to this implementation's layout.

    Native timm checkpoints only get rel-pos table interpolation; FB checkpoints
    get flat block indices remapped to stages and proj/head keys renamed.
    """
    if 'stages.0.blocks.0.norm1.weight' in state_dict:
        # native checkpoint, look for rel_pos interpolations
        for k in state_dict.keys():
            if 'rel_pos' in k:
                rel_pos = state_dict[k]
                dest_rel_pos_shape = model.state_dict()[k].shape
                if rel_pos.shape[0] != dest_rel_pos_shape[0]:
                    # Linear interpolation to the destination rel-pos table length.
                    rel_pos_resized = torch.nn.functional.interpolate(
                        rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
                        size=dest_rel_pos_shape[0],
                        mode="linear",
                    )
                    state_dict[k] = rel_pos_resized.reshape(-1, dest_rel_pos_shape[0]).permute(1, 0)
        return state_dict

    import re
    if 'model_state' in state_dict:
        state_dict = state_dict['model_state']

    depths = getattr(model, 'depths', None)
    expand_attn = getattr(model, 'expand_attn', True)
    assert depths is not None, 'model requires depth attribute to remap checkpoints'
    # Map flat block index -> (stage index, block-in-stage index).
    depth_map = {}
    block_idx = 0
    for stage_idx, d in enumerate(depths):
        depth_map.update({i: (stage_idx, i - block_idx) for i in range(block_idx, block_idx + d)})
        block_idx += d

    out_dict = {}
    for k, v in state_dict.items():
        k = re.sub(
            r'blocks\.(\d+)',
            lambda x: f'stages.{depth_map[int(x.group(1))][0]}.blocks.{depth_map[int(x.group(1))][1]}',
            k)

        if expand_attn:
            k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_attn', k)
        else:
            k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_mlp', k)
        if 'head' in k:
            k = k.replace('head.projection', 'head.fc')
        out_dict[k] = v

    return out_dict


model_cfgs = dict(
    mvitv2_tiny=MultiScaleVitCfg(
        depths=(1, 2, 5, 2),
    ),
    mvitv2_small=MultiScaleVitCfg(
        depths=(1, 2, 11, 2),
    ),
    mvitv2_base=MultiScaleVitCfg(
        depths=(2, 3, 16, 3),
    ),
    mvitv2_large=MultiScaleVitCfg(
        depths=(2, 6, 36, 4),
        embed_dim=144,
        num_heads=2,
        expand_attn=False,
    ),
    mvitv2_small_cls=MultiScaleVitCfg(
        depths=(1, 2, 11, 2),
        use_cls_token=True,
    ),
    mvitv2_base_cls=MultiScaleVitCfg(
        depths=(2, 3, 16, 3),
        use_cls_token=True,
    ),
    mvitv2_large_cls=MultiScaleVitCfg(
        depths=(2, 6, 36, 4),
        embed_dim=144,
        num_heads=2,
        use_cls_token=True,
        expand_attn=True,
    ),
    mvitv2_huge_cls=MultiScaleVitCfg(
        depths=(4, 8, 60, 8),
        embed_dim=192,
        num_heads=3,
        use_cls_token=True,
        expand_attn=True,
    ),
)


def _create_mvitv2(variant, cfg_variant=None, pretrained=False, **kwargs):
    # Build via timm's factory; 'getter' feature class supports forward_intermediates.
    out_indices = kwargs.pop('out_indices', 4)
    return build_model_with_cfg(
        MultiScaleVit,
        variant,
        pretrained,
        model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
        **kwargs,
    )


def _cfg(url='', **kwargs):
    # Default pretrained cfg template for all MViTv2 variants.
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
        'fixed_input_size': True,
        **kwargs
    }


default_cfgs = generate_default_cfgs({
    'mvitv2_tiny.fb_in1k': _cfg(
        url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth',
        hf_hub_id='timm/'),
    'mvitv2_small.fb_in1k': _cfg(
        url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth',
        hf_hub_id='timm/'),
    'mvitv2_base.fb_in1k': _cfg(
        url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth',
        hf_hub_id='timm/'),
    'mvitv2_large.fb_in1k': _cfg(
        url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth',
        hf_hub_id='timm/'),

    'mvitv2_small_cls': _cfg(url=''),
    'mvitv2_base_cls.fb_inw21k': _cfg(
        url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in21k.pyth',
        hf_hub_id='timm/',
        num_classes=19168),
    'mvitv2_large_cls.fb_inw21k': _cfg(
        url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in21k.pyth',
        hf_hub_id='timm/',
        num_classes=19168),
    'mvitv2_huge_cls.fb_inw21k': _cfg(
        url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_H_in21k.pyth',
        hf_hub_id='timm/',
        num_classes=19168),
})


@register_model
def mvitv2_tiny(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_tiny', pretrained=pretrained, **kwargs)


@register_model
def mvitv2_small(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_small', pretrained=pretrained, **kwargs)


@register_model
def mvitv2_base(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_base', pretrained=pretrained, **kwargs)


@register_model
def mvitv2_large(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_large', pretrained=pretrained, **kwargs)


@register_model
def mvitv2_small_cls(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_small_cls', pretrained=pretrained, **kwargs)


@register_model
def mvitv2_base_cls(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_base_cls', pretrained=pretrained, **kwargs)


@register_model
def mvitv2_large_cls(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_large_cls', pretrained=pretrained, **kwargs)


@register_model
def mvitv2_huge_cls(pretrained=False, **kwargs) -> MultiScaleVit:
    return _create_mvitv2('mvitv2_huge_cls', pretrained=pretrained, **kwargs)
pytorch-image-models/timm/models/mvitv2.py/0
{ "file_path": "pytorch-image-models/timm/models/mvitv2.py", "repo_id": "pytorch-image-models", "token_count": 21308 }
274
"""PyTorch ResNet This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with additional dropout and dynamic global avg/max pool. ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman Copyright 2019, Ross Wightman """ import math from functools import partial from typing import Any, Dict, List, Optional, Tuple, Type, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, LayerType, create_attn, \ get_attn, get_act_layer, get_norm_layer, create_classifier, create_aa, to_ntuple from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this def get_padding(kernel_size: int, stride: int, dilation: int = 1) -> int: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding class BasicBlock(nn.Module): """Basic residual block for ResNet. This is the standard residual block used in ResNet-18 and ResNet-34. """ expansion = 1 def __init__( self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, cardinality: int = 1, base_width: int = 64, reduce_first: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, act_layer: Type[nn.Module] = nn.ReLU, norm_layer: Type[nn.Module] = nn.BatchNorm2d, attn_layer: Optional[Type[nn.Module]] = None, aa_layer: Optional[Type[nn.Module]] = None, drop_block: Optional[Type[nn.Module]] = None, drop_path: Optional[nn.Module] = None, ) -> None: """ Args: inplanes: Input channel dimensionality. planes: Used to determine output channel dimensionalities. stride: Stride used in convolution layers. 
downsample: Optional downsample layer for residual path. cardinality: Number of convolution groups. base_width: Base width used to determine output channel dimensionality. reduce_first: Reduction factor for first convolution output width of residual blocks. dilation: Dilation rate for convolution layers. first_dilation: Dilation rate for first convolution layer. act_layer: Activation layer class. norm_layer: Normalization layer class. attn_layer: Attention layer class. aa_layer: Anti-aliasing layer class. drop_block: DropBlock layer class. drop_path: Optional DropPath layer instance. """ super(BasicBlock, self).__init__() assert cardinality == 1, 'BasicBlock only supports cardinality of 1' assert base_width == 64, 'BasicBlock does not support changing base width' first_planes = planes // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) self.conv1 = nn.Conv2d( inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, dilation=first_dilation, bias=False) self.bn1 = norm_layer(first_planes) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act1 = act_layer(inplace=True) self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa) self.conv2 = nn.Conv2d( first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) self.bn2 = norm_layer(outplanes) self.se = create_attn(attn_layer, outplanes) self.act2 = act_layer(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.drop_path = drop_path def zero_init_last(self) -> None: """Initialize the last batch norm layer weights to zero for better convergence.""" if getattr(self.bn2, 'weight', None) is not None: nn.init.zeros_(self.bn2.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.conv1(x) x = self.bn1(x) x = 
self.drop_block(x) x = self.act1(x) x = self.aa(x) x = self.conv2(x) x = self.bn2(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act2(x) return x class Bottleneck(nn.Module): """Bottleneck residual block for ResNet. This is the bottleneck block used in ResNet-50, ResNet-101, and ResNet-152. """ expansion = 4 def __init__( self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, cardinality: int = 1, base_width: int = 64, reduce_first: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, act_layer: Type[nn.Module] = nn.ReLU, norm_layer: Type[nn.Module] = nn.BatchNorm2d, attn_layer: Optional[Type[nn.Module]] = None, aa_layer: Optional[Type[nn.Module]] = None, drop_block: Optional[Type[nn.Module]] = None, drop_path: Optional[nn.Module] = None, ) -> None: """ Args: inplanes: Input channel dimensionality. planes: Used to determine output channel dimensionalities. stride: Stride used in convolution layers. downsample: Optional downsample layer for residual path. cardinality: Number of convolution groups. base_width: Base width used to determine output channel dimensionality. reduce_first: Reduction factor for first convolution output width of residual blocks. dilation: Dilation rate for convolution layers. first_dilation: Dilation rate for first convolution layer. act_layer: Activation layer class. norm_layer: Normalization layer class. attn_layer: Attention layer class. aa_layer: Anti-aliasing layer class. drop_block: DropBlock layer class. drop_path: Optional DropPath layer instance. 
""" super(Bottleneck, self).__init__() width = int(math.floor(planes * (base_width / 64)) * cardinality) first_planes = width // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) self.bn1 = norm_layer(first_planes) self.act1 = act_layer(inplace=True) self.conv2 = nn.Conv2d( first_planes, width, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) self.bn2 = norm_layer(width) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act2 = act_layer(inplace=True) self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa) self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) self.bn3 = norm_layer(outplanes) self.se = create_attn(attn_layer, outplanes) self.act3 = act_layer(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.drop_path = drop_path def zero_init_last(self) -> None: """Initialize the last batch norm layer weights to zero for better convergence.""" if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.drop_block(x) x = self.act2(x) x = self.aa(x) x = self.conv3(x) x = self.bn3(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act3(x) return x def downsample_conv( in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, norm_layer: Optional[Type[nn.Module]] = None, ) -> nn.Module: norm_layer = norm_layer or 
nn.BatchNorm2d kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 p = get_padding(kernel_size, stride, first_dilation) return nn.Sequential(*[ nn.Conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), norm_layer(out_channels) ]) def downsample_avg( in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, norm_layer: Optional[Type[nn.Module]] = None, ) -> nn.Module: norm_layer = norm_layer or nn.BatchNorm2d avg_stride = stride if dilation == 1 else 1 if stride == 1 and dilation == 1: pool = nn.Identity() else: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) return nn.Sequential(*[ pool, nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), norm_layer(out_channels) ]) def drop_blocks(drop_prob: float = 0.) -> List[Optional[partial]]: """Create DropBlock layer instances for each stage. Args: drop_prob: Drop probability for DropBlock. Returns: List of DropBlock partial instances or None for each stage. """ return [ None, None, partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25) if drop_prob else None, partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.00) if drop_prob else None] def make_blocks( block_fns: Tuple[Union[Type[BasicBlock], Type[Bottleneck]], ...], channels: Tuple[int, ...], block_repeats: Tuple[int, ...], inplanes: int, reduce_first: int = 1, output_stride: int = 32, down_kernel_size: int = 1, avg_down: bool = False, drop_block_rate: float = 0., drop_path_rate: float = 0., **kwargs, ) -> Tuple[List[Tuple[str, nn.Module]], List[Dict[str, Any]]]: """Create ResNet stages with specified block configurations. Args: block_fns: Block class to use for each stage. 
channels: Number of channels for each stage. block_repeats: Number of blocks to repeat for each stage. inplanes: Number of input channels. reduce_first: Reduction factor for first convolution in each stage. output_stride: Target output stride of network. down_kernel_size: Kernel size for downsample layers. avg_down: Use average pooling for downsample. drop_block_rate: DropBlock drop rate. drop_path_rate: Drop path rate for stochastic depth. **kwargs: Additional arguments passed to block constructors. Returns: Tuple of stage modules list and feature info list. """ stages = [] feature_info = [] net_num_blocks = sum(block_repeats) net_block_idx = 0 net_stride = 4 dilation = prev_dilation = 1 for stage_idx, (block_fn, planes, num_blocks, db) in enumerate(zip(block_fns, channels, block_repeats, drop_blocks(drop_block_rate))): stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it stride = 1 if stage_idx == 0 else 2 if net_stride >= output_stride: dilation *= stride stride = 1 else: net_stride *= stride downsample = None if stride != 1 or inplanes != planes * block_fn.expansion: down_kwargs = dict( in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer'), ) downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) blocks = [] for block_idx in range(num_blocks): downsample = downsample if block_idx == 0 else None stride = stride if block_idx == 0 else 1 block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule blocks.append(block_fn( inplanes, planes, stride, downsample, first_dilation=prev_dilation, drop_path=DropPath(block_dpr) if block_dpr > 0. 
else None, **block_kwargs, )) prev_dilation = dilation inplanes = planes * block_fn.expansion net_block_idx += 1 stages.append((stage_name, nn.Sequential(*blocks))) feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) return stages, feature_info class ResNet(nn.Module): """ResNet / ResNeXt / SE-ResNeXt / SE-Net This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that * have > 1 stride in the 3x3 conv layer of bottleneck * have conv-bn-act ordering This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. ResNet variants (the same modifications can be used in SE/ResNeXt models as well): * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample ResNeXt * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths * same c,d, e, s variants as ResNet can be enabled SE-ResNeXt * normal - 7x7 stem, stem_width = 64 * same c, d, e, s variants as ResNet can be enabled SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block """ def __init__( self, block: Union[BasicBlock, Bottleneck], layers: Tuple[int, ...], num_classes: int = 1000, 
in_chans: int = 3, output_stride: int = 32, global_pool: str = 'avg', cardinality: int = 1, base_width: int = 64, stem_width: int = 64, stem_type: str = '', replace_stem_pool: bool = False, block_reduce_first: int = 1, down_kernel_size: int = 1, avg_down: bool = False, channels: Optional[Tuple[int, ...]] = (64, 128, 256, 512), act_layer: LayerType = nn.ReLU, norm_layer: LayerType = nn.BatchNorm2d, aa_layer: Optional[Type[nn.Module]] = None, drop_rate: float = 0.0, drop_path_rate: float = 0., drop_block_rate: float = 0., zero_init_last: bool = True, block_args: Optional[Dict[str, Any]] = None, ): """ Args: block (nn.Module): class for the residual block. Options are BasicBlock, Bottleneck. layers (List[int]) : number of layers in each block num_classes (int): number of classification classes (default 1000) in_chans (int): number of input (color) channels. (default 3) output_stride (int): output stride of the network, 32, 16, or 8. (default 32) global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg') cardinality (int): number of convolution groups for 3x3 conv in Bottleneck. (default 1) base_width (int): bottleneck channels factor. 
`planes * base_width / 64 * cardinality` (default 64) stem_width (int): number of channels in stem convolutions (default 64) stem_type (str): The type of stem (default ''): * '', default - a single 7x7 conv with a width of stem_width * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 replace_stem_pool (bool): replace stem max-pooling layer with a 3x3 stride-2 convolution block_reduce_first (int): Reduction factor for first convolution output width of residual blocks, 1 for all archs except senets, where 2 (default 1) down_kernel_size (int): kernel size of residual block downsample path, 1x1 for most, 3x3 for senets (default: 1) avg_down (bool): use avg pooling for projection skip connection between stages/downsample (default False) act_layer (str, nn.Module): activation layer norm_layer (str, nn.Module): normalization layer aa_layer (nn.Module): anti-aliasing layer drop_rate (float): Dropout probability before classifier, for training (default 0.) drop_path_rate (float): Stochastic depth drop-path rate (default 0.) drop_block_rate (float): Drop block rate (default 0.) 
zero_init_last (bool): zero-init the last weight in residual path (usually last BN affine weight) block_args (dict): Extra kwargs to pass through to block module """ super(ResNet, self).__init__() block_args = block_args or dict() assert output_stride in (8, 16, 32) self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False act_layer = get_act_layer(act_layer) norm_layer = get_norm_layer(norm_layer) # Stem deep_stem = 'deep' in stem_type inplanes = stem_width * 2 if deep_stem else 64 if deep_stem: stem_chs = (stem_width, stem_width) if 'tiered' in stem_type: stem_chs = (3 * (stem_width // 4), stem_width) self.conv1 = nn.Sequential(*[ nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), norm_layer(stem_chs[0]), act_layer(inplace=True), nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), norm_layer(stem_chs[1]), act_layer(inplace=True), nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) else: self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(inplanes) self.act1 = act_layer(inplace=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] # Stem pooling. The name 'maxpool' remains for weight compatibility. 
        # Stem pooling. When replace_stem_pool is set, the max-pool is swapped for a
        # 3x3 conv block; filter(None, ...) drops the anti-aliasing slot when unused.
        if replace_stem_pool:
            self.maxpool = nn.Sequential(*filter(None, [
                # conv keeps stride 1 when an aa_layer performs the stride-2 downsample
                nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False),
                create_aa(aa_layer, channels=inplanes, stride=2) if aa_layer is not None else None,
                norm_layer(inplanes),
                act_layer(inplace=True),
            ]))
        else:
            if aa_layer is not None:
                if issubclass(aa_layer, nn.AvgPool2d):
                    # aa layers derived from AvgPool2d (e.g. blur-pool variants) replace the max-pool outright
                    self.maxpool = aa_layer(2)
                else:
                    # stride-1 max-pool followed by the anti-aliased stride-2 downsample
                    self.maxpool = nn.Sequential(*[
                        nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                        aa_layer(channels=inplanes, stride=2)])
            else:
                self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Feature Blocks: build the residual stages, registered below as layer1..layer4
        block_fns = to_ntuple(len(channels))(block)
        stage_modules, stage_feature_info = make_blocks(
            block_fns,
            channels,
            layers,
            inplanes,
            cardinality=cardinality,
            base_width=base_width,
            output_stride=output_stride,
            reduce_first=block_reduce_first,
            avg_down=avg_down,
            down_kernel_size=down_kernel_size,
            act_layer=act_layer,
            norm_layer=norm_layer,
            aa_layer=aa_layer,
            drop_block_rate=drop_block_rate,
            drop_path_rate=drop_path_rate,
            **block_args,
        )
        for stage in stage_modules:
            self.add_module(*stage)  # layer1, layer2, etc
        self.feature_info.extend(stage_feature_info)

        # Head (Pooling and Classifier)
        self.num_features = self.head_hidden_size = channels[-1] * block_fns[-1].expansion
        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

        self.init_weights(zero_init_last=zero_init_last)

    @torch.jit.ignore
    def init_weights(self, zero_init_last: bool = True) -> None:
        """Initialize model weights.

        Conv layers get Kaiming-normal init; modules exposing a ``zero_init_last``
        method (residual blocks) are asked to zero their final norm affine weight,
        which leaves each residual branch an identity at init.

        Args:
            zero_init_last: Zero-initialize the last BN in each residual branch.
        """
        for n, m in self.named_modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if zero_init_last:
            for m in self.modules():
                # delegated so each block type zeroes its own final norm layer
                if hasattr(m, 'zero_init_last'):
                    m.zero_init_last()

    @torch.jit.ignore
    def group_matcher(self, coarse: bool = False) -> Dict[str, str]:
        """Create regex patterns for parameter grouping.

        Args:
            coarse: Use coarse (stage-level) or fine (block-level) grouping.

        Returns:
            Dictionary mapping group names to regex patterns.
        """
        matcher = dict(
            stem=r'^conv1|bn1|maxpool',
            # coarse groups whole stages; fine groups individual blocks within a stage
            blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)'
        )
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable: bool = True) -> None:
        """Enable or disable gradient checkpointing.

        Args:
            enable: Whether to enable gradient checkpointing.
        """
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self, name_only: bool = False) -> Union[str, nn.Module]:
        """Get the classifier module.

        Args:
            name_only: Return classifier module name instead of module.

        Returns:
            Classifier module or name.
        """
        return 'fc' if name_only else self.fc

    def reset_classifier(self, num_classes: int, global_pool: str = 'avg') -> None:
        """Reset the classifier head.

        Replaces both global pooling and the final fc; ``num_classes=0`` yields a
        feature-only head (see ``create_classifier``).

        Args:
            num_classes: Number of classes for new classifier.
            global_pool: Global pooling type.
        """
        self.num_classes = num_classes
        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_intermediates(
            self,
            x: torch.Tensor,
            indices: Optional[Union[int, List[int]]] = None,
            norm: bool = False,
            stop_early: bool = False,
            output_fmt: str = 'NCHW',
            intermediates_only: bool = False,
    ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
        """Forward features that returns intermediates.

        Args:
            x: Input image tensor.
            indices: Take last n blocks if int, all if None, select matching indices if sequence.
            norm: Apply norm layer to compatible intermediates.
            stop_early: Stop iterating over blocks when last desired intermediate hit.
            output_fmt: Shape of intermediate feature outputs.
            intermediates_only: Only return intermediate features.

        Returns:
            Features and list of intermediate features or just intermediate features.
        """
        assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
        intermediates = []
        # 5 feature levels: stem output + the four residual stages
        take_indices, max_index = feature_take_indices(5, indices)

        # forward pass
        feat_idx = 0  # index 0 is the post-activation stem feature
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        if feat_idx in take_indices:
            intermediates.append(x)
        x = self.maxpool(x)

        # NOTE(review): the `norm` argument is never referenced in this body — it
        # appears to exist only for interface parity with other models; confirm.
        layer_names = ('layer1', 'layer2', 'layer3', 'layer4')
        if stop_early:
            # skip stages beyond the deepest requested intermediate
            layer_names = layer_names[:max_index]
        for n in layer_names:
            feat_idx += 1
            x = getattr(self, n)(x)  # won't work with torchscript, but keeps code reasonable, FML
            if feat_idx in take_indices:
                intermediates.append(x)

        if intermediates_only:
            return intermediates

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
    ) -> List[int]:
        """Prune layers not required for specified intermediates.

        Stages deeper than the last requested intermediate are replaced with
        ``nn.Identity`` so the module keeps its structure but does no work.

        Args:
            indices: Indices of intermediate layers to keep.
            prune_norm: Whether to prune normalization layers.
            prune_head: Whether to prune the classifier head.

        Returns:
            List of indices that were kept.
        """
        take_indices, max_index = feature_take_indices(5, indices)
        layer_names = ('layer1', 'layer2', 'layer3', 'layer4')
        # max_index counts the stem as level 0, so layer_names[max_index:] are unneeded stages
        layer_names = layer_names[max_index:]
        for n in layer_names:
            setattr(self, n, nn.Identity())
        if prune_head:
            # num_classes=0 with empty pool removes the classification head
            self.reset_classifier(0, '')
        return take_indices

    def forward_features(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass through feature extraction layers."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.maxpool(x)

        if self.grad_checkpointing and not torch.jit.is_scripting():
            # checkpoint each stage to trade compute for activation memory
            x = checkpoint_seq([self.layer1, self.layer2, self.layer3, self.layer4], x, flatten=True)
        else:
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
        return x

    def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
        """Forward pass through classifier head.

        Args:
            x: Feature tensor.
            pre_logits: Return features before final classifier layer.

        Returns:
            Output tensor.
        """
        x = self.global_pool(x)
        if self.drop_rate:
            # dropout applied between pooling and the final fc, train-time only
            x = F.dropout(x, p=float(self.drop_rate), training=self.training)
        return x if pre_logits else self.fc(x)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass."""
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _create_resnet(variant: str, pretrained: bool = False, **kwargs) -> ResNet:
    """Create a ResNet model.

    Args:
        variant: Model variant name.
        pretrained: Load pretrained weights.
        **kwargs: Additional model arguments.

    Returns:
        ResNet model instance.
    """
    return build_model_with_cfg(ResNet, variant, pretrained, **kwargs)


def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Create a default configuration for ResNet models.

    Base pretrained-cfg dict (224x224, bilinear, ImageNet mean/std); callers
    override fields via kwargs.
    """
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bilinear',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv1',
        'classifier': 'fc',
        **kwargs
    }


def _tcfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Create a configuration with bicubic interpolation."""
    return _cfg(url=url, **dict({'interpolation': 'bicubic'}, **kwargs))


def _ttcfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Create a configuration for models trained with timm."""
    return _cfg(url=url, **dict({
        'interpolation': 'bicubic',
        # evaluated at a larger test resolution than train
        'test_input_size': (3, 288, 288),
        'test_crop_pct': 0.95,
        'origin_url': 'https://github.com/huggingface/pytorch-image-models',
    }, **kwargs))


def _rcfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Create a configuration for ResNet-RS models."""
    return _cfg(url=url, **dict({
        'interpolation': 'bicubic',
        'crop_pct': 0.95,
        'test_input_size': (3, 288, 288),
        'test_crop_pct': 1.0,
        'origin_url': 'https://github.com/huggingface/pytorch-image-models',
        # ResNet strikes back / RSB recipe
        'paper_ids': 'arXiv:2110.00476'
    }, **kwargs))


def _r3cfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Create a configuration for ResNet-RS models with 160x160 input."""
    return _cfg(url=url, **dict({
        'interpolation': 'bicubic',
'input_size': (3, 160, 160), 'pool_size': (5, 5), 'crop_pct': 0.95, 'test_input_size': (3, 224, 224), 'test_crop_pct': 0.95, 'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476', }, **kwargs)) def _gcfg(url: str = '', **kwargs) -> Dict[str, Any]: """Create a configuration for Gluon pretrained models.""" return _cfg(url=url, **dict({ 'interpolation': 'bicubic', 'origin_url': 'https://cv.gluon.ai/model_zoo/classification.html', }, **kwargs)) default_cfgs = generate_default_cfgs({ # ResNet and Wide ResNet trained w/ timm (RSB paper and others) 'resnet10t.c3_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet10t_176_c3-f3215ab1.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), first_conv='conv1.0'), 'resnet14t.c3_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet14t_176_c3-c4ed2c37.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), first_conv='conv1.0'), 'resnet18.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a1_0-d63eafa0.pth'), 'resnet18.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a2_0-b61bd467.pth'), 'resnet18.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a3_0-40c531c8.pth'), 'resnet18d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', first_conv='conv1.0'), 'resnet18d.ra4_e3600_r224_in1k': _rcfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.9, 
first_conv='conv1.0'), 'resnet34.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a1_0-46f8f793.pth'), 'resnet34.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a2_0-82d47d71.pth'), 'resnet34.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a3_0-a20cabb6.pth', crop_pct=0.95), 'resnet34.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), 'resnet34.ra4_e3600_r224_in1k': _rcfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.9), 'resnet34d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', first_conv='conv1.0'), 'resnet26.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth'), 'resnet26d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', first_conv='conv1.0'), 'resnet26t.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'resnet50.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth'), 'resnet50.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth', 
input_size=(3, 176, 176), pool_size=(6, 6), crop_pct=0.9, test_input_size=(3, 224, 224), test_crop_pct=1.0), 'resnet50.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a2_0-a2746f79.pth'), 'resnet50.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a3_0-59cae1ef.pth'), 'resnet50.b1k_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b1k-532a802a.pth'), 'resnet50.b2k_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b2k-1ba180c1.pth'), 'resnet50.c1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c1-5ba5e060.pth'), 'resnet50.c2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c2-d01e05b2.pth'), 'resnet50.d_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_d-f39db8af.pth'), 'resnet50.ram_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth'), 'resnet50.am_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_am-6c502b37.pth'), 'resnet50.ra_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ra-85ebb6e5.pth'), 'resnet50.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/rw_resnet50-86acaeed.pth'), 'resnet50d.ra2_in1k': _ttcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', first_conv='conv1.0'), 'resnet50d.ra4_e3600_r224_in1k': _rcfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0, first_conv='conv1.0'), 'resnet50d.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a1_0-e20cff14.pth', first_conv='conv1.0'), 'resnet50d.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a2_0-a3adc64d.pth', first_conv='conv1.0'), 'resnet50d.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a3_0-403fdfad.pth', first_conv='conv1.0'), 'resnet50t.untrained': _ttcfg(first_conv='conv1.0'), 'resnet101.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth'), 'resnet101.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1_0-cdcb52a9.pth'), 'resnet101.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a2_0-6edb36c7.pth'), 'resnet101.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a3_0-1db14157.pth'), 'resnet101d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'resnet152.a1h_in1k': _rcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth'), 'resnet152.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1_0-2eee8a7a.pth'), 'resnet152.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a2_0-b4c6978f.pth'), 'resnet152.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a3_0-134d4688.pth'), 'resnet152d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'resnet200.untrained': _ttcfg(), 'resnet200d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'wide_resnet50_2.racm_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth'), # torchvision resnet weights 'resnet18.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet18-f37072fd.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet34.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet34-b627a593.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet50.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet50-0676ba61.pth', license='bsd-3-clause', 
origin_url='https://github.com/pytorch/vision'), 'resnet50.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet50-11ad3fa6.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet101.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet101-63fe2227.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet101.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet101-cd907fc2.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet152.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet152-394f9c45.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet152.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet152-f82ba261.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet50_2.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet50_2.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet101_2.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet101_2.tv2_in1k': _cfg( 
hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), # ResNets w/ alternative norm layers 'resnet50_gn.a1h_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth', crop_pct=0.94), # ResNeXt trained in timm (RSB paper and others) 'resnext50_32x4d.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth'), 'resnext50_32x4d.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1_0-b5a91a1d.pth'), 'resnext50_32x4d.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a2_0-efc76add.pth'), 'resnext50_32x4d.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a3_0-3e450271.pth'), 'resnext50_32x4d.ra_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth'), 'resnext50d_32x4d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', first_conv='conv1.0'), 'resnext101_32x4d.untrained': _ttcfg(), 'resnext101_64x4d.c1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnext101_64x4d_c-0d0e0cc0.pth'), # torchvision ResNeXt weights 'resnext50_32x4d.tv_in1k': _cfg( hf_hub_id='timm/', 
url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_32x8d.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_64x4d.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext50_32x4d.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_32x8d.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags # from https://github.com/facebookresearch/WSL-Images # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. 
'resnext101_32x8d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), 'resnext101_32x16d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), 'resnext101_32x32d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), 'resnext101_32x48d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. 
'resnet18.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnet50.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. 
'resnet18.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnet50.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), # Efficient Channel Attention ResNets 'ecaresnet26t.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', first_conv='conv1.0', input_size=(3, 
256, 256), pool_size=(8, 8), test_crop_pct=0.95, test_input_size=(3, 320, 320)), 'ecaresnetlight.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnetlight-75a9c627.pth', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet50d.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d-93c81e3b.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet50d_pruned.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d_p-e4fa23c2.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet50t.ra2_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), test_crop_pct=0.95, test_input_size=(3, 320, 320)), 'ecaresnet50t.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a1_0-99bd76a8.pth', first_conv='conv1.0'), 'ecaresnet50t.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a2_0-b1c7b745.pth', first_conv='conv1.0'), 'ecaresnet50t.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a3_0-8cc311f1.pth', first_conv='conv1.0'), 'ecaresnet101d.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d-153dad65.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet101d_pruned.miil_in1k': _tcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d_p-9e74cb91.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet200d.untrained': _ttcfg( first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8)), 'ecaresnet269d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 352, 352)), # Efficient Channel Attention ResNeXts 'ecaresnext26t_32x4d.untrained': _tcfg(first_conv='conv1.0'), 'ecaresnext50t_32x4d.untrained': _tcfg(first_conv='conv1.0'), # Squeeze-Excitation ResNets, to eventually replace the models in senet.py 'seresnet18.untrained': _ttcfg(), 'seresnet34.untrained': _ttcfg(), 'seresnet50.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a1_0-ffa00869.pth', crop_pct=0.95), 'seresnet50.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a2_0-850de0d9.pth', crop_pct=0.95), 'seresnet50.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a3_0-317ecd56.pth', crop_pct=0.95), 'seresnet50.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth'), 'seresnet50t.untrained': _ttcfg( first_conv='conv1.0'), 'seresnet101.untrained': _ttcfg(), 'seresnet152.untrained': _ttcfg(), 'seresnet152d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', first_conv='conv1.0', input_size=(3, 256, 256), 
pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320) ), 'seresnet200d.untrained': _ttcfg( first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), 'seresnet269d.untrained': _ttcfg( first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py 'seresnext26d_32x4d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', first_conv='conv1.0'), 'seresnext26t_32x4d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', first_conv='conv1.0'), 'seresnext50_32x4d.racm_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth'), 'seresnext101_32x4d.untrained': _ttcfg(), 'seresnext101_32x8d.ah_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101_32x8d_ah-e6bc4c0a.pth'), 'seresnext101d_32x8d.ah_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101d_32x8d_ah-191d7b94.pth', first_conv='conv1.0'), # ResNets with anti-aliasing / blur pool 'resnetaa50d.sw_in12k_ft_in1k': _ttcfg( hf_hub_id='timm/', first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetaa101d.sw_in12k_ft_in1k': _ttcfg( hf_hub_id='timm/', first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'seresnextaa101d_32x8d.sw_in12k_ft_in1k_288': _ttcfg( hf_hub_id='timm/', crop_pct=0.95, input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0, first_conv='conv1.0'), 'seresnextaa101d_32x8d.sw_in12k_ft_in1k': _ttcfg( hf_hub_id='timm/', first_conv='conv1.0', test_crop_pct=1.0), 
'seresnextaa201d_32x8d.sw_in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', interpolation='bicubic', first_conv='conv1.0', pool_size=(12, 12), input_size=(3, 384, 384), crop_pct=1.0), 'seresnextaa201d_32x8d.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, interpolation='bicubic', first_conv='conv1.0', crop_pct=0.95, input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'resnetaa50d.sw_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetaa50d.d_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetaa101d.sw_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'seresnextaa101d_32x8d.sw_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetblur18.untrained': _ttcfg(), 'resnetblur50.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth'), 'resnetblur50d.untrained': _ttcfg(first_conv='conv1.0'), 'resnetblur101d.untrained': _ttcfg(first_conv='conv1.0'), 'resnetaa34d.untrained': _ttcfg(first_conv='conv1.0'), 'resnetaa50.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetaa50_a1h-4cf422b3.pth'), 'seresnetaa50d.untrained': _ttcfg(first_conv='conv1.0'), 'seresnextaa101d_32x8d.ah_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnextaa101d_32x8d_ah-83c8ae12.pth', first_conv='conv1.0'), # ResNet-RS models 'resnetrs50.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, 
test_input_size=(3, 224, 224), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs101.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs152.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs200.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetrs200_c-6b698b88.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs270.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs350.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs420.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), interpolation='bicubic', first_conv='conv1.0'), # gluon resnet weights 'resnet18.gluon_in1k': _gcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), 'resnet34.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), 'resnet50.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), 'resnet101.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), 'resnet152.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), 'resnet50c.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', first_conv='conv1.0'), 'resnet101c.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', first_conv='conv1.0'), 'resnet152c.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', first_conv='conv1.0'), 'resnet50d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', first_conv='conv1.0'), 'resnet101d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', first_conv='conv1.0'), 'resnet152d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', first_conv='conv1.0'), 
'resnet50s.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', first_conv='conv1.0'), 'resnet101s.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', first_conv='conv1.0'), 'resnet152s.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', first_conv='conv1.0'), 'resnext50_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), 'resnext101_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), 'resnext101_64x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), 'seresnext50_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), 'seresnext101_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), 'seresnext101_64x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), 'senet154.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', first_conv='conv1.0'), 'test_resnet.r160_in1k': _cfg( hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), crop_pct=0.95, 
input_size=(3, 160, 160), pool_size=(5, 5), first_conv='conv1.0'), }) @register_model def resnet10t(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-10-T model. """ model_args = dict(block=BasicBlock, layers=(1, 1, 1, 1), stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet10t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet14t(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-14-T model. """ model_args = dict(block=Bottleneck, layers=(1, 1, 1, 1), stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet14t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet18(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-18 model. """ model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2)) return _create_resnet('resnet18', pretrained, **dict(model_args, **kwargs)) @register_model def resnet18d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-18-D model. """ model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet18d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet34(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-34 model. """ model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3)) return _create_resnet('resnet34', pretrained, **dict(model_args, **kwargs)) @register_model def resnet34d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-34-D model. """ model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet34d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet26(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-26 model. 
""" model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2)) return _create_resnet('resnet26', pretrained, **dict(model_args, **kwargs)) @register_model def resnet26t(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-26-T model. """ model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet26t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet26d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-26-D model. """ model_args = dict(block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet26d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50 model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3)) return _create_resnet('resnet50', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50c(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-C model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep') return _create_resnet('resnet50c', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet50d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50s(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-S model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=64, stem_type='deep') return _create_resnet('resnet50s', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50t(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-T model. 
""" model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet50t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101 model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3)) return _create_resnet('resnet101', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101c(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101-C model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep') return _create_resnet('resnet101c', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet101d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101s(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101-S model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), stem_width=64, stem_type='deep') return _create_resnet('resnet101s', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-152 model. """ model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3)) return _create_resnet('resnet152', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152c(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-152-C model. """ model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep') return _create_resnet('resnet152c', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-152-D model. 
""" model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet152d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152s(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-152-S model. """ model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), stem_width=64, stem_type='deep') return _create_resnet('resnet152s', pretrained, **dict(model_args, **kwargs)) @register_model def resnet200(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-200 model. """ model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3)) return _create_resnet('resnet200', pretrained, **dict(model_args, **kwargs)) @register_model def resnet200d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-200-D model. """ model_args = dict(block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet200d', pretrained, **dict(model_args, **kwargs)) @register_model def wide_resnet50_2(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a Wide ResNet-50-2 model. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), base_width=128) return _create_resnet('wide_resnet50_2', pretrained, **dict(model_args, **kwargs)) @register_model def wide_resnet101_2(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a Wide ResNet-101-2 model. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same. 
""" model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), base_width=128) return _create_resnet('wide_resnet101_2', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50_gn(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50 model w/ GroupNorm """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), norm_layer='groupnorm') return _create_resnet('resnet50_gn', pretrained, **dict(model_args, **kwargs)) @register_model def resnext50_32x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNeXt50-32x4d model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4) return _create_resnet('resnext50_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext50d_32x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample """ model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnext50d_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x4d model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=4) return _create_resnet('resnext101_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x8d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x8d model. 
""" model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8) return _create_resnet('resnext101_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x16d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x16d model """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=16) return _create_resnet('resnext101_32x16d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x32d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x32d model """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=32) return _create_resnet('resnext101_32x32d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_64x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNeXt101-64x4d model. """ model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), cardinality=64, base_width=4) return _create_resnet('resnext101_64x4d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet26t(pretrained: bool = False, **kwargs) -> ResNet: """Constructs an ECA-ResNeXt-26-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. """ model_args = dict( block=Bottleneck, layers=(2, 2, 2, 2), stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet26t', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet50d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model with eca. 
""" model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet50d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet50d_pruned(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model pruned with eca. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs)) @register_model def ecaresnet50t(pretrained: bool = False, **kwargs) -> ResNet: """Constructs an ECA-ResNet-50-T model. Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. """ model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet50t', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnetlight(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-D light model with eca. """ model_args = dict( block=Bottleneck, layers=(1, 1, 11, 3), stem_width=32, avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnetlight', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet101d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model with eca. """ model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet101d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet101d_pruned(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model pruned with eca. 
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs)) @register_model def ecaresnet200d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-200-D model with ECA. """ model_args = dict( block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet200d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet269d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-269-D model with ECA. """ model_args = dict( block=Bottleneck, layers=(3, 30, 48, 8), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet269d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnext26t_32x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs an ECA-ResNeXt-26-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem. This model replaces SE module with the ECA module """ model_args = dict( block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnext26t_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnext50t_32x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs an ECA-ResNeXt-50-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem. 
This model replaces SE module with the ECA module """ model_args = dict( block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnext50t_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet18(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), block_args=dict(attn_layer='se')) return _create_resnet('seresnet18', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet34(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict(block=BasicBlock, layers=(3, 4, 6, 3), block_args=dict(attn_layer='se')) return _create_resnet('seresnet34', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet50(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), block_args=dict(attn_layer='se')) return _create_resnet('seresnet50', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet50t(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet50t', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet101(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict(block=Bottleneck, layers=(3, 4, 23, 3), block_args=dict(attn_layer='se')) return _create_resnet('seresnet101', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet152(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict(block=Bottleneck, layers=(3, 8, 36, 3), block_args=dict(attn_layer='se')) return _create_resnet('seresnet152', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet152d(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 8, 36, 
3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet152d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet200d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-200-D model with SE attn. """ model_args = dict( block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet200d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet269d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-269-D model with SE attn. """ model_args = dict( block=Bottleneck, layers=(3, 30, 48, 8), stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet269d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext26d_32x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a SE-ResNeXt-26-D model.` This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for combination of deep stem and avg_pool in downsample. """ model_args = dict( block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnext26d_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext26t_32x4d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a SE-ResNet-26-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem. 
""" model_args = dict( block=Bottleneck, layers=(2, 2, 2, 2), cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnext26t_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext50_32x4d(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), cardinality=32, base_width=4, block_args=dict(attn_layer='se')) return _create_resnet('seresnext50_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101_32x4d(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=4, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101_32x8d(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101d_32x8d(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101d_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101_64x4d(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), cardinality=64, base_width=4, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101_64x4d', pretrained, **dict(model_args, **kwargs)) @register_model def senet154(pretrained: bool = False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=(3, 8, 36, 3), cardinality=64, base_width=4, stem_type='deep', down_kernel_size=3, 
block_reduce_first=2, block_args=dict(attn_layer='se')) return _create_resnet('senet154', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur18(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-18 model with blur anti-aliasing """ model_args = dict(block=BasicBlock, layers=(2, 2, 2, 2), aa_layer=BlurPool2d) return _create_resnet('resnetblur18', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur50(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50 model with blur anti-aliasing """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=BlurPool2d) return _create_resnet('resnetblur50', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur50d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model with blur anti-aliasing """ model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=BlurPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetblur50d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur101d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model with blur anti-aliasing """ model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), aa_layer=BlurPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetblur101d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa34d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-34-D model w/ avgpool anti-aliasing """ model_args = dict( block=BasicBlock, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetaa34d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa50(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50 model with avgpool anti-aliasing """ model_args = dict(block=Bottleneck, layers=(3, 4, 6, 3), 
aa_layer=nn.AvgPool2d) return _create_resnet('resnetaa50', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa50d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetaa50d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa101d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetaa101d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnetaa50d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a SE=ResNet-50-D model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnetaa50d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnextaa101d_32x8d(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a SE=ResNeXt-101-D 32x8d model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), cardinality=32, base_width=8, stem_width=32, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d, block_args=dict(attn_layer='se')) return _create_resnet('seresnextaa101d_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnextaa201d_32x8d(pretrained: bool = False, **kwargs): """Constructs a SE=ResNeXt-101-D 32x8d model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=(3, 24, 36, 4), cardinality=32, base_width=8, stem_width=64, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d, block_args=dict(attn_layer='se')) 
return _create_resnet('seresnextaa201d_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs50(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-RS-50 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=(3, 4, 6, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs50', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs101(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-RS-101 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=(3, 4, 23, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs101', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs152(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-RS-152 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=(3, 8, 36, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs152', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs200(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-RS-200 model. 
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=(3, 24, 36, 3), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs200', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs270(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-RS-270 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=(4, 29, 53, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs270', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs350(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-RS-350 model. 
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=(4, 36, 72, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs350', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs420(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a ResNet-RS-420 model Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=(4, 44, 87, 4), stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs420', pretrained, **dict(model_args, **kwargs)) @register_model def test_resnet(pretrained: bool = False, **kwargs) -> ResNet: """Constructs a tiny ResNet test model. 
""" model_args = dict( block=[BasicBlock, BasicBlock, Bottleneck, BasicBlock], layers=(1, 1, 1, 1), stem_width=16, stem_type='deep', avg_down=True, channels=(32, 48, 48, 96)) return _create_resnet('test_resnet', pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'tv_resnet34': 'resnet34.tv_in1k', 'tv_resnet50': 'resnet50.tv_in1k', 'tv_resnet101': 'resnet101.tv_in1k', 'tv_resnet152': 'resnet152.tv_in1k', 'tv_resnext50_32x4d' : 'resnext50_32x4d.tv_in1k', 'ig_resnext101_32x8d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ig_resnext101_32x16d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ig_resnext101_32x32d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ig_resnext101_32x48d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ssl_resnet18': 'resnet18.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnet50': 'resnet50.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext50_32x4d': 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext101_32x4d': 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext101_32x8d': 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext101_32x16d': 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k', 'swsl_resnet18': 'resnet18.fb_swsl_ig1b_ft_in1k', 'swsl_resnet50': 'resnet50.fb_swsl_ig1b_ft_in1k', 'swsl_resnext50_32x4d': 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k', 'swsl_resnext101_32x4d': 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k', 'swsl_resnext101_32x8d': 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k', 'swsl_resnext101_32x16d': 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k', 'gluon_resnet18_v1b': 'resnet18.gluon_in1k', 'gluon_resnet34_v1b': 'resnet34.gluon_in1k', 'gluon_resnet50_v1b': 'resnet50.gluon_in1k', 'gluon_resnet101_v1b': 'resnet101.gluon_in1k', 'gluon_resnet152_v1b': 'resnet152.gluon_in1k', 'gluon_resnet50_v1c': 'resnet50c.gluon_in1k', 'gluon_resnet101_v1c': 'resnet101c.gluon_in1k', 'gluon_resnet152_v1c': 'resnet152c.gluon_in1k', 'gluon_resnet50_v1d': 'resnet50d.gluon_in1k', 'gluon_resnet101_v1d': 'resnet101d.gluon_in1k', 'gluon_resnet152_v1d': 'resnet152d.gluon_in1k', 
'gluon_resnet50_v1s': 'resnet50s.gluon_in1k', 'gluon_resnet101_v1s': 'resnet101s.gluon_in1k', 'gluon_resnet152_v1s': 'resnet152s.gluon_in1k', 'gluon_resnext50_32x4d': 'resnext50_32x4d.gluon_in1k', 'gluon_resnext101_32x4d': 'resnext101_32x4d.gluon_in1k', 'gluon_resnext101_64x4d': 'resnext101_64x4d.gluon_in1k', 'gluon_seresnext50_32x4d': 'seresnext50_32x4d.gluon_in1k', 'gluon_seresnext101_32x4d': 'seresnext101_32x4d.gluon_in1k', 'gluon_seresnext101_64x4d': 'seresnext101_64x4d.gluon_in1k', 'gluon_senet154': 'senet154.gluon_in1k', 'seresnext26tn_32x4d': 'seresnext26t_32x4d', })
pytorch-image-models/timm/models/resnet.py/0
{ "file_path": "pytorch-image-models/timm/models/resnet.py", "repo_id": "pytorch-image-models", "token_count": 47348 }
275
""" Twins A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` - https://arxiv.org/pdf/2104.13840.pdf Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below """ # -------------------------------------------------------- # Twins # Copyright (c) 2021 Meituan # Licensed under The Apache 2.0 License [see LICENSE for details] # Written by Xinjie Li, Xiangxiang Chu # -------------------------------------------------------- import math from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, to_2tuple, trunc_normal_, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs from .vision_transformer import Attention __all__ = ['Twins'] # model_registry will add each entrypoint fn to this Size_ = Tuple[int, int] @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class LocallyGroupedAttn(nn.Module): """ LSA: self attention within a group """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1): assert ws != 1 super(LocallyGroupedAttn, self).__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=True) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.ws = ws def forward(self, x, size: Size_): # There are two implementations for this function, zero padding or mask. 
We don't observe obvious difference for # both. You can choose any one, we recommend forward_padding because it's neat. However, # the masking implementation is more reasonable and accurate. B, N, C = x.shape H, W = size x = x.view(B, H, W, C) pad_l = pad_t = 0 pad_r = (self.ws - W % self.ws) % self.ws pad_b = (self.ws - H % self.ws) % self.ws x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) _, Hp, Wp, _ = x.shape _h, _w = Hp // self.ws, Wp // self.ws x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) qkv = self.qkv(x).reshape( B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) q, k, v = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) x = x.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) if pad_r > 0 or pad_b > 0: x = x[:, :H, :W, :].contiguous() x = x.reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x # def forward_mask(self, x, size: Size_): # B, N, C = x.shape # H, W = size # x = x.view(B, H, W, C) # pad_l = pad_t = 0 # pad_r = (self.ws - W % self.ws) % self.ws # pad_b = (self.ws - H % self.ws) % self.ws # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) # _, Hp, Wp, _ = x.shape # _h, _w = Hp // self.ws, Wp // self.ws # mask = torch.zeros((1, Hp, Wp), device=x.device) # mask[:, -pad_b:, :].fill_(1) # mask[:, :, -pad_r:].fill_(1) # # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws) # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0)) # qkv = 
self.qkv(x).reshape( # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) # # n_h, B, _w*_h, nhead, ws*ws, dim # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws # attn = attn + attn_mask.unsqueeze(2) # attn = attn.softmax(dim=-1) # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) # if pad_r > 0 or pad_b > 0: # x = x[:, :H, :W, :].contiguous() # x = x.reshape(B, N, C) # x = self.proj(x) # x = self.proj_drop(x) # return x class GlobalSubSampleAttn(nn.Module): """ GSA: using a key to summarize the information for a group to be efficient. """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1): super().__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=True) self.kv = nn.Linear(dim, dim * 2, bias=True) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.sr_ratio = sr_ratio if sr_ratio > 1: self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) self.norm = nn.LayerNorm(dim) else: self.sr = None self.norm = None def forward(self, x, size: Size_): B, N, C = x.shape q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) if self.sr is not None: x = x.permute(0, 2, 1).reshape(B, C, *size) x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None, ): super().__init__() self.norm1 = norm_layer(dim) if ws is None: self.attn = Attention(dim, num_heads, False, None, attn_drop, proj_drop) elif ws == 1: self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, proj_drop, sr_ratio) else: self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, proj_drop, ws) self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, size: Size_): x = x + self.drop_path1(self.attn(self.norm1(x), size)) x = x + self.drop_path2(self.mlp(self.norm2(x))) return x class PosConv(nn.Module): # PEG from https://arxiv.org/abs/2102.10882 def __init__(self, in_chans, embed_dim=768, stride=1): super(PosConv, self).__init__() self.proj = nn.Sequential( nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) self.stride = stride def forward(self, x, size: Size_): B, N, C = x.shape cnn_feat_token = x.transpose(1, 2).view(B, C, *size) x = self.proj(cnn_feat_token) if self.stride == 1: x += cnn_feat_token x = x.flatten(2).transpose(1, 2) return x def no_weight_decay(self): return ['proj.%d.weight' % i for i in range(4)] class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ f"img_size {img_size} should be divided by patch_size {patch_size}." 
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] self.num_patches = self.H * self.W self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = nn.LayerNorm(embed_dim) def forward(self, x) -> Tuple[torch.Tensor, Size_]: B, C, H, W = x.shape x = self.proj(x).flatten(2).transpose(1, 2) x = self.norm(x) out_size = (H // self.patch_size[0], W // self.patch_size[1]) return x, out_size class Twins(nn.Module): """ Twins Vision Transformer (Revisiting Spatial Attention) Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git """ def __init__( self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, global_pool='avg', embed_dims=(64, 128, 256, 512), num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), block_cls=Block, ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.depths = depths self.embed_dims = embed_dims self.num_features = self.head_hidden_size = embed_dims[-1] self.grad_checkpointing = False img_size = to_2tuple(img_size) prev_chs = in_chans self.patch_embeds = nn.ModuleList() self.pos_drops = nn.ModuleList() for i in range(len(depths)): self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i])) self.pos_drops.append(nn.Dropout(p=pos_drop_rate)) prev_chs = embed_dims[i] img_size = tuple(t // patch_size for t in img_size) patch_size = 2 self.blocks = nn.ModuleList() self.feature_info = [] dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule cur = 0 for k in range(len(depths)): _block = nn.ModuleList([block_cls( dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], 
norm_layer=norm_layer, sr_ratio=sr_ratios[k], ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])], ) self.blocks.append(_block) self.feature_info += [dict(module=f'block.{k}', num_chs=embed_dims[k], reduction=2**(2+k))] cur += depths[k] self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims]) self.norm = norm_layer(self.num_features) # classification head self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # init weights self.apply(self._init_weights) @torch.jit.ignore def no_weight_decay(self): return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()]) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embeds.0', # stem and embed blocks=[ (r'^(?:blocks|patch_embeds|pos_block)\.(\d+)', None), ('^norm', (99999,)) ] if coarse else [ (r'^blocks\.(\d+)\.(\d+)', None), (r'^(?:patch_embeds|pos_block)\.(\d+)', (0,)), (r'^norm', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not 
None: m.bias.data.zero_() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt == 'NCHW', 'Output shape for Twins must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # FIXME slice block/pos_block if < max # forward pass B, _, height, width = x.shape for i, (embed, drop, blocks, pos_blk) in enumerate(zip( self.patch_embeds, self.pos_drops, self.blocks, self.pos_block) ): x, size = embed(x) x = drop(x) for j, blk in enumerate(blocks): x = blk(x, size) if j == 0: x = pos_blk(x, size) # PEG here if i < len(self.depths) - 1: x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() if i in take_indices: intermediates.append(x) else: if i in take_indices: # only last feature can be normed x_feat = self.norm(x) if norm else x intermediates.append(x_feat.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous()) if intermediates_only: return intermediates x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.blocks), indices) # FIXME add block pruning if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): B = x.shape[0] for i, (embed, drop, blocks, pos_blk) in enumerate( zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): x, size = embed(x) x = drop(x) for j, blk in enumerate(blocks): x = blk(x, size) if j == 0: x = pos_blk(x, size) # PEG here if i < len(self.depths) - 1: x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_twins(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 4) model = build_model_with_cfg( Twins, variant, pretrained, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'twins_pcpvt_small.in1k': _cfg(hf_hub_id='timm/'), 'twins_pcpvt_base.in1k': _cfg(hf_hub_id='timm/'), 'twins_pcpvt_large.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_small.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_base.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_large.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def twins_pcpvt_small(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 4, 6, 3], sr_ratios=[8, 4, 
2, 1]) return _create_twins('twins_pcpvt_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_pcpvt_base(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_pcpvt_large(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_large', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_small(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_base(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_large(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_large', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/twins.py/0
{ "file_path": "pytorch-image-models/timm/models/twins.py", "repo_id": "pytorch-image-models", "token_count": 11134 }
276
from typing import Any, Dict, Iterable, Union, Protocol, Type

try:
    from typing import TypeAlias, TypeVar
except ImportError:
    # Python < 3.10: TypeAlias lives in typing_extensions
    from typing_extensions import TypeAlias, TypeVar

import torch
import torch.optim

try:
    # newer torch versions export a canonical params type for optimizers
    from torch.optim.optimizer import ParamsT
except (ImportError, TypeError):
    # older torch: define the same alias locally
    ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]

# A concrete optimizer class (not an instance).
OptimType = Type[torch.optim.Optimizer]


class OptimizerCallable(Protocol):
    """Structural type for callables that construct an optimizer from params."""

    def __call__(self, params: ParamsT, **kwargs) -> torch.optim.Optimizer:
        ...


__all__ = ['ParamsT', 'OptimType', 'OptimizerCallable']
pytorch-image-models/timm/optim/_types.py/0
{ "file_path": "pytorch-image-models/timm/optim/_types.py", "repo_id": "pytorch-image-models", "token_count": 217 }
277
""" PyTorch MARS Optimizer Code simplified from https://github.com/AGI-Arena/MARS Paper: MARS: Unleashing the Power of Variance Reduction for Training Large Models - https://arxiv.org/abs/2411.10438 @article{yuan2024mars, title={MARS: Unleashing the Power of Variance Reduction for Training Large Models}, author={Yuan, Huizhuo and Liu, Yifeng and Wu, Shuang and Zhou, Xun and Gu, Quanquan}, journal={arXiv preprint arXiv:2411.10438}, year={2024} } """ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates # SPDX-License-Identifier: Apache-2.0 import math from typing import Optional, Tuple import torch from torch.optim.optimizer import Optimizer from ._types import ParamsT def _mars_single_tensor_step( p: torch.Tensor, grad: torch.Tensor, exp_avg: torch.Tensor, exp_avg_sq: torch.Tensor, lr: float, weight_decay: float, beta1: float, beta2: float, last_grad: torch.Tensor, eps: float, step: int, gamma: float, mars_type: str, is_grad_2d: bool, optimize_1d: bool, lr_1d_factor: bool, betas_1d: Tuple[float, float], caution: bool, ): # optimize_1d ==> use MARS for 1d param, else use AdamW if optimize_1d or is_grad_2d: one_minus_beta1 = 1. - beta1 if step == 1: # this is a timm addition, making first step more consistent when no grad history, otherwise tests fail c_t = grad else: c_t = (grad - last_grad).mul_(gamma * (beta1 / one_minus_beta1)).add_(grad) c_t_norm = torch.norm(c_t) if c_t_norm > 1.: c_t = c_t / c_t_norm exp_avg.mul_(beta1).add_(c_t, alpha=one_minus_beta1) if caution: # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 mask = (exp_avg * grad > 0).to(grad.dtype) mask.div_(mask.mean().clamp_(min=1e-3)) exp_avg = exp_avg * mask if mars_type == "adamw": exp_avg_sq.mul_(beta2).addcmul_(c_t, c_t, value=1. 
- beta2) bias_correction1 = 1.0 - beta1 ** step bias_correction2 = 1.0 - beta2 ** step denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) update = p * weight_decay + (exp_avg / bias_correction1).div_(denom) elif mars_type == "lion": update = p * weight_decay + exp_avg.sign() else: assert False p.add_(update, alpha=-lr) else: beta1_1d, beta2_1d = betas_1d exp_avg.mul_(beta1_1d).add_(grad, alpha=1. - beta1_1d) exp_avg_sq.mul_(beta2_1d).addcmul_(grad, grad, value=1. - beta2_1d) bias_correction1 = 1.0 - beta1_1d ** step bias_correction2 = 1.0 - beta2_1d ** step denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) if caution: mask = (exp_avg * grad > 0).to(grad.dtype) mask.div_(mask.mean().clamp_(min=1e-3)) exp_avg = exp_avg * mask update = p * weight_decay + (exp_avg / bias_correction1).div_(denom) p.add_(update, alpha=-(lr * lr_1d_factor)) return exp_avg, exp_avg_sq class Mars(Optimizer): """ MARS Optimizer Paper: MARS: Unleashing the Power of Variance Reduction for Training Large Models https://arxiv.org/abs/2411.10438 """ def __init__( self, params: ParamsT, lr: float = 3e-3, betas: Tuple[float, float] = (0.9, 0.99), eps: float = 1e-8, weight_decay: float = 0., gamma: float = 0.025, mars_type: str = "adamw", optimize_1d: bool = False, lr_1d_factor: float = 1.0, betas_1d: Optional[Tuple[float, float]] = None, caution: bool = False ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) assert mars_type in ["adamw", "lion"], "MARS type not supported" defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, mars_type=mars_type, gamma=gamma, optimize_1d=optimize_1d, lr_1d_factor=lr_1d_factor, betas_1d=betas_1d or betas, 
caution=caution, ) super(Mars, self).__init__(params, defaults) def __setstate__(self, state): super(Mars, self).__setstate__(state) for group in self.param_groups: group.setdefault('caution', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) <= 1: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Last Gradient state['last_grad'] = torch.zeros_like(p) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p) state['step'] += 1 step = state['step'] exp_avg = state['exp_avg'] exp_avg_sq = state['exp_avg_sq'] last_grad = state['last_grad'] lr = group['lr'] wd = group['weight_decay'] beta1, beta2 = group['betas'] is_grad_2d = grad.ndim >= 2 # FIXME add multi-tensor (if usage warrants), make more standard _mars_single_tensor_step( p, grad, exp_avg, exp_avg_sq, lr, wd, beta1, beta2, last_grad, group['eps'], step, group['gamma'], mars_type=group['mars_type'], is_grad_2d=is_grad_2d, optimize_1d=group['optimize_1d'], lr_1d_factor=group['lr_1d_factor'], betas_1d=group['betas_1d'], caution=group['caution'], ) state['last_grad'] = grad return loss
pytorch-image-models/timm/optim/mars.py/0
{ "file_path": "pytorch-image-models/timm/optim/mars.py", "repo_id": "pytorch-image-models", "token_count": 3950 }
278
""" Scheduler Factory Hacked together by / Copyright 2021 Ross Wightman """ from typing import List, Optional, Union from torch.optim import Optimizer from .cosine_lr import CosineLRScheduler from .multistep_lr import MultiStepLRScheduler from .plateau_lr import PlateauLRScheduler from .poly_lr import PolyLRScheduler from .step_lr import StepLRScheduler from .tanh_lr import TanhLRScheduler def scheduler_kwargs(cfg, decreasing_metric: Optional[bool] = None): """ cfg/argparse to kwargs helper Convert scheduler args in argparse args or cfg (.dot) like object to keyword args. """ eval_metric = getattr(cfg, 'eval_metric', 'top1') if decreasing_metric is not None: plateau_mode = 'min' if decreasing_metric else 'max' else: plateau_mode = 'min' if 'loss' in eval_metric else 'max' kwargs = dict( sched=cfg.sched, num_epochs=getattr(cfg, 'epochs', 100), decay_epochs=getattr(cfg, 'decay_epochs', 30), decay_milestones=getattr(cfg, 'decay_milestones', [30, 60]), warmup_epochs=getattr(cfg, 'warmup_epochs', 5), cooldown_epochs=getattr(cfg, 'cooldown_epochs', 0), patience_epochs=getattr(cfg, 'patience_epochs', 10), decay_rate=getattr(cfg, 'decay_rate', 0.1), min_lr=getattr(cfg, 'min_lr', 0.), warmup_lr=getattr(cfg, 'warmup_lr', 1e-5), warmup_prefix=getattr(cfg, 'warmup_prefix', False), noise=getattr(cfg, 'lr_noise', None), noise_pct=getattr(cfg, 'lr_noise_pct', 0.67), noise_std=getattr(cfg, 'lr_noise_std', 1.), noise_seed=getattr(cfg, 'seed', 42), cycle_mul=getattr(cfg, 'lr_cycle_mul', 1.), cycle_decay=getattr(cfg, 'lr_cycle_decay', 0.1), cycle_limit=getattr(cfg, 'lr_cycle_limit', 1), k_decay=getattr(cfg, 'lr_k_decay', 1.0), plateau_mode=plateau_mode, step_on_epochs=not getattr(cfg, 'sched_on_updates', False), ) return kwargs def create_scheduler( args, optimizer: Optimizer, updates_per_epoch: int = 0, ): return create_scheduler_v2( optimizer=optimizer, **scheduler_kwargs(args), updates_per_epoch=updates_per_epoch, ) def create_scheduler_v2( optimizer: Optimizer, sched: str = 
'cosine', num_epochs: int = 300, decay_epochs: int = 90, decay_milestones: List[int] = (90, 180, 270), cooldown_epochs: int = 0, patience_epochs: int = 10, decay_rate: float = 0.1, min_lr: float = 0, warmup_lr: float = 1e-5, warmup_epochs: int = 0, warmup_prefix: bool = False, noise: Union[float, List[float]] = None, noise_pct: float = 0.67, noise_std: float = 1., noise_seed: int = 42, cycle_mul: float = 1., cycle_decay: float = 0.1, cycle_limit: int = 1, k_decay: float = 1.0, plateau_mode: str = 'max', step_on_epochs: bool = True, updates_per_epoch: int = 0, ): t_initial = num_epochs warmup_t = warmup_epochs decay_t = decay_epochs cooldown_t = cooldown_epochs if not step_on_epochs: assert updates_per_epoch > 0, 'updates_per_epoch must be set to number of dataloader batches' t_initial = t_initial * updates_per_epoch warmup_t = warmup_t * updates_per_epoch decay_t = decay_t * updates_per_epoch decay_milestones = [d * updates_per_epoch for d in decay_milestones] cooldown_t = cooldown_t * updates_per_epoch # warmup args warmup_args = dict( warmup_lr_init=warmup_lr, warmup_t=warmup_t, warmup_prefix=warmup_prefix, ) # setup noise args for supporting schedulers if noise is not None: if isinstance(noise, (list, tuple)): noise_range = [n * t_initial for n in noise] if len(noise_range) == 1: noise_range = noise_range[0] else: noise_range = noise * t_initial else: noise_range = None noise_args = dict( noise_range_t=noise_range, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, ) # setup cycle args for supporting schedulers cycle_args = dict( cycle_mul=cycle_mul, cycle_decay=cycle_decay, cycle_limit=cycle_limit, ) lr_scheduler = None if sched == 'cosine': lr_scheduler = CosineLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, k_decay=k_decay, ) elif sched == 'tanh': lr_scheduler = TanhLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, 
**cycle_args, **warmup_args, **noise_args, ) elif sched == 'step': lr_scheduler = StepLRScheduler( optimizer, decay_t=decay_t, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'multistep': lr_scheduler = MultiStepLRScheduler( optimizer, decay_t=decay_milestones, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'plateau': assert step_on_epochs, 'Plateau LR only supports step per epoch.' warmup_args.pop('warmup_prefix', False) lr_scheduler = PlateauLRScheduler( optimizer, decay_rate=decay_rate, patience_t=patience_epochs, cooldown_t=0, **warmup_args, lr_min=min_lr, mode=plateau_mode, **noise_args, ) elif sched == 'poly': lr_scheduler = PolyLRScheduler( optimizer, power=decay_rate, # overloading 'decay_rate' as polynomial power t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, k_decay=k_decay, **cycle_args, **warmup_args, **noise_args, ) if hasattr(lr_scheduler, 'get_cycle_length'): # For cycle based schedulers (cosine, tanh, poly) recalculate total epochs w/ cycles & cooldown # NOTE: Warmup prefix added in get_cycle_lengths() if enabled t_with_cycles_and_cooldown = lr_scheduler.get_cycle_length() + cooldown_t if step_on_epochs: num_epochs = t_with_cycles_and_cooldown else: num_epochs = t_with_cycles_and_cooldown // updates_per_epoch else: if warmup_prefix: num_epochs += warmup_epochs return lr_scheduler, num_epochs
pytorch-image-models/timm/scheduler/scheduler_factory.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/scheduler_factory.py", "repo_id": "pytorch-image-models", "token_count": 3536 }
279
""" Exponential Moving Average (EMA) of model updates Hacked together by / Copyright 2020 Ross Wightman """ import logging from collections import OrderedDict from copy import deepcopy from typing import Optional import torch import torch.nn as nn _logger = logging.getLogger(__name__) class ModelEma: """ Model Exponential Moving Average (DEPRECATED) Keep a moving average of everything in the model state_dict (parameters and buffers). This version is deprecated, it does not work with scripted models. Will be removed eventually. This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA smoothing of weights to match results. Pay attention to the decay constant you are using relative to your update count per epoch. To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. 
""" def __init__(self, model, decay=0.9999, device='', resume=''): # make a copy of the model for accumulating moving average of weights self.ema = deepcopy(model) self.ema.eval() self.decay = decay self.device = device # perform ema on different device from model if set if device: self.ema.to(device=device) self.ema_has_module = hasattr(self.ema, 'module') if resume: self._load_checkpoint(resume) for p in self.ema.parameters(): p.requires_grad_(False) def _load_checkpoint(self, checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location='cpu') assert isinstance(checkpoint, dict) if 'state_dict_ema' in checkpoint: new_state_dict = OrderedDict() for k, v in checkpoint['state_dict_ema'].items(): # ema model may have been wrapped by DataParallel, and need module prefix if self.ema_has_module: name = 'module.' + k if not k.startswith('module') else k else: name = k new_state_dict[name] = v self.ema.load_state_dict(new_state_dict) _logger.info("Loaded state_dict_ema") else: _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") def update(self, model): # correct a mismatch in state dict keys needs_module = hasattr(model, 'module') and not self.ema_has_module with torch.no_grad(): msd = model.state_dict() for k, ema_v in self.ema.state_dict().items(): if needs_module: k = 'module.' + k model_v = msd[k].detach() if self.device: model_v = model_v.to(device=self.device) ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) class ModelEmaV2(nn.Module): """ Model Exponential Moving Average V2 Keep a moving average of everything in the model state_dict (parameters and buffers). V2 of this module is simpler, it does not match params/buffers based on name but simply iterates in order. It works with torchscript (JIT of full model). 
This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA smoothing of weights to match results. Pay attention to the decay constant you are using relative to your update count per epoch. To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__(self, model, decay=0.9999, device=None): super().__init__() # make a copy of the model for accumulating moving average of weights self.module = deepcopy(model) self.module.eval() self.decay = decay self.device = device # perform ema on different device from model if set if self.device is not None: self.module.to(device=device) def _update(self, model, update_fn): with torch.no_grad(): for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): if self.device is not None: model_v = model_v.to(device=self.device) ema_v.copy_(update_fn(ema_v, model_v)) def update(self, model): self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) def set(self, model): self._update(model, update_fn=lambda e, m: m) def forward(self, *args, **kwargs): return self.module(*args, **kwargs) class ModelEmaV3(nn.Module): """ Model Exponential Moving Average V3 Keep a moving average of everything in the model state_dict (parameters and buffers). V3 of this module leverages for_each and in-place operations for faster performance. 
Decay warmup based on code by @crowsonkb, her comments: If inv_gamma=1 and power=1, implements a simple average. inv_gamma=1, power=2/3 are good values for models you plan to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), inv_gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at 215.4k steps). This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__( self, model, decay: float = 0.9999, min_decay: float = 0.0, update_after_step: int = 0, use_warmup: bool = False, warmup_gamma: float = 1.0, warmup_power: float = 2/3, device: Optional[torch.device] = None, foreach: bool = True, exclude_buffers: bool = False, ): super().__init__() # make a copy of the model for accumulating moving average of weights self.module = deepcopy(model) self.module.eval() self.decay = decay self.min_decay = min_decay self.update_after_step = update_after_step self.use_warmup = use_warmup self.warmup_gamma = warmup_gamma self.warmup_power = warmup_power self.foreach = foreach self.device = device # perform ema on different device from model if set self.exclude_buffers = exclude_buffers if self.device is not None and device != next(model.parameters()).device: self.foreach = False # cannot use foreach methods with different devices self.module.to(device=device) def get_decay(self, step: Optional[int] = None) -> float: """ Compute the decay factor for the exponential moving average. 
""" if step is None: return self.decay step = max(0, step - self.update_after_step - 1) if step <= 0: return 0.0 if self.use_warmup: decay = 1 - (1 + step / self.warmup_gamma) ** -self.warmup_power decay = max(min(decay, self.decay), self.min_decay) else: decay = self.decay return decay @torch.no_grad() def update(self, model, step: Optional[int] = None): decay = self.get_decay(step) if self.exclude_buffers: self.apply_update_no_buffers_(model, decay) else: self.apply_update_(model, decay) def apply_update_(self, model, decay: float): # interpolate parameters and buffers if self.foreach: ema_lerp_values = [] model_lerp_values = [] for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): if ema_v.is_floating_point(): ema_lerp_values.append(ema_v) model_lerp_values.append(model_v) else: ema_v.copy_(model_v) if hasattr(torch, '_foreach_lerp_'): torch._foreach_lerp_(ema_lerp_values, model_lerp_values, weight=1. - decay) else: torch._foreach_mul_(ema_lerp_values, scalar=decay) torch._foreach_add_(ema_lerp_values, model_lerp_values, alpha=1. - decay) else: for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): if ema_v.is_floating_point(): ema_v.lerp_(model_v.to(device=self.device), weight=1. - decay) else: ema_v.copy_(model_v.to(device=self.device)) def apply_update_no_buffers_(self, model, decay: float): # interpolate parameters, copy buffers ema_params = tuple(self.module.parameters()) model_params = tuple(model.parameters()) if self.foreach: if hasattr(torch, '_foreach_lerp_'): torch._foreach_lerp_(ema_params, model_params, weight=1. - decay) else: torch._foreach_mul_(ema_params, scalar=decay) torch._foreach_add_(ema_params, model_params, alpha=1 - decay) else: for ema_p, model_p in zip(ema_params, model_params): ema_p.lerp_(model_p.to(device=self.device), weight=1. 
- decay) for ema_b, model_b in zip(self.module.buffers(), model.buffers()): ema_b.copy_(model_b.to(device=self.device)) @torch.no_grad() def set(self, model): for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): ema_v.copy_(model_v.to(device=self.device)) def forward(self, *args, **kwargs): return self.module(*args, **kwargs)
pytorch-image-models/timm/utils/model_ema.py/0
{ "file_path": "pytorch-image-models/timm/utils/model_ema.py", "repo_id": "pytorch-image-models", "token_count": 4614 }
280
# Web Browser Automation with Agents 🤖🌐 [[open-in-colab]] In this notebook, we'll create an **agent-powered web browser automation system**! This system can navigate websites, interact with elements, and extract information automatically. The agent will be able to: - [x] Navigate to web pages - [x] Click on elements - [x] Search within pages - [x] Handle popups and modals - [x] Extract information Let's set up this system step by step! First, run these lines to install the required dependencies: ```bash pip install smolagents selenium helium pillow -q ``` Let's import our required libraries and set up environment variables: ```python from io import BytesIO from time import sleep import helium from dotenv import load_dotenv from PIL import Image from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from smolagents import CodeAgent, tool from smolagents.agents import ActionStep # Load environment variables load_dotenv() ``` Now let's create our core browser interaction tools that will allow our agent to navigate and interact with web pages: ```python @tool def search_item_ctrl_f(text: str, nth_result: int = 1) -> str: """ Searches for text on the current page via Ctrl + F and jumps to the nth occurrence. Args: text: The text to search for nth_result: Which occurrence to jump to (default: 1) """ elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]") if nth_result > len(elements): raise Exception(f"Match n°{nth_result} not found (only {len(elements)} matches found)") result = f"Found {len(elements)} matches for '{text}'." elem = elements[nth_result - 1] driver.execute_script("arguments[0].scrollIntoView(true);", elem) result += f"Focused on element {nth_result} of {len(elements)}" return result @tool def go_back() -> None: """Goes back to previous page.""" driver.back() @tool def close_popups() -> str: """ Closes any visible modal or pop-up on the page. 
Use this to dismiss pop-up windows! This does not work on cookie consent banners. """ webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform() ``` Let's set up our browser with Chrome and configure screenshot capabilities: ```python # Configure Chrome options chrome_options = webdriver.ChromeOptions() chrome_options.add_argument("--force-device-scale-factor=1") chrome_options.add_argument("--window-size=1000,1350") chrome_options.add_argument("--disable-pdf-viewer") chrome_options.add_argument("--window-position=0,0") # Initialize the browser driver = helium.start_chrome(headless=False, options=chrome_options) # Set up screenshot callback def save_screenshot(memory_step: ActionStep, agent: CodeAgent) -> None: sleep(1.0) # Let JavaScript animations happen before taking the screenshot driver = helium.get_driver() current_step = memory_step.step_number if driver is not None: for previous_memory_step in agent.memory.steps: # Remove previous screenshots for lean processing if isinstance(previous_memory_step, ActionStep) and previous_memory_step.step_number <= current_step - 2: previous_memory_step.observations_images = None png_bytes = driver.get_screenshot_as_png() image = Image.open(BytesIO(png_bytes)) print(f"Captured a browser screenshot: {image.size} pixels") memory_step.observations_images = [image.copy()] # Create a copy to ensure it persists # Update observations with current URL url_info = f"Current url: {driver.current_url}" memory_step.observations = ( url_info if memory_step.observations is None else memory_step.observations + "\n" + url_info ) ``` Now let's create our web automation agent: ```python from smolagents import InferenceClientModel # Initialize the model model_id = "Qwen/Qwen2-VL-72B-Instruct" # You can change this to your preferred VLM model model = InferenceClientModel(model_id=model_id) # Create the agent agent = CodeAgent( tools=[go_back, close_popups, search_item_ctrl_f], model=model, additional_authorized_imports=["helium"], 
step_callbacks=[save_screenshot], max_steps=20, verbosity_level=2, ) # Import helium for the agent agent.python_executor("from helium import *", agent.state) ``` The agent needs instructions on how to use Helium for web automation. Here are the instructions we'll provide: ```python helium_instructions = """ You can use helium to access websites. Don't bother about the helium driver, it's already managed. We've already ran "from helium import *" Then you can go to pages! Code: ```py go_to('github.com/trending') ```<end_code> You can directly click clickable elements by inputting the text that appears on them. Code: ```py click("Top products") ```<end_code> If it's a link: Code: ```py click(Link("Top products")) ```<end_code> If you try to interact with an element and it's not found, you'll get a LookupError. In general stop your action after each button click to see what happens on your screenshot. Never try to login in a page. To scroll up or down, use scroll_down or scroll_up with as an argument the number of pixels to scroll from. Code: ```py scroll_down(num_pixels=1200) # This will scroll one viewport down ```<end_code> When you have pop-ups with a cross icon to close, don't try to click the close icon by finding its element or targeting an 'X' element (this most often fails). Just use your built-in tool `close_popups` to close them: Code: ```py close_popups() ```<end_code> You can use .exists() to check for the existence of an element. For example: Code: ```py if Text('Accept cookies?').exists(): click('I accept') ```<end_code> """ ``` Now we can run our agent with a task! Let's try finding information on Wikipedia: ```python search_request = """ Please navigate to https://en.wikipedia.org/wiki/Chicago and give me a sentence containing the word "1992" that mentions a construction accident. """ agent_output = agent.run(search_request + helium_instructions) print("Final output:") print(agent_output) ``` You can run different tasks by modifying the request. 
For example, here's a task to find out whether I should work harder:

```python
github_request = """
I'm trying to find how hard I have to work to get a repo in github.com/trending.
Can you navigate to the profile for the top author of the top trending repo, and give me their total number of commits over the last year?
"""

agent_output = agent.run(github_request + helium_instructions)
print("Final output:")
print(agent_output)
```

The system is particularly effective for tasks like:
- Data extraction from websites
- Web research automation
- UI testing and verification
- Content monitoring
smolagents/docs/source/en/examples/web_browser.md/0
{ "file_path": "smolagents/docs/source/en/examples/web_browser.md", "repo_id": "smolagents", "token_count": 2146 }
281
# मल्टी-स्टेप एजेंट्स कैसे काम करते हैं? ReAct फ्रेमवर्क ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) वर्तमान में एजेंट्स बनाने का मुख्य दृष्टिकोण है। नाम दो शब्दों, "Reason" (तर्क) और "Act" (क्रिया) के संयोजन पर आधारित है। वास्तव में, इस आर्किटेक्चर का पालन करने वाले एजेंट अपने कार्य को उतने चरणों में हल करेंगे जितने आवश्यक हों, प्रत्येक चरण में एक Reasoning कदम होगा, फिर एक Action कदम होगा, जहाँ यह टूल कॉल्स तैयार करेगा जो उसे कार्य को हल करने के करीब ले जाएंगे। ReAct प्रक्रिया में पिछले चरणों की मेमोरी रखना शामिल है। > [!TIP] > मल्टी-स्टेप एजेंट्स के बारे में अधिक जानने के लिए [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) ब्लॉग पोस्ट पढ़ें। यहाँ एक वीडियो ओवरव्यू है कि यह कैसे काम करता है: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" /> </div> ![ReAct एजेंट का फ्रेमवर्क](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png) हम दो प्रकार के ToolCallingAgent को लागू करते हैं: - [`ToolCallingAgent`] अपने आउटपुट में टूल कॉल को JSON के रूप में जनरेट करता है। - [`CodeAgent`] ToolCallingAgent का एक नया प्रकार है जो अपने टूल कॉल को कोड के ब्लॉब्स के रूप में जनरेट करता है, जो उन LLM के लिए वास्तव में अच्छी तरह काम करता है जिनका कोडिंग प्रदर्शन मजबूत है।
smolagents/docs/source/hi/conceptual_guides/react.md/0
{ "file_path": "smolagents/docs/source/hi/conceptual_guides/react.md", "repo_id": "smolagents", "token_count": 1595 }
282
# `smolagents`[[smolagents]] <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/license_to_call.png" style="max-width:700px"/> </div> ## smolagents란 무엇인가요?[[what-is-smolagents]] `smolagents`는 단 몇 줄의 코드만으로 에이전트를 구축하고 실행할 수 있도록 설계된 오픈소스 Python 라이브러리입니다. `smolagents`의 주요 특징: ✨ **단순함**: 에이전트 로직이 약 천 줄의 코드로 구현되어 있습니다. 코드 위에 불필요한 복잡한 구조를 추가하지 않고 단순하게 만들었습니다! 🧑‍💻 **코드 에이전트의 완전한 지원**: [`CodeAgent`](reference/agents#smolagents.CodeAgent)는 도구 호출이나 계산 수행을 위해 직접 코드를 작성합니다 ("코드 작성용 에이전트"와는 반대 개념). 이를 통해 함수 중첩, 루프, 조건문 등을 자연스럽게 조합할 수 있습니다. 보안을 위해 [E2B](https://e2b.dev/)나 Docker를 통한 [샌드박스 환경 실행](tutorials/secure_code_execution)을 지원합니다. 📡 **기본 도구 호출 에이전트 지원**: CodeAgent 외에도 [`ToolCallingAgent`](reference/agents#smolagents.ToolCallingAgent)는 일반적인 JSON/텍스트 기반 도구 호출 방식이 필요한 경우를 위해 지원됩니다. 🤗 **Hub 통합**: Gradio Spaces로 에이전트와 도구를 Hub에서 원활하게 공유하고 로드할 수 있습니다. 🌐 **모델 독립적**: Hub의 [Inference providers](https://huggingface.co/docs/inference-providers/index)나 OpenAI, Anthropic 등의 API를 통해 접근하거나, LiteLLM 통합으로 다양한 LLM을 쉽게 연결할 수 있습니다. Transformers나 Ollama를 사용한 로컬 실행도 가능합니다. 원하는 LLM으로 에이전트를 구동하는 것이 간단하고 유연합니다. 👁️ **모달리티 독립적**: 텍스트뿐만 아니라 비전, 비디오, 오디오 입력도 처리할 수 있어 활용 가능한 애플리케이션 범위가 확장됩니다. 비전 관련 [튜토리얼](examples/web_browser)을 확인해보세요. 🛠️ **도구 독립적**: [MCP 서버](reference/tools#smolagents.ToolCollection.from_mcp)의 도구나 [LangChain](reference/tools#smolagents.Tool.from_langchain)의 도구를 사용할 수 있고, [Hub Space](reference/tools#smolagents.Tool.from_space)도 도구로 활용할 수 있습니다. 💻 **CLI 도구**: 보일러플레이트 코드 작성 없이 에이전트를 빠르게 실행할 수 있는 명령줄 유틸리티(smolagent, webagent)가 포함되어 있습니다. ## 빠른 시작[[quickstart]] [[open-in-colab]] smolagents를 단 몇 분 만에 시작해보세요! 이 가이드는 첫 번째 에이전트를 생성하고 실행하는 방법을 보여줍니다. 
### 설치[[installation]] pip으로 smolagents를 설치하세요: ```bash pip install smolagents[toolkit] # 웹 검색과 같은 기본 도구 포함 ``` ### 첫 에이전트 만들기[[create-your-first-agent]] 다음은 에이전트를 생성하고 실행하는 최소한의 예제입니다: ```python from smolagents import CodeAgent, InferenceClientModel # 모델 초기화 (Hugging Face Inference API 사용) model = InferenceClientModel() # 기본 모델 사용 # 도구 없이 에이전트 생성 agent = CodeAgent(tools=[], model=model) # 작업으로 에이전트 실행 result = agent.run("Calculate the sum of numbers from 1 to 10") print(result) ``` 끝입니다! 에이전트가 Python 코드를 사용하여 작업을 해결하고 결과를 반환합니다. ### 도구 추가[[adding-tools]] 몇 가지 도구를 추가하여 에이전트를 더 강력하게 만들어보겠습니다: ```python from smolagents import CodeAgent, InferenceClientModel, DuckDuckGoSearchTool model = InferenceClientModel() agent = CodeAgent( tools=[DuckDuckGoSearchTool()], model=model, ) # 이제 에이전트가 웹을 검색할 수 있습니다! result = agent.run("What is the current weather in Paris?") print(result) ``` ### 다른 모델 사용하기[[using-different-models]] 에이전트와 함께 다양한 모델을 사용할 수 있습니다: ```python # Hugging Face의 특정 모델 사용 model = InferenceClientModel(model_id="meta-llama/Llama-2-70b-chat-hf") # OpenAI/Anthropic 사용 (smolagents[litellm] 필요) from smolagents import LiteLLMModel model = LiteLLMModel(model_id="gpt-4") # 로컬 모델 사용 (smolagents[transformers] 필요) from smolagents import TransformersModel model = TransformersModel(model_id="meta-llama/Llama-2-7b-chat-hf") ``` ## 다음 단계[[next-steps]] - [설치 가이드](installation)에서 다양한 모델과 도구로 smolagents를 설정하는 방법을 알아보세요 - 더 고급 기능은 [안내서](guided_tour)를 확인하세요 - [커스텀 도구 구축](tutorials/tools)에 대해 알아보세요 - [안전한 코드 실행](tutorials/secure_code_execution)을 살펴보세요 - [멀티 에이전트 시스템](tutorials/building_good_agents) 생성 방법을 확인하세요 <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./guided_tour" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg 
leading-relaxed">안내서</div> <p class="text-gray-700">기본 사항을 배우고 에이전트 사용에 익숙해지세요. 에이전트를 처음 사용하는 경우 여기서 시작하세요!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./examples/text_to_sql" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">실습 가이드</div> <p class="text-gray-700">특정 목표를 달성하는 데 도움이 되는 실용적인 가이드: SQL 쿼리를 생성하고 테스트하는 에이전트를 만들어보세요!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual_guides/intro_agents" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">개념 가이드</div> <p class="text-gray-700">중요한 주제에 대한 전체적인 이해를 돕는 설명입니다.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorials/building_good_agents" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">튜토리얼</div> <p class="text-gray-700">에이전트 구축의 중요한 측면을 다루는 포괄적인 튜토리얼입니다.</p> </a> </div> </div>
smolagents/docs/source/ko/index.md/0
{ "file_path": "smolagents/docs/source/ko/index.md", "repo_id": "smolagents", "token_count": 4409 }
283
# 📚 管理Agent的记忆 [[open-in-colab]] 归根结底,Agent可以定义为由几个简单组件构成:它拥有工具、提示词。最重要的是,它具备对过往步骤的记忆,能够追溯完整的规划、执行和错误历史。 ### 回放Agent的记忆 我们提供了多项功能来审查Agent的过往运行记录。 您可以通过插装(instrumentation)在可视化界面中查看Agent的运行过程,该界面支持对特定步骤进行缩放操作,具体方法参见[插装指南](./inspect_runs)。 您也可以使用`agent.replay()`方法实现回放: 当Agent完成运行后: ```py from smolagents import InferenceClientModel, CodeAgent agent = CodeAgent(tools=[], model=InferenceClientModel(), verbosity_level=0) result = agent.run("What's the 20th Fibonacci number?") ``` 若要回放最近一次运行,只需使用: ```py agent.replay() ``` ### 动态修改Agent的记忆 许多高级应用场景需要对Agent的记忆进行动态修改。 您可以通过以下方式访问Agent的记忆: ```py from smolagents import ActionStep system_prompt_step = agent.memory.system_prompt print("The system prompt given to the agent was:") print(system_prompt_step.system_prompt) task_step = agent.memory.steps[0] print("\n\nThe first task step was:") print(task_step.task) for step in agent.memory.steps: if isinstance(step, ActionStep): if step.error is not None: print(f"\nStep {step.step_number} got this error:\n{step.error}\n") else: print(f"\nStep {step.step_number} got these observations:\n{step.observations}\n") ``` 使用`agent.memory.get_full_steps()`可获取完整步骤字典数据。 您还可以通过步骤回调(step callbacks)实现记忆的动态修改。 步骤回调函数可通过参数直接访问`agent`对象,因此能够访问所有记忆步骤并根据需要进行修改。例如,假设您正在监控网页浏览Agent每个步骤的屏幕截图,希望保留最新截图同时删除旧步骤的图片以节省token消耗。 可参考以下代码示例: _注:此代码片段不完整,部分导入语句和对象定义已精简,完整代码请访问[原始脚本](https://github.com/huggingface/smolagents/blob/main/src/smolagents/vision_web_browser.py)_ ```py import helium from PIL import Image from io import BytesIO from time import sleep def update_screenshot(memory_step: ActionStep, agent: CodeAgent) -> None: sleep(1.0) # Let JavaScript animations happen before taking the screenshot driver = helium.get_driver() latest_step = memory_step.step_number for previous_memory_step in agent.memory.steps: # Remove previous screenshots from logs for lean processing if isinstance(previous_memory_step, ActionStep) and previous_memory_step.step_number <= latest_step - 2: 
previous_memory_step.observations_images = None png_bytes = driver.get_screenshot_as_png() image = Image.open(BytesIO(png_bytes)) memory_step.observations_images = [image.copy()] ``` 最后在初始化Agent时,将此函数传入`step_callbacks`参数: ```py CodeAgent( tools=[WebSearchTool(), go_back, close_popups, search_item_ctrl_f], model=model, additional_authorized_imports=["helium"], step_callbacks=[update_screenshot], max_steps=20, verbosity_level=2, ) ``` 请访问我们的 [vision web browser code](https://github.com/huggingface/smolagents/blob/main/src/smolagents/vision_web_browser.py) 查看完整可运行示例。 ### 分步运行 Agents 当您需要处理耗时数天的工具调用时,这种方式特别有用:您可以逐步执行Agents。这还允许您在每一步更新记忆。 ```py from smolagents import InferenceClientModel, CodeAgent, ActionStep, TaskStep agent = CodeAgent(tools=[], model=InferenceClientModel(), verbosity_level=1) print(agent.memory.system_prompt) task = "What is the 20th Fibonacci number?" # You could modify the memory as needed here by inputting the memory of another agent. # agent.memory.steps = previous_agent.memory.steps # Let's start a new task! agent.memory.steps.append(TaskStep(task=task, task_images=[])) final_answer = None step_number = 1 while final_answer is None and step_number <= 10: memory_step = ActionStep( step_number=step_number, observations_images=[], ) # Run one step. final_answer = agent.step(memory_step) agent.memory.steps.append(memory_step) step_number += 1 # Change the memory as you please! # For instance to update the latest step: # agent.memory.steps[-1] = ... print("The final answer is:", final_answer) ```
smolagents/docs/source/zh/tutorials/memory.md/0
{ "file_path": "smolagents/docs/source/zh/tutorials/memory.md", "repo_id": "smolagents", "token_count": 2218 }
284
"""CLI entry point for the open_deep_research example.

Builds a two-level agent system: a manager ``CodeAgent`` that delegates web
research to a managed ``ToolCallingAgent`` equipped with a text-only browser,
then runs it on the question given on the command line.
"""

import argparse
import os
import threading

from dotenv import load_dotenv
from huggingface_hub import login
from scripts.text_inspector_tool import TextInspectorTool
from scripts.text_web_browser import (
    ArchiveSearchTool,
    FinderTool,
    FindNextTool,
    PageDownTool,
    PageUpTool,
    SimpleTextBrowser,
    VisitTool,
)
from scripts.visual_qa import visualizer
from smolagents import (
    CodeAgent,
    GoogleSearchTool,
    # InferenceClientModel,
    LiteLLMModel,
    ToolCallingAgent,
)


# Import-time side effects: load API keys from .env (overriding any existing
# env vars) and authenticate against the Hugging Face Hub.
# NOTE(review): login(None) is called if HF_TOKEN is unset — confirm that is intended.
load_dotenv(override=True)
login(os.getenv("HF_TOKEN"))

# Lock presumably shared by threads that append answers to a common output
# file; it is only created here, not used in this script — TODO confirm usage.
append_answer_lock = threading.Lock()


def parse_args() -> argparse.Namespace:
    """Parse CLI arguments: a required question plus an optional --model-id (default "o1")."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "question", type=str, help="for example: 'How many studio albums did Mercedes Sosa release before 2007?'"
    )
    parser.add_argument("--model-id", type=str, default="o1")
    return parser.parse_args()


# Map smolagents message roles onto roles accepted by the LiteLLM backend.
custom_role_conversions = {"tool-call": "assistant", "tool-response": "user"}

# Browser-like User-Agent so websites serve normal pages to the text browser.
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"

# Configuration for the text-only SimpleTextBrowser used by the search agent.
BROWSER_CONFIG = {
    "viewport_size": 1024 * 5,  # characters of page text shown per "screen"
    "downloads_folder": "downloads_folder",
    "request_kwargs": {
        "headers": {"User-Agent": user_agent},
        "timeout": 300,  # seconds; generous to tolerate slow pages
    },
    # NOTE(review): a SerpAPI key is configured here while GoogleSearchTool
    # below uses provider="serper" — verify which search backend is active.
    "serpapi_key": os.getenv("SERPAPI_API_KEY"),
}

# Make sure the browser's download directory exists before any agent runs.
os.makedirs(f"./{BROWSER_CONFIG['downloads_folder']}", exist_ok=True)


def create_agent(model_id: str = "o1") -> CodeAgent:
    """Build the manager agent hierarchy.

    Creates a LiteLLM-backed model, a web-browsing ``ToolCallingAgent``
    ("search_agent"), and a manager ``CodeAgent`` that can delegate to it.

    Args:
        model_id: LiteLLM model identifier; "o1" additionally enables
            high reasoning effort.

    Returns:
        The manager ``CodeAgent``, ready to ``run()`` a question.
    """
    model_params = {
        "model_id": model_id,
        "custom_role_conversions": custom_role_conversions,
        "max_completion_tokens": 8192,
    }
    if model_id == "o1":
        # o1 supports the reasoning_effort parameter; crank it up.
        model_params["reasoning_effort"] = "high"
    model = LiteLLMModel(**model_params)

    # Character cap for text handed to the inspector tool.
    text_limit = 100000
    browser = SimpleTextBrowser(**BROWSER_CONFIG)
    # Tool belt for the web agent: search, page navigation, in-page find,
    # archive lookup, and non-HTML file inspection.
    WEB_TOOLS = [
        GoogleSearchTool(provider="serper"),
        VisitTool(browser),
        PageUpTool(browser),
        PageDownTool(browser),
        FinderTool(browser),
        FindNextTool(browser),
        ArchiveSearchTool(browser),
        TextInspectorTool(model, text_limit),
    ]
    text_webbrowser_agent = ToolCallingAgent(
        model=model,
        tools=WEB_TOOLS,
        max_steps=20,
        verbosity_level=2,
        planning_interval=4,
        name="search_agent",
        description="""A team member that will search the internet to answer your question.
Ask him for all your questions that require browsing the web.
Provide him as much context as possible, in particular if you need to search on a specific timeframe!
And don't hesitate to provide him with a complex search task, like finding a difference between two webpages.
Your request must be a real sentence, not a google search! Like "Find me this information (...)" rather than a few keywords.
""",
        provide_run_summary=True,
    )
    # Extend the managed-agent task prompt with extra browsing guidance.
    text_webbrowser_agent.prompt_templates["managed_agent"]["task"] += """You can navigate to .txt online files.
If a non-html page is in another format, especially .pdf or a Youtube video, use tool 'inspect_file_as_text' to inspect it.
Additionally, if after some searching you find out that you need more information to answer the question, you can use `final_answer` with your request for clarification as argument to request for more information."""

    # Manager agent: writes and runs code, can inspect images/files, and
    # delegates web research to the search agent above.
    manager_agent = CodeAgent(
        model=model,
        tools=[visualizer, TextInspectorTool(model, text_limit)],
        max_steps=12,
        verbosity_level=2,
        additional_authorized_imports=["*"],  # unrestricted imports in generated code
        planning_interval=4,
        managed_agents=[text_webbrowser_agent],
    )

    return manager_agent


def main() -> None:
    """Parse CLI args, build the agent, run the question, and print the answer."""
    args = parse_args()
    agent = create_agent(model_id=args.model_id)

    answer = agent.run(args.question)

    print(f"Got this answer: {answer}")


if __name__ == "__main__":
    main()
smolagents/examples/open_deep_research/run.py/0
{ "file_path": "smolagents/examples/open_deep_research/run.py", "repo_id": "smolagents", "token_count": 1465 }
285
# Smolagents Chat Server Demo This is a simple web server that provides a chat interface for interacting with an AI code agent powered by `smolagents` and the Qwen2.5-Coder-32B-Instruct model, enhanced with MCP (Model Control Protocol) tools. ## Features - Web-based chat interface - AI code agent powered by Qwen2.5-Coder - Integration with MCP tools through MCPClient - Asynchronous request handling - Clean, responsive UI - Graceful shutdown handling ## Requirements - Python 3.8+ - Starlette - AnyIO - Smolagents with MCP support ## Installation 1. Install the required packages: ```bash pip install starlette anyio smolagents[mcp] uvicorn ``` 2. Optional: If you want to use a specific model, you may need additional dependencies. ## Usage 1. Run the server: ```bash uvicorn examples.server.main:app --reload ``` 2. Open your browser and navigate to `http://localhost:8000` 3. Interact with the AI code agent through the chat interface ## How It Works The server consists of two main routes: - `/` - Serves the HTML page with the chat interface - `/chat` - API endpoint that processes messages and returns responses The server integrates with MCP tools through the following components: 1. MCPClient Configuration: ```python mcp_server_parameters = { "url": "https://evalstate-hf-mcp-server.hf.space/mcp", "transport": "streamable-http", } mcp_client = MCPClient(server_parameters=mcp_server_parameters) ``` 2. CodeAgent with MCP Tools: ```python agent = CodeAgent( model=InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct"), tools=mcp_client.get_tools(), ) ``` When a user sends a message: 1. The message is sent to the `/chat` endpoint 2. The server runs the AI code agent in a separate thread 3. The agent processes the message using MCP tools 4. 
The agent's response is returned to the client and displayed in the chat The server also includes a shutdown handler that properly disconnects the MCP client when the server stops: ```python async def shutdown(): mcp_client.disconnect() ``` ## Customization You can modify the `CodeAgent` configuration by changing the model or MCP server parameters. For example: ```python # Custom MCP server mcp_server_parameters = { "url": "your-mcp-server-url", "transport": "your-transport-method", } # Custom agent configuration agent = CodeAgent( model=InferenceClientModel(model_id="your-preferred-model"), tools=mcp_client.get_tools(), ) ```
smolagents/examples/server/README.md/0
{ "file_path": "smolagents/examples/server/README.md", "repo_id": "smolagents", "token_count": 753 }
286