# File: diffusers/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py
# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
num_diffusion_timesteps,
max_beta=0.999,
alpha_transform_type="cosine",
):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
Choose from `cosine` or `exp`
Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(t):
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t):
return math.exp(t * -12.0)
else:
raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
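# A hedged illustration (not part of the original module): because each beta is
# 1 - alpha_bar(t2) / alpha_bar(t1), the cumulative product of (1 - beta) telescopes back to
# (approximately) alpha_bar itself, e.g.
#
#     betas = betas_for_alpha_bar(1000)
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # ~ cos((t + 0.008) / 1.008 * pi / 2) ** 2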
class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin):
"""
KDPM2DiscreteScheduler with ancestral sampling is inspired by the DPMSolver2 and Algorithm 2 from the [Elucidating
the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) paper.
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
methods the library implements for all schedulers such as loading and saving.
Args:
num_train_timesteps (`int`, defaults to 1000):
The number of diffusion steps to train the model.
beta_start (`float`, defaults to 0.00085):
The starting `beta` value of inference.
beta_end (`float`, defaults to 0.012):
The final `beta` value.
beta_schedule (`str`, defaults to `"linear"`):
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear` or `scaled_linear`.
trained_betas (`np.ndarray`, *optional*):
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
use_karras_sigmas (`bool`, *optional*, defaults to `False`):
Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`,
the sigmas are determined according to a sequence of noise levels {σi}.
prediction_type (`str`, defaults to `epsilon`, *optional*):
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
            `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
Video](https://imagen.research.google/video/paper.pdf) paper).
timestep_spacing (`str`, defaults to `"linspace"`):
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
steps_offset (`int`, defaults to 0):
An offset added to the inference steps, as required by some model families.
"""
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
order = 2
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.00085, # sensible defaults
beta_end: float = 0.012,
beta_schedule: str = "linear",
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
use_karras_sigmas: Optional[bool] = False,
prediction_type: str = "epsilon",
timestep_spacing: str = "linspace",
steps_offset: int = 0,
):
if trained_betas is not None:
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
# set all values
self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
self._step_index = None
self._begin_index = None
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
@property
def init_noise_sigma(self):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
@property
def step_index(self):
"""
        The index counter for the current timestep. It will increase by 1 after each scheduler step.
"""
return self._step_index
@property
def begin_index(self):
"""
        The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.
"""
return self._begin_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
def set_begin_index(self, begin_index: int = 0):
"""
        Sets the begin index for the scheduler. This function should be run from the pipeline before inference.
Args:
begin_index (`int`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
def scale_model_input(
self,
sample: torch.Tensor,
timestep: Union[float, torch.Tensor],
) -> torch.Tensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.Tensor`):
The input sample.
            timestep (`float` or `torch.Tensor`):
The current timestep in the diffusion chain.
Returns:
`torch.Tensor`:
A scaled input sample.
"""
if self.step_index is None:
self._init_step_index(timestep)
if self.state_in_first_order:
sigma = self.sigmas[self.step_index]
else:
sigma = self.sigmas_interpol[self.step_index - 1]
sample = sample / ((sigma**2 + 1) ** 0.5)
return sample
def set_timesteps(
self,
num_inference_steps: int,
device: Union[str, torch.device] = None,
num_train_timesteps: Optional[int] = None,
):
"""
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
Args:
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
            num_train_timesteps (`int`, *optional*):
                The number of training timesteps to assume; defaults to `self.config.num_train_timesteps`.
        """
self.num_inference_steps = num_inference_steps
num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
elif self.config.timestep_spacing == "leading":
step_ratio = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
step_ratio = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
)
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
log_sigmas = np.log(sigmas)
sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
if self.config.use_karras_sigmas:
sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
self.log_sigmas = torch.from_numpy(log_sigmas).to(device)
sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
sigmas = torch.from_numpy(sigmas).to(device=device)
# compute up and down sigmas
sigmas_next = sigmas.roll(-1)
sigmas_next[-1] = 0.0
sigmas_up = (sigmas_next**2 * (sigmas**2 - sigmas_next**2) / sigmas**2) ** 0.5
sigmas_down = (sigmas_next**2 - sigmas_up**2) ** 0.5
sigmas_down[-1] = 0.0
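        # the ancestral split satisfies sigmas_down**2 + sigmas_up**2 == sigmas_next**2, so the
        # deterministic part of a step moves to sigma_down and the removed variance is re-injected
        # later as fresh noise scaled by sigma_up (see `step`)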
# compute interpolated sigmas
sigmas_interpol = sigmas.log().lerp(sigmas_down.log(), 0.5).exp()
sigmas_interpol[-2:] = 0.0
# set sigmas
self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
self.sigmas_interpol = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
)
self.sigmas_up = torch.cat([sigmas_up[:1], sigmas_up[1:].repeat_interleave(2), sigmas_up[-1:]])
self.sigmas_down = torch.cat([sigmas_down[:1], sigmas_down[1:].repeat_interleave(2), sigmas_down[-1:]])
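        # every inference step makes two model evaluations (first- and second-order), so interior
        # values are repeated with `repeat_interleave(2)` to line up with the interleaved timesteps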
if str(device).startswith("mps"):
timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
else:
timesteps = torch.from_numpy(timesteps).to(device)
sigmas_interpol = sigmas_interpol.cpu()
log_sigmas = self.log_sigmas.cpu()
timesteps_interpol = np.array(
[self._sigma_to_t(sigma_interpol, log_sigmas) for sigma_interpol in sigmas_interpol]
)
timesteps_interpol = torch.from_numpy(timesteps_interpol).to(device, dtype=timesteps.dtype)
interleaved_timesteps = torch.stack((timesteps_interpol[:-2, None], timesteps[1:, None]), dim=-1).flatten()
self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
self.sample = None
self._step_index = None
self._begin_index = None
self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t
def _sigma_to_t(self, sigma, log_sigmas):
# get log sigma
log_sigma = np.log(np.maximum(sigma, 1e-10))
# get distribution
dists = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
high_idx = low_idx + 1
low = log_sigmas[low_idx]
high = log_sigmas[high_idx]
# interpolate sigmas
w = (low - log_sigma) / (low - high)
w = np.clip(w, 0, 1)
# transform interpolation to time range
t = (1 - w) * low_idx + w * high_idx
t = t.reshape(sigma.shape)
return t
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
"""Constructs the noise schedule of Karras et al. (2022)."""
# Hack to make sure that other schedulers which copy this function don't break
# TODO: Add this logic to the other schedulers
if hasattr(self.config, "sigma_min"):
sigma_min = self.config.sigma_min
else:
sigma_min = None
if hasattr(self.config, "sigma_max"):
sigma_max = self.config.sigma_max
else:
sigma_max = None
sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
rho = 7.0 # 7.0 is the value used in the paper
ramp = np.linspace(0, 1, num_inference_steps)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def state_in_first_order(self):
return self.sample is None
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
def index_for_timestep(self, timestep, schedule_timesteps=None):
if schedule_timesteps is None:
schedule_timesteps = self.timesteps
indices = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
pos = 1 if len(indices) > 1 else 0
return indices[pos].item()
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
def _init_step_index(self, timestep):
if self.begin_index is None:
if isinstance(timestep, torch.Tensor):
timestep = timestep.to(self.timesteps.device)
self._step_index = self.index_for_timestep(timestep)
else:
self._step_index = self._begin_index
def step(
self,
model_output: Union[torch.Tensor, np.ndarray],
timestep: Union[float, torch.Tensor],
sample: Union[torch.Tensor, np.ndarray],
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
"""
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
timestep (`float`):
The current discrete timestep in the diffusion chain.
sample (`torch.Tensor`):
A current instance of a sample created by the diffusion process.
generator (`torch.Generator`, *optional*):
A random number generator.
return_dict (`bool`):
Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple.
Returns:
[`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a
tuple is returned where the first element is the sample tensor.
"""
if self.step_index is None:
self._init_step_index(timestep)
if self.state_in_first_order:
sigma = self.sigmas[self.step_index]
sigma_interpol = self.sigmas_interpol[self.step_index]
sigma_up = self.sigmas_up[self.step_index]
sigma_down = self.sigmas_down[self.step_index - 1]
else:
            # 2nd order / KDPM2's method
sigma = self.sigmas[self.step_index - 1]
sigma_interpol = self.sigmas_interpol[self.step_index - 1]
sigma_up = self.sigmas_up[self.step_index - 1]
sigma_down = self.sigmas_down[self.step_index - 1]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
gamma = 0
sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
device = model_output.device
noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator)
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
pred_original_sample = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample")
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
)
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
derivative = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
dt = sigma_interpol - sigma_hat
# store for 2nd order step
self.sample = sample
self.dt = dt
prev_sample = sample + derivative * dt
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
derivative = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
dt = sigma_down - sigma_hat
sample = self.sample
self.sample = None
prev_sample = sample + derivative * dt
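            # ancestral part: after the deterministic move towards sigma_down, re-inject fresh noise
            # scaled by sigma_up so the marginal noise level matches the next sigma in the schedule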
prev_sample = prev_sample + noise * sigma_up
# upon completion increase step index by one
self._step_index += 1
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample)
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise
def add_noise(
self,
original_samples: torch.Tensor,
noise: torch.Tensor,
timesteps: torch.Tensor,
) -> torch.Tensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
# mps does not support float64
schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
else:
schedule_timesteps = self.timesteps.to(original_samples.device)
timesteps = timesteps.to(original_samples.device)
# self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index
if self.begin_index is None:
step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
elif self.step_index is not None:
# add_noise is called after first denoising step (for inpainting)
step_indices = [self.step_index] * timesteps.shape[0]
else:
# add noise is called before first denoising step to create initial latent(img2img)
step_indices = [self.begin_index] * timesteps.shape[0]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
sigma = sigma.unsqueeze(-1)
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__(self):
return self.config.num_train_timesteps
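# A hedged usage sketch (not part of the original file): how a pipeline typically drives this
# scheduler. `denoise` is a hypothetical stand-in for a diffusion model that predicts noise
# from (sample, timestep).
def _example_kdpm2_ancestral_loop(denoise, num_inference_steps=25, device="cpu"):
    scheduler = KDPM2AncestralDiscreteScheduler()
    scheduler.set_timesteps(num_inference_steps, device=device)
    # start from pure noise scaled to the initial sigma of the schedule
    sample = torch.randn(1, 4, 64, 64, device=device) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = denoise(model_input, t)  # hypothetical model call
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample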


# File: diffusers/src/diffusers/schedulers/scheduling_utils_flax.py
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from huggingface_hub.utils import validate_hf_hub_args
from ..utils import BaseOutput, PushToHubMixin
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
# NOTE: We make this type an enum because it simplifies usage in docs and prevents
# circular imports when used for `_compatibles` within the schedulers module.
# When it's used as a type in pipelines, it really is a Union because the actual
# scheduler instance is passed in.
class FlaxKarrasDiffusionSchedulers(Enum):
FlaxDDIMScheduler = 1
FlaxDDPMScheduler = 2
FlaxPNDMScheduler = 3
FlaxLMSDiscreteScheduler = 4
FlaxDPMSolverMultistepScheduler = 5
FlaxEulerDiscreteScheduler = 6
@dataclass
class FlaxSchedulerOutput(BaseOutput):
"""
Base class for the scheduler's step function output.
Args:
prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
"""
prev_sample: jnp.ndarray
class FlaxSchedulerMixin(PushToHubMixin):
"""
Mixin containing common functions for the schedulers.
Class attributes:
- **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that
`from_config` can be used from a class different than the one used to save the config (should be overridden
by parent class).
"""
config_name = SCHEDULER_CONFIG_NAME
ignore_for_config = ["dtype"]
_compatibles = []
has_compatibles = True
@classmethod
@validate_hf_hub_args
def from_pretrained(
cls,
pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
subfolder: Optional[str] = None,
return_unused_kwargs=False,
**kwargs,
):
r"""
        Instantiate a scheduler class from a pre-defined JSON configuration file.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
Can be either:
- A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an
organization name, like `google/ddpm-celebahq-256`.
- A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`],
e.g., `./my_model_directory/`.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo (either remote in
huggingface.co or downloaded locally), you can specify the folder name here.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
Whether kwargs that are not consumed by the Python class should be returned or not.
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (i.e., do not try to download the model).
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
models](https://huggingface.co/docs/hub/models-gated#gated-models).
</Tip>
<Tip>
Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
use this method in a firewalled environment.
</Tip>
"""
config, kwargs = cls.load_config(
pretrained_model_name_or_path=pretrained_model_name_or_path,
subfolder=subfolder,
return_unused_kwargs=True,
**kwargs,
)
scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
state = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
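    # A hedged usage sketch (not part of the original file); the repo id is a placeholder:
    #
    #     scheduler, state = FlaxDDPMScheduler.from_pretrained("some/repo-id", subfolder="scheduler")
    #     # `state` holds the schedule arrays and is threaded through the scheduler's
    #     # `set_timesteps` / `step` calls, since Flax schedulers are stateless objects.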
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~FlaxSchedulerMixin.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
@property
def compatibles(self):
"""
Returns all schedulers that are compatible with this scheduler
Returns:
`List[SchedulerMixin]`: List of compatible schedulers
"""
return self._get_compatibles()
@classmethod
def _get_compatibles(cls):
compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
diffusers_library = importlib.import_module(__name__.split(".")[0])
compatible_classes = [
getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
assert len(shape) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
alphas: jnp.ndarray
betas: jnp.ndarray
alphas_cumprod: jnp.ndarray
@classmethod
def create(cls, scheduler):
config = scheduler.config
if config.trained_betas is not None:
betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
elif config.beta_schedule == "linear":
betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
betas = (
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
)
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
else:
raise NotImplementedError(
f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
)
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas, axis=0)
return cls(
alphas=alphas,
betas=betas,
alphas_cumprod=alphas_cumprod,
)
def get_sqrt_alpha_prod(
state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
alphas_cumprod = state.alphas_cumprod
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
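# A hedged sketch (not part of the original file): building the shared scheduler state directly
# from a cosine schedule and adding noise at two timesteps. Shapes and values are illustrative.
def _example_add_noise_common(num_train_timesteps: int = 1000) -> jnp.ndarray:
    betas = betas_for_alpha_bar(num_train_timesteps)
    alphas = 1.0 - betas
    state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))
    sample = jnp.ones((2, 3, 8, 8))
    noise = jnp.zeros_like(sample)
    timesteps = jnp.array([0, num_train_timesteps - 1])  # one timestep per batch element
    return add_noise_common(state, sample, noise, timesteps)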


# File: diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class StableDiffusionKDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers", "k_diffusion"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers", "k_diffusion"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "k_diffusion"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "k_diffusion"])
class StableDiffusionXLKDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers", "k_diffusion"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers", "k_diffusion"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "k_diffusion"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers", "k_diffusion"])
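# A hedged illustration (not part of the original file): these dummies let `import diffusers`
# succeed even when optional backends are missing; instantiating one then raises an ImportError
# explaining which backends (torch, transformers, k_diffusion) need to be installed, e.g.
#
#     from diffusers.utils import dummy_torch_and_transformers_and_k_diffusion_objects as dummies
#     pipe = dummies.StableDiffusionKDiffusionPipeline()  # raises ImportError if a backend is missing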


# File: diffusers testing utilities
import functools
import importlib
import inspect
import io
import logging
import multiprocessing
import os
import random
import re
import struct
import sys
import tempfile
import time
import unittest
import urllib.parse
from contextlib import contextmanager
from io import BytesIO, StringIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
from numpy.linalg import norm
from packaging import version
from .import_utils import (
BACKENDS_MAPPING,
    is_accelerate_available,
    is_compel_available,
is_flax_available,
is_note_seq_available,
is_onnx_available,
is_opencv_available,
is_peft_available,
is_timm_available,
is_torch_available,
is_torch_version,
is_torchsde_available,
is_transformers_available,
)
from .logging import get_logger
global_rng = random.Random()
logger = get_logger(__name__)
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) > version.parse("0.5")
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) > version.parse("4.33")
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if is_torch_available():
import torch
# Set a backend environment variable for any extra module import required for a custom accelerator
if "DIFFUSERS_TEST_BACKEND" in os.environ:
backend = os.environ["DIFFUSERS_TEST_BACKEND"]
try:
_ = importlib.import_module(backend)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module \
to enable a specified backend:\n{e}"
) from e
if "DIFFUSERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}"
) from e
logger.info(f"torch_device overrode to {torch_device}")
else:
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
is_torch_higher_equal_than_1_12 = version.parse(
version.parse(torch.__version__).base_version
) >= version.parse("1.12")
if is_torch_higher_equal_than_1_12:
# Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
mps_backend_registered = hasattr(torch.backends, "mps")
torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device
def torch_all_close(a, b, *args, **kwargs):
if not is_torch_available():
raise ValueError("PyTorch needs to be installed to use this function.")
if not torch.allclose(a, b, *args, **kwargs):
assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
return True
def numpy_cosine_similarity_distance(a, b):
similarity = np.dot(a, b) / (norm(a) * norm(b))
distance = 1.0 - similarity.mean()
return distance
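# Illustration (not part of the original file): identical directions give ~0.0, orthogonal give 1.0, e.g.
#     numpy_cosine_similarity_distance(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> 1.0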
def print_tensor_test(
tensor,
limit_to_slices=None,
max_torch_print=None,
filename="test_corrections.txt",
expected_tensor_name="expected_slice",
):
if max_torch_print:
torch.set_printoptions(threshold=10_000)
test_name = os.environ.get("PYTEST_CURRENT_TEST")
if not torch.is_tensor(tensor):
tensor = torch.from_numpy(tensor)
if limit_to_slices:
tensor = tensor[0, -3:, -3:, -1]
tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
# format is usually:
# expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
test_file, test_class, test_fn = test_name.split("::")
test_fn = test_fn.split()[0]
with open(filename, "a") as f:
print("::".join([test_file, test_class, test_fn, output_str]), file=f)
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return Path(tests_dir, append_path).as_posix()
else:
return tests_dir
# Taken from the following PR:
# https://github.com/huggingface/accelerate/pull/1964
def str_to_bool(value) -> int:
"""
Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`,
    `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
"""
value = value.lower()
if value in ("y", "yes", "t", "true", "on", "1"):
return 1
elif value in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {value}")
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = str_to_bool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False)
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def nightly(test_case):
"""
Decorator marking a test that runs nightly in the diffusers CI.
    Nightly tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
def is_torch_compile(test_case):
"""
Decorator marking a test that runs compile tests in the diffusers CI.
Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_2(test_case):
"""
Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
"""
return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
test_case
)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
test_case
)
# These decorators are for accelerator-specific behaviours that are not GPU-specific
def require_torch_accelerator(test_case):
"""Decorator marking a test that requires an accelerator backend and PyTorch."""
return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")(
test_case
)
def require_torch_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests
-k "multi_gpu"
"""
if not is_torch_available():
return unittest.skip("test requires PyTorch")(test_case)
import torch
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_torch_accelerator_with_fp16(test_case):
"""Decorator marking a test that requires an accelerator with support for the FP16 data type."""
return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")(
test_case
)
def require_torch_accelerator_with_fp64(test_case):
"""Decorator marking a test that requires an accelerator with support for the FP64 data type."""
return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")(
test_case
)
def require_torch_accelerator_with_training(test_case):
"""Decorator marking a test that requires an accelerator with support for training."""
return unittest.skipUnless(
is_torch_available() and backend_supports_training(torch_device),
"test requires accelerator with training support",
)(test_case)
def skip_mps(test_case):
"""Decorator marking a test to skip if torch_device is 'mps'"""
return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
def require_compel(test_case):
"""
Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when
the library is not installed.
"""
return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case)
def require_onnxruntime(test_case):
"""
Decorator marking a test that requires onnxruntime. These tests are skipped when onnxruntime isn't installed.
"""
return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case)
def require_note_seq(test_case):
"""
Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed.
"""
return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)
def require_torchsde(test_case):
"""
Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
"""
return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case)
def require_peft_backend(test_case):
"""
Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and
transformers.
"""
return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case)
def require_timm(test_case):
"""
Decorator marking a test that requires timm. These tests are skipped when timm isn't installed.
"""
return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case)
def require_peft_version_greater(peft_version):
"""
Decorator marking a test that requires PEFT backend with a specific version, this would require some specific
versions of PEFT and transformers.
"""
def decorator(test_case):
correct_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) > version.parse(peft_version)
return unittest.skipUnless(
correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}"
)(test_case)
return decorator
def require_accelerate_version_greater(accelerate_version):
def decorator(test_case):
        correct_accelerate_version = is_accelerate_available() and version.parse(
version.parse(importlib.metadata.version("accelerate")).base_version
) > version.parse(accelerate_version)
return unittest.skipUnless(
correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}."
)(test_case)
return decorator
def deprecate_after_peft_backend(test_case):
"""
Decorator marking a test that will be skipped after PEFT backend
"""
return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case)
def get_python_version():
sys_info = sys.version_info
major, minor = sys_info.major, sys_info.minor
return major, minor
def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray:
if isinstance(arry, str):
if local_path is not None:
# local_path can be passed to correct images of tests
return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix()
elif arry.startswith("http://") or arry.startswith("https://"):
response = requests.get(arry)
response.raise_for_status()
arry = np.load(BytesIO(response.content))
elif os.path.isfile(arry):
arry = np.load(arry)
else:
raise ValueError(
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path"
)
elif isinstance(arry, np.ndarray):
pass
else:
raise ValueError(
"Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a"
" ndarray."
)
return arry
def load_pt(url: str):
response = requests.get(url)
response.raise_for_status()
arry = torch.load(BytesIO(response.content))
return arry
def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
"""
Loads `image` to a PIL Image.
Args:
image (`str` or `PIL.Image.Image`):
The image to convert to the PIL Image format.
Returns:
`PIL.Image.Image`:
A PIL Image.
"""
if isinstance(image, str):
if image.startswith("http://") or image.startswith("https://"):
image = PIL.Image.open(requests.get(image, stream=True).raw)
elif os.path.isfile(image):
image = PIL.Image.open(image)
else:
raise ValueError(
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
)
elif isinstance(image, PIL.Image.Image):
image = image
else:
raise ValueError(
"Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
def preprocess_image(image: PIL.Image.Image, batch_size: int):
w, h = image.size
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
image = image.resize((w, h), resample=PIL.Image.LANCZOS)
image = np.array(image).astype(np.float32) / 255.0
image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
image = torch.from_numpy(image)
return 2.0 * image - 1.0
def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str:
if output_gif_path is None:
output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name
image[0].save(
output_gif_path,
save_all=True,
append_images=image[1:],
optimize=False,
duration=100,
loop=0,
)
return output_gif_path
@contextmanager
def buffered_writer(raw_f):
f = io.BufferedWriter(raw_f)
yield f
f.flush()
def export_to_ply(mesh, output_ply_path: str = None):
"""
Write a PLY file for a mesh.
"""
if output_ply_path is None:
output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name
coords = mesh.verts.detach().cpu().numpy()
faces = mesh.faces.cpu().numpy()
rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
with buffered_writer(open(output_ply_path, "wb")) as f:
f.write(b"ply\n")
f.write(b"format binary_little_endian 1.0\n")
f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
f.write(b"property float x\n")
f.write(b"property float y\n")
f.write(b"property float z\n")
if rgb is not None:
f.write(b"property uchar red\n")
f.write(b"property uchar green\n")
f.write(b"property uchar blue\n")
if faces is not None:
f.write(bytes(f"element face {len(faces)}\n", "ascii"))
f.write(b"property list uchar int vertex_index\n")
f.write(b"end_header\n")
if rgb is not None:
rgb = (rgb * 255.499).round().astype(int)
vertices = [
(*coord, *rgb)
for coord, rgb in zip(
coords.tolist(),
rgb.tolist(),
)
]
format = struct.Struct("<3f3B")
for item in vertices:
f.write(format.pack(*item))
else:
format = struct.Struct("<3f")
for vertex in coords.tolist():
f.write(format.pack(*vertex))
if faces is not None:
format = struct.Struct("<B3I")
for tri in faces.tolist():
f.write(format.pack(len(tri), *tri))
return output_ply_path
def export_to_obj(mesh, output_obj_path: str = None):
if output_obj_path is None:
output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name
verts = mesh.verts.detach().cpu().numpy()
faces = mesh.faces.cpu().numpy()
vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
vertices = [
"{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist())
]
faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces.tolist()]
combined_data = ["v " + vertex for vertex in vertices] + faces
with open(output_obj_path, "w") as f:
f.writelines("\n".join(combined_data))
def export_to_video(video_frames: List[np.ndarray], output_video_path: str = None) -> str:
if is_opencv_available():
import cv2
else:
raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
if output_video_path is None:
output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
h, w, c = video_frames[0].shape
video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h))
for i in range(len(video_frames)):
img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
video_writer.write(img)
return output_video_path
def load_hf_numpy(path) -> np.ndarray:
base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main"
if not path.startswith("http://") and not path.startswith("https://"):
path = os.path.join(base_url, urllib.parse.quote(path))
return load_numpy(path)
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
directory. The report files are prefixed with the test suite name.
    This function emulates --durations and -rA pytest arguments.
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
there.
Args:
- tr: `terminalreporter` passed from `conftest.py`
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
    NB: this function taps into a private _pytest API and, while unlikely, it could break should
    pytest make internal changes - it also calls default internal methods of terminalreporter which
    can be hijacked by various `pytest-` plugins and interfere.
"""
from _pytest.config import create_terminal_writer
if not len(id):
id = "tests"
config = tr.config
orig_writer = config.get_terminal_writer()
orig_tbstyle = config.option.tbstyle
orig_reportchars = tr.reportchars
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
"failures_long",
"failures_short",
"failures_line",
"passes",
"stats",
"summary_short",
"warnings",
]
}
# custom durations report
# note: there is no need to call pytest --durations=XX to get this separate report
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if dlist:
dlist.sort(key=lambda x: x.duration, reverse=True)
with open(report_files["durations"], "w") as f:
durations_min = 0.05 # sec
f.write("slowest durations\n")
for i, rep in enumerate(dlist):
if rep.duration < durations_min:
f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
break
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
def summary_failures_short(tr):
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
reports = tr.getreports("failed")
if not reports:
return
tr.write_sep("=", "FAILURES SHORT STACK")
for rep in reports:
msg = tr._getfailureheadline(rep)
tr.write_sep("_", msg, red=True, bold=True)
# chop off the optional leading extra frames, leaving only the last one
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
tr._tw.line(longrepr)
# note: not printing out any rep.sections to keep the report short
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
# pytest-instafail does that)
# report failures with line/short/long styles
config.option.tbstyle = "auto" # full tb
with open(report_files["failures_long"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
# config.option.tbstyle = "short" # short tb
with open(report_files["failures_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
summary_failures_short(tr)
config.option.tbstyle = "line" # one line per error
with open(report_files["failures_line"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_failures()
with open(report_files["errors"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_errors()
with open(report_files["warnings"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_warnings() # normal warnings
tr.summary_warnings() # final warnings
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
with open(report_files["passes"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_passes()
with open(report_files["summary_short"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.short_test_summary()
with open(report_files["stats"], "w") as f:
tr._tw = create_terminal_writer(config, f)
tr.summary_stats()
# restore:
tr._tw = orig_writer
tr.reportchars = orig_reportchars
config.option.tbstyle = orig_tbstyle
# Copied from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
"""
To decorate flaky tests. They will be retried on failures.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
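# A hedged usage sketch (not part of the original file):
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="occasionally fails on shared CI runners")
#     def test_numerical_output(self):
#         ...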
# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f'{results["error"]}')
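# A hedged sketch (not part of the original file) of a `target_func` compatible with the queue
# protocol above: it reads its inputs from the input queue and reports {"error": ...} on the
# output queue (None on success).
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             # ... run the actual assertions on `inputs` here ...
#         except Exception as e:
#             error = f"{e}"
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()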
class CaptureLogger:
"""
    Context manager to capture `logging` streams.

    Args:
        logger: `logging` logger object

    Returns:
        The captured output is available via `self.out`
Example:
```python
>>> from diffusers import logging
>>> from diffusers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
    >>> assert cl.out == msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
def enable_full_determinism():
"""
Helper function for reproducible behavior during distributed training. See
- https://pytorch.org/docs/stable/notes/randomness.html for pytorch
"""
# Enable PyTorch deterministic mode. This potentially requires either the environment
# variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
# depending on the CUDA version, so we set them both here
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.use_deterministic_algorithms(True)
# Enable CUDNN deterministic mode
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = False
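# Typical usage (as the model test modules in this repository do): call once at import
# time so that every test in the module runs with deterministic kernels, e.g.
#
#     enable_full_determinism()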
def disable_full_determinism():
os.environ["CUDA_LAUNCH_BLOCKING"] = "0"
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ""
torch.use_deterministic_algorithms(False)
# Utils for custom and alternative accelerator devices
def _is_torch_fp16_available(device):
if not is_torch_available():
return False
import torch
device = torch.device(device)
try:
x = torch.zeros((2, 2), dtype=torch.float16).to(device)
_ = torch.mul(x, x)
return True
except Exception as e:
if device.type == "cuda":
raise ValueError(
f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}"
)
return False
def _is_torch_fp64_available(device):
if not is_torch_available():
return False
import torch
device = torch.device(device)
try:
x = torch.zeros((2, 2), dtype=torch.float64).to(device)
_ = torch.mul(x, x)
return True
except Exception as e:
if device.type == "cuda":
raise ValueError(
f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}"
)
return False
# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch
if is_torch_available():
# Behaviour flags
BACKEND_SUPPORTS_TRAINING = {"cuda": True, "cpu": True, "mps": False, "default": True}
# Function definitions
BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "mps": None, "default": None}
    BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "mps": lambda: 0, "default": lambda: 0}
BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
# This dispatches a defined function according to the accelerator from the function definitions.
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
if device not in dispatch_table:
return dispatch_table["default"](*args, **kwargs)
fn = dispatch_table[device]
    # Some device-agnostic functions return values. If the entry is `None`, return it
    # as-is and let the caller guard against it.
if fn is None:
return None
return fn(*args, **kwargs)
# These are callables which automatically dispatch the function specific to the accelerator
def backend_manual_seed(device: str, seed: int):
return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)
def backend_empty_cache(device: str):
return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)
def backend_device_count(device: str):
return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
# These are callables which return boolean behaviour flags and can be used to specify some
# device agnostic alternative where the feature is unsupported.
def backend_supports_training(device: str):
if not is_torch_available():
return False
if device not in BACKEND_SUPPORTS_TRAINING:
device = "default"
return BACKEND_SUPPORTS_TRAINING[device]
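# Illustrative usage sketch of the behaviour flag inside a test (hypothetical test body):
# skip gracefully on accelerators that do not support training, such as "mps".
#
#     if not backend_supports_training(torch_device):
#         self.skipTest(f"Training is not supported on {torch_device}.")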
# Guard for when Torch is not available
if is_torch_available():
# Update device function dict mapping
def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
try:
# Try to import the function directly
spec_fn = getattr(device_spec_module, attribute_name)
device_fn_dict[torch_device] = spec_fn
except AttributeError as e:
# If the function doesn't exist, and there is no default, throw an error
if "default" not in device_fn_dict:
raise AttributeError(
f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
) from e
if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ:
device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"]
if not Path(device_spec_path).is_file():
raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}")
try:
import_name = device_spec_path[: device_spec_path.index(".py")]
except ValueError as e:
raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e
device_spec_module = importlib.import_module(import_name)
try:
device_name = device_spec_module.DEVICE_NAME
except AttributeError:
raise AttributeError("Device spec file did not contain `DEVICE_NAME`")
if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name:
msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name."
raise ValueError(msg)
torch_device = device_name
# Add one entry here for each `BACKEND_*` dictionary.
update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING")
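# Illustrative sketch of a device specification module (a hypothetical `npu_spec.py`
# pointed to by the `DIFFUSERS_TEST_DEVICE_SPEC` environment variable). `DEVICE_NAME` is
# required; the remaining attributes are optional overrides picked up by
# `update_mapping_from_spec` above. The `torch.npu` calls are assumptions for the example.
#
#     import torch
#
#     DEVICE_NAME = "npu"
#     MANUAL_SEED_FN = torch.npu.manual_seed
#     EMPTY_CACHE_FN = torch.npu.empty_cache
#     DEVICE_COUNT_FN = torch.npu.device_count
#     SUPPORTS_TRAINING = True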
|
diffusers/src/diffusers/utils/testing_utils.py/0
|
{
"file_path": "diffusers/src/diffusers/utils/testing_utils.py",
"repo_id": "diffusers",
"token_count": 14935
}
| 169
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from datasets import load_dataset
from parameterized import parameterized
from diffusers import (
AsymmetricAutoencoderKL,
AutoencoderKL,
AutoencoderKLTemporalDecoder,
AutoencoderOobleck,
AutoencoderTiny,
ConsistencyDecoderVAE,
StableDiffusionPipeline,
)
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.loading_utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
floats_tensor,
load_hf_numpy,
require_torch_accelerator,
require_torch_accelerator_with_fp16,
require_torch_accelerator_with_training,
require_torch_gpu,
skip_mps,
slow,
torch_all_close,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
def get_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None):
block_out_channels = block_out_channels or [2, 4]
norm_num_groups = norm_num_groups or 2
init_dict = {
"block_out_channels": block_out_channels,
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
"up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels),
"latent_channels": 4,
"norm_num_groups": norm_num_groups,
}
return init_dict
def get_asym_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None):
block_out_channels = block_out_channels or [2, 4]
norm_num_groups = norm_num_groups or 2
init_dict = {
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
"down_block_out_channels": block_out_channels,
"layers_per_down_block": 1,
"up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels),
"up_block_out_channels": block_out_channels,
"layers_per_up_block": 1,
"act_fn": "silu",
"latent_channels": 4,
"norm_num_groups": norm_num_groups,
"sample_size": 32,
"scaling_factor": 0.18215,
}
return init_dict
def get_autoencoder_tiny_config(block_out_channels=None):
block_out_channels = (len(block_out_channels) * [32]) if block_out_channels is not None else [32, 32]
init_dict = {
"in_channels": 3,
"out_channels": 3,
"encoder_block_out_channels": block_out_channels,
"decoder_block_out_channels": block_out_channels,
"num_encoder_blocks": [b // min(block_out_channels) for b in block_out_channels],
"num_decoder_blocks": [b // min(block_out_channels) for b in reversed(block_out_channels)],
}
return init_dict
def get_consistency_vae_config(block_out_channels=None, norm_num_groups=None):
block_out_channels = block_out_channels or [2, 4]
norm_num_groups = norm_num_groups or 2
return {
"encoder_block_out_channels": block_out_channels,
"encoder_in_channels": 3,
"encoder_out_channels": 4,
"encoder_down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
"decoder_add_attention": False,
"decoder_block_out_channels": block_out_channels,
"decoder_down_block_types": ["ResnetDownsampleBlock2D"] * len(block_out_channels),
"decoder_downsample_padding": 1,
"decoder_in_channels": 7,
"decoder_layers_per_block": 1,
"decoder_norm_eps": 1e-05,
"decoder_norm_num_groups": norm_num_groups,
"encoder_norm_num_groups": norm_num_groups,
"decoder_num_train_timesteps": 1024,
"decoder_out_channels": 6,
"decoder_resnet_time_scale_shift": "scale_shift",
"decoder_time_embedding_type": "learned",
"decoder_up_block_types": ["ResnetUpsampleBlock2D"] * len(block_out_channels),
"scaling_factor": 1,
"latent_channels": 4,
}
def get_autoencoder_oobleck_config(block_out_channels=None):
init_dict = {
"encoder_hidden_size": 12,
"decoder_channels": 12,
"decoder_input_channels": 6,
"audio_channels": 2,
"downsampling_ratios": [2, 4],
"channel_multiples": [1, 2],
}
return init_dict
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderKL
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 4
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = get_autoencoder_kl_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_forward_signature(self):
pass
def test_training(self):
pass
@require_torch_accelerator_with_training
def test_gradient_checkpointing(self):
# enable deterministic behavior for gradient checkpointing
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict)
model.to(torch_device)
assert not model.is_gradient_checkpointing and model.training
out = model(**inputs_dict).sample
        # run the backwards pass on the model. For simplicity, we don't compute a real loss
        # and instead backprop on the mean difference to a random target
model.zero_grad()
labels = torch.randn_like(out)
loss = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
model_2 = self.model_class(**init_dict)
# clone model
model_2.load_state_dict(model.state_dict())
model_2.to(torch_device)
model_2.enable_gradient_checkpointing()
assert model_2.is_gradient_checkpointing and model_2.training
out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the checkpointed model against the same random target
        # and again backprop on the mean difference
model_2.zero_grad()
loss_2 = (out_2 - labels).mean()
loss_2.backward()
        # compare the output and parameter gradients
self.assertTrue((loss - loss_2).abs() < 1e-5)
named_params = dict(model.named_parameters())
named_params_2 = dict(model_2.named_parameters())
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
def test_from_pretrained_hub(self):
model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
self.assertIsNotNone(model)
self.assertEqual(len(loading_info["missing_keys"]), 0)
model.to(torch_device)
image = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def test_output_pretrained(self):
model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
model = model.to(torch_device)
model.eval()
# Keep generator on CPU for non-CUDA devices to compare outputs with CPU result tensors
generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda"
if torch_device != "mps":
generator = torch.Generator(device=generator_device).manual_seed(0)
else:
generator = torch.manual_seed(0)
image = torch.randn(
1,
model.config.in_channels,
model.config.sample_size,
model.config.sample_size,
generator=torch.manual_seed(0),
)
image = image.to(torch_device)
with torch.no_grad():
output = model(image, sample_posterior=True, generator=generator).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
expected_output_slice = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
]
)
elif generator_device == "cpu":
expected_output_slice = torch.tensor(
[
-0.1352,
0.0878,
0.0419,
-0.0818,
-0.1069,
0.0688,
-0.1458,
-0.4446,
-0.0026,
]
)
else:
expected_output_slice = torch.tensor(
[
-0.2421,
0.4642,
0.2507,
-0.0438,
0.0682,
0.3160,
-0.2018,
-0.0727,
0.2485,
]
)
self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
class AsymmetricAutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AsymmetricAutoencoderKL
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 4
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
mask = torch.ones((batch_size, 1) + sizes).to(torch_device)
return {"sample": image, "mask": mask}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = get_asym_autoencoder_kl_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_forward_signature(self):
pass
def test_forward_with_norm_groups(self):
pass
class AutoencoderTinyTests(ModelTesterMixin, unittest.TestCase):
model_class = AutoencoderTiny
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 4
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = get_autoencoder_tiny_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_outputs_equivalence(self):
pass
class ConsistencyDecoderVAETests(ModelTesterMixin, unittest.TestCase):
model_class = ConsistencyDecoderVAE
main_input_name = "sample"
base_precision = 1e-2
forward_requires_fresh_args = True
def inputs_dict(self, seed=None):
if seed is None:
generator = torch.Generator("cpu").manual_seed(0)
else:
generator = torch.Generator("cpu").manual_seed(seed)
image = randn_tensor((4, 3, 32, 32), generator=generator, device=torch.device(torch_device))
return {"sample": image, "generator": generator}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
@property
def init_dict(self):
return get_consistency_vae_config()
def prepare_init_args_and_inputs_for_common(self):
return self.init_dict, self.inputs_dict()
@unittest.skip
def test_training(self):
...
@unittest.skip
def test_ema_training(self):
...
class AutoencoderKLTemporalDecoderFastTests(ModelTesterMixin, unittest.TestCase):
model_class = AutoencoderKLTemporalDecoder
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 3
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
num_frames = 3
return {"sample": image, "num_frames": num_frames}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"latent_channels": 4,
"layers_per_block": 2,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_forward_signature(self):
pass
def test_training(self):
pass
@unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
def test_gradient_checkpointing(self):
# enable deterministic behavior for gradient checkpointing
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict)
model.to(torch_device)
assert not model.is_gradient_checkpointing and model.training
out = model(**inputs_dict).sample
        # run the backwards pass on the model. For simplicity, we don't compute a real loss
        # and instead backprop on the mean difference to a random target
model.zero_grad()
labels = torch.randn_like(out)
loss = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
model_2 = self.model_class(**init_dict)
# clone model
model_2.load_state_dict(model.state_dict())
model_2.to(torch_device)
model_2.enable_gradient_checkpointing()
assert model_2.is_gradient_checkpointing and model_2.training
out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the checkpointed model against the same random target
        # and again backprop on the mean difference
model_2.zero_grad()
loss_2 = (out_2 - labels).mean()
loss_2.backward()
        # compare the output and parameter gradients
self.assertTrue((loss - loss_2).abs() < 1e-5)
named_params = dict(model.named_parameters())
named_params_2 = dict(model_2.named_parameters())
for name, param in named_params.items():
if "post_quant_conv" in name:
continue
self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
class AutoencoderOobleckTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderOobleck
main_input_name = "sample"
base_precision = 1e-2
@property
def dummy_input(self):
batch_size = 4
num_channels = 2
seq_len = 24
waveform = floats_tensor((batch_size, num_channels, seq_len)).to(torch_device)
return {"sample": waveform, "sample_posterior": False}
@property
def input_shape(self):
return (2, 24)
@property
def output_shape(self):
return (2, 24)
def prepare_init_args_and_inputs_for_common(self):
init_dict = get_autoencoder_oobleck_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_forward_signature(self):
pass
def test_forward_with_norm_groups(self):
pass
@slow
class AutoencoderTinyIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
dtype = torch.float16 if fp16 else torch.float32
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
return image
def get_sd_vae_model(self, model_id="hf-internal-testing/taesd-diffusers", fp16=False):
torch_dtype = torch.float16 if fp16 else torch.float32
model = AutoencoderTiny.from_pretrained(model_id, torch_dtype=torch_dtype)
model.to(torch_device).eval()
return model
@parameterized.expand(
[
[(1, 4, 73, 97), (1, 3, 584, 776)],
[(1, 4, 97, 73), (1, 3, 776, 584)],
[(1, 4, 49, 65), (1, 3, 392, 520)],
[(1, 4, 65, 49), (1, 3, 520, 392)],
[(1, 4, 49, 49), (1, 3, 392, 392)],
]
)
def test_tae_tiling(self, in_shape, out_shape):
model = self.get_sd_vae_model()
model.enable_tiling()
with torch.no_grad():
zeros = torch.zeros(in_shape).to(torch_device)
dec = model.decode(zeros).sample
assert dec.shape == out_shape
def test_stable_diffusion(self):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed=33)
with torch.no_grad():
sample = model(image).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor([0.0093, 0.6385, -0.1274, 0.1631, -0.1762, 0.5232, -0.3108, -0.0382])
assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand([(True,), (False,)])
def test_tae_roundtrip(self, enable_tiling):
# load the autoencoder
model = self.get_sd_vae_model()
if enable_tiling:
model.enable_tiling()
# make a black image with a white square in the middle,
# which is large enough to split across multiple tiles
image = -torch.ones(1, 3, 1024, 1024, device=torch_device)
image[..., 256:768, 256:768] = 1.0
# round-trip the image through the autoencoder
with torch.no_grad():
sample = model(image).sample
        # the autoencoder reconstruction should roughly match the original image
def downscale(x):
return torch.nn.functional.avg_pool2d(x, model.spatial_scale_factor)
assert torch_all_close(downscale(sample), downscale(image), atol=0.125)
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
dtype = torch.float16 if fp16 else torch.float32
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
return image
def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
revision = "fp16" if fp16 else None
torch_dtype = torch.float16 if fp16 else torch.float32
model = AutoencoderKL.from_pretrained(
model_id,
subfolder="vae",
torch_dtype=torch_dtype,
revision=revision,
)
model.to(torch_device)
return model
def get_generator(self, seed=0):
generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda"
if torch_device != "mps":
return torch.Generator(device=generator_device).manual_seed(seed)
return torch.manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[
33,
[-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824],
[-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824],
],
[
47,
[-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089],
[0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131],
],
# fmt: on
]
)
def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
generator = self.get_generator(seed)
with torch.no_grad():
sample = model(image, generator=generator, sample_posterior=True).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
]
)
@require_torch_accelerator_with_fp16
def test_stable_diffusion_fp16(self, seed, expected_slice):
model = self.get_sd_vae_model(fp16=True)
image = self.get_sd_image(seed, fp16=True)
generator = self.get_generator(seed)
with torch.no_grad():
sample = model(image, generator=generator, sample_posterior=True).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[
33,
[-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814],
[-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824],
],
[
47,
[-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085],
[0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131],
],
# fmt: on
]
)
def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
with torch.no_grad():
sample = model(image).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
]
)
@require_torch_accelerator
@skip_mps
def test_stable_diffusion_decode(self, seed, expected_slice):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
with torch.no_grad():
sample = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
]
)
@require_torch_accelerator_with_fp16
def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
model = self.get_sd_vae_model(fp16=True)
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
with torch.no_grad():
sample = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)])
@require_torch_gpu
@unittest.skipIf(
not is_xformers_available(),
reason="xformers is not required when using PyTorch 2.0.",
)
def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
model = self.get_sd_vae_model(fp16=True)
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
with torch.no_grad():
sample = model.decode(encoding).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
sample_2 = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)])
@require_torch_gpu
@unittest.skipIf(
not is_xformers_available(),
reason="xformers is not required when using PyTorch 2.0.",
)
def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
with torch.no_grad():
sample = model.decode(encoding).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
sample_2 = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
]
)
def test_stable_diffusion_encode_sample(self, seed, expected_slice):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
generator = self.get_generator(seed)
with torch.no_grad():
dist = model.encode(image).latent_dist
sample = dist.sample(generator=generator)
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)
tolerance = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
@slow
class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase):
def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
dtype = torch.float16 if fp16 else torch.float32
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
return image
def get_sd_vae_model(self, model_id="cross-attention/asymmetric-autoencoder-kl-x-1-5", fp16=False):
revision = "main"
torch_dtype = torch.float32
model = AsymmetricAutoencoderKL.from_pretrained(
model_id,
torch_dtype=torch_dtype,
revision=revision,
)
model.to(torch_device).eval()
return model
def get_generator(self, seed=0):
generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda"
if torch_device != "mps":
return torch.Generator(device=generator_device).manual_seed(seed)
return torch.manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[
33,
[-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078],
[-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824],
],
[
47,
[0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529],
[-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089],
],
# fmt: on
]
)
def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
generator = self.get_generator(seed)
with torch.no_grad():
sample = model(image, generator=generator, sample_posterior=True).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand(
[
# fmt: off
[
33,
[-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097],
[-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078],
],
[
47,
[0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531],
[0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531],
],
# fmt: on
]
)
def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
with torch.no_grad():
sample = model(image).sample
assert sample.shape == image.shape
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]],
[37, [-0.1820, -0.4345, -0.0455, -0.2923, -0.8035, -0.5089, -0.4795, -0.3106]],
# fmt: on
]
)
@require_torch_accelerator
@skip_mps
def test_stable_diffusion_decode(self, seed, expected_slice):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
with torch.no_grad():
sample = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=2e-3)
@parameterized.expand([(13,), (16,), (37,)])
@require_torch_gpu
@unittest.skipIf(
not is_xformers_available(),
reason="xformers is not required when using PyTorch 2.0.",
)
def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
model = self.get_sd_vae_model()
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
with torch.no_grad():
sample = model.decode(encoding).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
sample_2 = model.decode(encoding).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(sample, sample_2, atol=5e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
]
)
def test_stable_diffusion_encode_sample(self, seed, expected_slice):
model = self.get_sd_vae_model()
image = self.get_sd_image(seed)
generator = self.get_generator(seed)
with torch.no_grad():
dist = model.encode(image).latent_dist
sample = dist.sample(generator=generator)
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
expected_output_slice = torch.tensor(expected_slice)
tolerance = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
@slow
class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@torch.no_grad()
def test_encode_decode(self):
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update
vae.to(torch_device)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg"
).resize((256, 256))
image = torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[
None, :, :, :
].cuda()
latent = vae.encode(image).latent_dist.mean
sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample
actual_output = sample[0, :2, :2, :2].flatten().cpu()
expected_output = torch.tensor([-0.0141, -0.0014, 0.0115, 0.0086, 0.1051, 0.1053, 0.1031, 0.1024])
assert torch_all_close(actual_output, expected_output, atol=5e-3)
def test_sd(self):
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None)
pipe.to(torch_device)
out = pipe(
"horse",
num_inference_steps=2,
output_type="pt",
generator=torch.Generator("cpu").manual_seed(0),
).images[0]
actual_output = out[:2, :2, :2].flatten().cpu()
expected_output = torch.tensor([0.7686, 0.8228, 0.6489, 0.7455, 0.8661, 0.8797, 0.8241, 0.8759])
assert torch_all_close(actual_output, expected_output, atol=5e-3)
def test_encode_decode_f16(self):
vae = ConsistencyDecoderVAE.from_pretrained(
"openai/consistency-decoder", torch_dtype=torch.float16
) # TODO - update
vae.to(torch_device)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg"
).resize((256, 256))
image = (
torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[None, :, :, :]
.half()
.cuda()
)
latent = vae.encode(image).latent_dist.mean
sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample
actual_output = sample[0, :2, :2, :2].flatten().cpu()
expected_output = torch.tensor(
[-0.0111, -0.0125, -0.0017, -0.0007, 0.1257, 0.1465, 0.1450, 0.1471],
dtype=torch.float16,
)
assert torch_all_close(actual_output, expected_output, atol=5e-3)
def test_sd_f16(self):
vae = ConsistencyDecoderVAE.from_pretrained(
"openai/consistency-decoder", torch_dtype=torch.float16
) # TODO - update
pipe = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
torch_dtype=torch.float16,
vae=vae,
safety_checker=None,
)
pipe.to(torch_device)
out = pipe(
"horse",
num_inference_steps=2,
output_type="pt",
generator=torch.Generator("cpu").manual_seed(0),
).images[0]
actual_output = out[:2, :2, :2].flatten().cpu()
expected_output = torch.tensor(
[0.0000, 0.0249, 0.0000, 0.0000, 0.1709, 0.2773, 0.0471, 0.1035],
dtype=torch.float16,
)
assert torch_all_close(actual_output, expected_output, atol=5e-3)
def test_vae_tiling(self):
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None, torch_dtype=torch.float16
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
out_1 = pipe(
"horse",
num_inference_steps=2,
output_type="pt",
generator=torch.Generator("cpu").manual_seed(0),
).images[0]
# make sure tiled vae decode yields the same result
pipe.enable_vae_tiling()
out_2 = pipe(
"horse",
num_inference_steps=2,
output_type="pt",
generator=torch.Generator("cpu").manual_seed(0),
).images[0]
assert torch_all_close(out_1, out_2, atol=5e-3)
# test that tiled decode works with various shapes
shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)]
with torch.no_grad():
for shape in shapes:
image = torch.zeros(shape, device=torch_device, dtype=pipe.vae.dtype)
pipe.vae.decode(image)
@slow
class AutoencoderOobleckIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def _load_datasamples(self, num_samples):
ds = load_dataset(
"hf-internal-testing/librispeech_asr_dummy", "clean", split="validation", trust_remote_code=True
)
# automatic decoding with librispeech
speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
return torch.nn.utils.rnn.pad_sequence(
[torch.from_numpy(x["array"]) for x in speech_samples], batch_first=True
)
def get_audio(self, audio_sample_size=2097152, fp16=False):
dtype = torch.float16 if fp16 else torch.float32
audio = self._load_datasamples(2).to(torch_device).to(dtype)
# pad / crop to audio_sample_size
audio = torch.nn.functional.pad(audio[:, :audio_sample_size], pad=(0, audio_sample_size - audio.shape[-1]))
# todo channel
audio = audio.unsqueeze(1).repeat(1, 2, 1).to(torch_device)
return audio
def get_oobleck_vae_model(self, model_id="stabilityai/stable-audio-open-1.0", fp16=False):
torch_dtype = torch.float16 if fp16 else torch.float32
model = AutoencoderOobleck.from_pretrained(
model_id,
subfolder="vae",
torch_dtype=torch_dtype,
)
model.to(torch_device)
return model
def get_generator(self, seed=0):
generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda"
if torch_device != "mps":
return torch.Generator(device=generator_device).manual_seed(seed)
return torch.manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192],
[44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196],
# fmt: on
]
)
def test_stable_diffusion(self, seed, expected_slice, expected_mean_absolute_diff):
model = self.get_oobleck_vae_model()
audio = self.get_audio()
generator = self.get_generator(seed)
with torch.no_grad():
sample = model(audio, generator=generator, sample_posterior=True).sample
assert sample.shape == audio.shape
assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6
output_slice = sample[-1, 1, 5:10].cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=1e-5)
def test_stable_diffusion_mode(self):
model = self.get_oobleck_vae_model()
audio = self.get_audio()
with torch.no_grad():
sample = model(audio, sample_posterior=False).sample
assert sample.shape == audio.shape
@parameterized.expand(
[
# fmt: off
[33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192],
[44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196],
# fmt: on
]
)
def test_stable_diffusion_encode_decode(self, seed, expected_slice, expected_mean_absolute_diff):
model = self.get_oobleck_vae_model()
audio = self.get_audio()
generator = self.get_generator(seed)
with torch.no_grad():
x = audio
posterior = model.encode(x).latent_dist
z = posterior.sample(generator=generator)
sample = model.decode(z).sample
# (batch_size, latent_dim, sequence_length)
assert posterior.mean.shape == (audio.shape[0], model.config.decoder_input_channels, 1024)
assert sample.shape == audio.shape
assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6
output_slice = sample[-1, 1, 5:10].cpu()
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=1e-5)
|
diffusers/tests/models/autoencoders/test_models_vae.py/0
|
{
"file_path": "diffusers/tests/models/autoencoders/test_models_vae.py",
"repo_id": "diffusers",
"token_count": 21124
}
| 170
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import LatteTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class LatteTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = LatteTransformer3DModel
main_input_name = "hidden_states"
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"enable_temporal_attentions": True,
}
@property
def input_shape(self):
return (4, 1, 8, 8)
@property
def output_shape(self):
return (8, 1, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 8,
"num_layers": 1,
"patch_size": 2,
"attention_head_dim": 4,
"num_attention_heads": 2,
"caption_channels": 8,
"in_channels": 4,
"cross_attention_dim": 8,
"out_channels": 8,
"attention_bias": True,
"activation_fn": "gelu-approximate",
"num_embeds_ada_norm": 1000,
"norm_type": "ada_norm_single",
"norm_elementwise_affine": False,
"norm_eps": 1e-6,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
super().test_output(
expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
)
|
diffusers/tests/models/transformers/test_models_transformer_latte.py/0
|
{
"file_path": "diffusers/tests/models/transformers/test_models_transformer_latte.py",
"repo_id": "diffusers",
"token_count": 1169
}
| 171
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align PATH_TO_DIFFUSERS in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
def test_find_backend(self):
simple_backend = find_backend(" if not is_torch_available():")
self.assertEqual(simple_backend, "torch")
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
self.assertEqual(double_backend, "torch_and_transformers")
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
triple_backend = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
)
self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
def test_read_init(self):
objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("torch", objects)
self.assertIn("torch_and_transformers", objects)
self.assertIn("flax_and_transformers", objects)
self.assertIn("torch_and_transformers_and_onnx", objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel", objects["torch"])
self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
def test_create_dummy_object(self):
dummy_constant = create_dummy_object("CONSTANT", "'torch'")
self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
dummy_function = create_dummy_object("function", "'torch'")
self.assertEqual(
dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
)
expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
dummy_class = create_dummy_object("FakeClass", "'torch'")
self.assertEqual(dummy_class, expected_dummy_class)
def test_create_dummy_files(self):
expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
"""
dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
|
diffusers/tests/others/test_check_dummies.py/0
|
{
"file_path": "diffusers/tests/others/test_check_dummies.py",
"repo_id": "diffusers",
"token_count": 1872
}
| 172
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AnimateDiffPipeline,
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
LCMScheduler,
MotionAdapter,
StableDiffusionPipeline,
UNet2DConditionModel,
UNetMotionModel,
)
from diffusers.models.attention import FreeNoiseTransformerBlock
from diffusers.utils import is_xformers_available, logging
from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
IPAdapterTesterMixin,
PipelineFromPipeTesterMixin,
PipelineTesterMixin,
SDFunctionTesterMixin,
)
def to_np(tensor):
if isinstance(tensor, torch.Tensor):
tensor = tensor.detach().cpu().numpy()
return tensor
class AnimateDiffPipelineFastTests(
IPAdapterTesterMixin, SDFunctionTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase
):
pipeline_class = AnimateDiffPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
def get_dummy_components(self):
cross_attention_dim = 8
block_out_channels = (8, 8)
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=block_out_channels,
layers_per_block=2,
sample_size=8,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=cross_attention_dim,
norm_num_groups=2,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="linear",
clip_sample=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=block_out_channels,
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=cross_attention_dim,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
motion_adapter = MotionAdapter(
block_out_channels=block_out_channels,
motion_layers_per_block=2,
motion_norm_num_groups=2,
motion_num_attention_heads=4,
)
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"motion_adapter": motion_adapter,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "pt",
}
return inputs
def test_from_pipe_consistent_config(self):
assert self.original_pipeline_class == StableDiffusionPipeline
original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe"
original_kwargs = {"requires_safety_checker": False}
# create original_pipeline_class(sd)
pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs)
# original_pipeline_class(sd) -> pipeline_class
pipe_components = self.get_dummy_components()
pipe_additional_components = {}
for name, component in pipe_components.items():
if name not in pipe_original.components:
pipe_additional_components[name] = component
pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components)
# pipeline_class -> original_pipeline_class(sd)
original_pipe_additional_components = {}
for name, component in pipe_original.components.items():
if name not in pipe.components or not isinstance(component, pipe.components[name].__class__):
original_pipe_additional_components[name] = component
pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components)
# compare the config
original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")}
original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")}
assert original_config_2 == original_config
def test_motion_unet_loading(self):
components = self.get_dummy_components()
pipe = AnimateDiffPipeline(**components)
assert isinstance(pipe.unet, UNetMotionModel)
@unittest.skip("Attention slicing is not enabled in this pipeline")
def test_attention_slicing_forward_pass(self):
pass
def test_ip_adapter_single(self):
expected_pipe_slice = None
if torch_device == "cpu":
expected_pipe_slice = np.array(
[
0.5216,
0.5620,
0.4927,
0.5082,
0.4786,
0.5932,
0.5125,
0.4514,
0.5315,
0.4694,
0.3276,
0.4863,
0.3920,
0.3684,
0.5745,
0.4499,
0.5081,
0.5414,
0.6014,
0.5062,
0.3630,
0.5296,
0.6018,
0.5098,
0.4948,
0.5101,
0.5620,
]
)
return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
def test_dict_tuple_outputs_equivalent(self):
expected_slice = None
if torch_device == "cpu":
expected_slice = np.array([0.5125, 0.4514, 0.5315, 0.4499, 0.5081, 0.5414, 0.4948, 0.5101, 0.5620])
return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice)
def test_inference_batch_single_identical(
self,
batch_size=2,
expected_max_diff=1e-4,
additional_params_copy_to_batched_inputs=["num_inference_steps"],
):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
        # Reset generator in case it has been used in self.get_dummy_inputs
inputs["generator"] = self.get_generator(0)
logger = logging.get_logger(pipe.__module__)
logger.setLevel(level=diffusers.logging.FATAL)
# batchify inputs
batched_inputs = {}
batched_inputs.update(inputs)
for name in self.batch_params:
if name not in inputs:
continue
value = inputs[name]
if name == "prompt":
len_prompt = len(value)
batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
batched_inputs[name][-1] = 100 * "very long"
else:
batched_inputs[name] = batch_size * [value]
if "generator" in inputs:
batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)]
if "batch_size" in inputs:
batched_inputs["batch_size"] = batch_size
for arg in additional_params_copy_to_batched_inputs:
batched_inputs[arg] = inputs[arg]
output = pipe(**inputs)
output_batch = pipe(**batched_inputs)
assert output_batch[0].shape[0] == batch_size
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
assert max_diff < expected_max_diff
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
def test_to_device(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to("cpu")
# pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components
model_devices = [
component.device.type for component in pipe.components.values() if hasattr(component, "device")
]
self.assertTrue(all(device == "cpu" for device in model_devices))
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
self.assertTrue(np.isnan(output_cpu).sum() == 0)
pipe.to("cuda")
model_devices = [
component.device.type for component in pipe.components.values() if hasattr(component, "device")
]
self.assertTrue(all(device == "cuda" for device in model_devices))
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
def test_to_dtype(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
# pipeline creates a new motion UNet under the hood. So we need to check the dtype from pipe.components
model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))
pipe.to(dtype=torch.float16)
model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")]
self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))
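    # Note (editorial, not in the original file): the device/dtype checks in the two tests
    # above go through `pipe.components` because, as `test_motion_unet_loading` asserts,
    # the pipeline wraps the supplied 2D UNet into a `UNetMotionModel`, so the instance the
    # pipeline holds is not the one constructed in `get_dummy_components`. A hedged sketch
    # of that wrapping, assuming a helper along the lines of `UNetMotionModel.from_unet2d`
    # (shown here as a comment only):
    #
    #     motion_unet = UNetMotionModel.from_unet2d(unet2d, motion_adapter=adapter)
    #     assert isinstance(motion_unet, UNetMotionModel)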
def test_prompt_embeds(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
inputs.pop("prompt")
inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
pipe(**inputs)
def test_free_init(self):
components = self.get_dummy_components()
pipe: AnimateDiffPipeline = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
inputs_normal = self.get_dummy_inputs(torch_device)
frames_normal = pipe(**inputs_normal).frames[0]
pipe.enable_free_init(
num_iters=2,
use_fast_sampling=True,
method="butterworth",
order=4,
spatial_stop_frequency=0.25,
temporal_stop_frequency=0.25,
)
inputs_enable_free_init = self.get_dummy_inputs(torch_device)
frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0]
pipe.disable_free_init()
inputs_disable_free_init = self.get_dummy_inputs(torch_device)
frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0]
sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum()
max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max()
self.assertGreater(
sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results"
)
self.assertLess(
max_diff_disabled,
1e-4,
"Disabling of FreeInit should lead to results similar to the default pipeline results",
)
def test_free_init_with_schedulers(self):
components = self.get_dummy_components()
pipe: AnimateDiffPipeline = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
inputs_normal = self.get_dummy_inputs(torch_device)
frames_normal = pipe(**inputs_normal).frames[0]
schedulers_to_test = [
DPMSolverMultistepScheduler.from_config(
components["scheduler"].config,
timestep_spacing="linspace",
beta_schedule="linear",
algorithm_type="dpmsolver++",
steps_offset=1,
clip_sample=False,
),
LCMScheduler.from_config(
components["scheduler"].config,
timestep_spacing="linspace",
beta_schedule="linear",
steps_offset=1,
clip_sample=False,
),
]
components.pop("scheduler")
for scheduler in schedulers_to_test:
components["scheduler"] = scheduler
pipe: AnimateDiffPipeline = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
pipe.enable_free_init(num_iters=2, use_fast_sampling=False)
inputs = self.get_dummy_inputs(torch_device)
frames_enable_free_init = pipe(**inputs).frames[0]
sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum()
self.assertGreater(
sum_enabled,
1e1,
"Enabling of FreeInit should lead to results different from the default pipeline results",
)
def test_free_noise_blocks(self):
components = self.get_dummy_components()
pipe: AnimateDiffPipeline = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
pipe.enable_free_noise()
for block in pipe.unet.down_blocks:
for motion_module in block.motion_modules:
for transformer_block in motion_module.transformer_blocks:
self.assertTrue(
isinstance(transformer_block, FreeNoiseTransformerBlock),
"Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.",
)
pipe.disable_free_noise()
for block in pipe.unet.down_blocks:
for motion_module in block.motion_modules:
for transformer_block in motion_module.transformer_blocks:
self.assertFalse(
isinstance(transformer_block, FreeNoiseTransformerBlock),
"Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.",
)
def test_free_noise(self):
components = self.get_dummy_components()
pipe: AnimateDiffPipeline = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
inputs_normal = self.get_dummy_inputs(torch_device)
frames_normal = pipe(**inputs_normal).frames[0]
for context_length in [8, 9]:
for context_stride in [4, 6]:
pipe.enable_free_noise(context_length, context_stride)
inputs_enable_free_noise = self.get_dummy_inputs(torch_device)
frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0]
pipe.disable_free_noise()
inputs_disable_free_noise = self.get_dummy_inputs(torch_device)
frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0]
sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum()
max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max()
self.assertGreater(
sum_enabled,
1e1,
"Enabling of FreeNoise should lead to results different from the default pipeline results",
)
self.assertLess(
max_diff_disabled,
1e-4,
"Disabling of FreeNoise should lead to results similar to the default pipeline results",
)
def test_free_noise_multi_prompt(self):
components = self.get_dummy_components()
pipe: AnimateDiffPipeline = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
pipe.to(torch_device)
context_length = 8
context_stride = 4
pipe.enable_free_noise(context_length, context_stride)
# Make sure that pipeline works when prompt indices are within num_frames bounds
inputs = self.get_dummy_inputs(torch_device)
inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf"}
inputs["num_frames"] = 16
pipe(**inputs).frames[0]
with self.assertRaises(ValueError):
# Ensure that prompt indices are within bounds
inputs = self.get_dummy_inputs(torch_device)
inputs["num_frames"] = 16
inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"}
pipe(**inputs).frames[0]
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
for component in pipe.components.values():
if hasattr(component, "set_default_attn_processor"):
component.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output_without_offload = pipe(**inputs).frames[0]
output_without_offload = (
output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
)
pipe.enable_xformers_memory_efficient_attention()
inputs = self.get_dummy_inputs(torch_device)
output_with_offload = pipe(**inputs).frames[0]
output_with_offload = (
            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload
)
max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results")
def test_vae_slicing(self):
return super().test_vae_slicing(image_count=2)
@slow
@require_torch_gpu
class AnimateDiffPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_animatediff(self):
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter)
pipe = pipe.to(torch_device)
pipe.scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="linear",
steps_offset=1,
clip_sample=False,
)
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
prompt = "night, b&w photo of old house, post apocalypse, forest, storm weather, wind, rocks, 8k uhd, dslr, soft lighting, high quality, film grain"
negative_prompt = "bad quality, worse quality"
generator = torch.Generator("cpu").manual_seed(0)
output = pipe(
prompt,
negative_prompt=negative_prompt,
num_frames=16,
generator=generator,
guidance_scale=7.5,
num_inference_steps=3,
output_type="np",
)
image = output.frames[0]
assert image.shape == (16, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array(
[
0.11357737,
0.11285847,
0.11180121,
0.11084166,
0.11414117,
0.09785956,
0.10742754,
0.10510018,
0.08045256,
]
)
assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
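# --- Editorial sketch, not part of the original test file ---
# `test_animatediff` above compares image slices with `numpy_cosine_similarity_distance`
# from `diffusers.utils.testing_utils`. A minimal stand-alone equivalent is sketched below,
# assuming the distance is simply 1 minus the cosine similarity; the real helper may differ
# in details.
import numpy as np


def _cosine_similarity_distance(a, b):
    # Flatten both arrays and compute 1 - cos(theta) between them.
    a = np.asarray(a, dtype=np.float64).flatten()
    b = np.asarray(b, dtype=np.float64).flatten()
    cos_sim = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return float(1.0 - cos_sim)


if __name__ == "__main__":
    x = np.array([0.1135, 0.1128, 0.1118])
    # Identical vectors give a distance of ~0, well under the 1e-3 tolerance used above.
    assert _cosine_similarity_distance(x, x) < 1e-12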
|
diffusers/tests/pipelines/animatediff/test_animatediff.py/0
|
{
"file_path": "diffusers/tests/pipelines/animatediff/test_animatediff.py",
"repo_id": "diffusers",
"token_count": 10689
}
| 173
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
nightly,
require_torch_2,
require_torch_gpu,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = ConsistencyModelPipeline
params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
]
)
@property
def dummy_uncond_unet(self):
unet = UNet2DModel.from_pretrained(
"diffusers/consistency-models-test",
subfolder="test_unet",
)
return unet
@property
def dummy_cond_unet(self):
unet = UNet2DModel.from_pretrained(
"diffusers/consistency-models-test",
subfolder="test_unet_class_cond",
)
return unet
def get_dummy_components(self, class_cond=False):
if class_cond:
unet = self.dummy_cond_unet
else:
unet = self.dummy_uncond_unet
# Default to CM multistep sampler
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
components = {
"unet": unet,
"scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def test_consistency_model_pipeline_multistep(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_pipeline_multistep_class_cond(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(class_cond=True)
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["class_labels"] = 0
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_pipeline_onestep(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_pipeline_onestep_class_cond(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(class_cond=True)
pipe = ConsistencyModelPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
inputs["class_labels"] = 0
image = pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@nightly
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
generator = torch.manual_seed(seed)
inputs = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
inputs["latents"] = latents
return inputs
def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
if isinstance(device, str):
device = torch.device(device)
generator = torch.Generator(device=device).manual_seed(seed)
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
return latents
def test_consistency_model_cd_multistep(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0146, 0.0158, 0.0092, 0.0086, 0.0000, 0.0000, 0.0000, 0.0000, 0.0058])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_consistency_model_cd_onestep(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0059, 0.0003, 0.0000, 0.0023, 0.0052, 0.0007, 0.0165, 0.0081, 0.0095])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_2
def test_consistency_model_cd_multistep_flash_attn(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1845, 0.1371, 0.1211, 0.2035, 0.1954, 0.1323, 0.1773, 0.1593, 0.1314])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_2
def test_consistency_model_cd_onestep_flash_attn(self):
unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40,
sigma_min=0.002,
sigma_max=80.0,
)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
inputs["num_inference_steps"] = 1
inputs["timesteps"] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
image = pipe(**inputs).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1623, 0.2009, 0.2387, 0.1731, 0.1168, 0.1202, 0.2031, 0.1327, 0.2447])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
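# --- Editorial sketch, not part of the original test file ---
# The flash-attention tests above pin the initial noise through `get_fixed_latents` so that
# fp16 GPU runs stay reproducible across machines. A minimal stand-alone version of that
# pattern, reusing the same `randn_tensor` helper:
import torch
from diffusers.utils.torch_utils import randn_tensor


def make_fixed_latents(seed=0, shape=(1, 3, 64, 64), device="cpu", dtype=torch.float32):
    device = torch.device(device)
    # A generator pinned to the target device makes the draw deterministic for that device.
    generator = torch.Generator(device=device).manual_seed(seed)
    return randn_tensor(shape, generator=generator, device=device, dtype=dtype)


if __name__ == "__main__":
    # The same seed and device yield identical latents across calls.
    assert torch.equal(make_fixed_latents(seed=0), make_fixed_latents(seed=0))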
|
diffusers/tests/pipelines/consistency_models/test_consistency_models.py/0
|
{
"file_path": "diffusers/tests/pipelines/consistency_models/test_consistency_models.py",
"repo_id": "diffusers",
"token_count": 5052
}
| 174
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc and The InstantX Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
SD3Transformer2DModel,
StableDiffusion3ControlNetPipeline,
)
from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class StableDiffusion3ControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = StableDiffusion3ControlNetPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
def get_dummy_components(self):
torch.manual_seed(0)
transformer = SD3Transformer2DModel(
sample_size=32,
patch_size=1,
in_channels=8,
num_layers=4,
attention_head_dim=8,
num_attention_heads=4,
joint_attention_dim=32,
caption_projection_dim=32,
pooled_projection_dim=64,
out_channels=8,
)
torch.manual_seed(0)
controlnet = SD3ControlNetModel(
sample_size=32,
patch_size=1,
in_channels=8,
num_layers=1,
attention_head_dim=8,
num_attention_heads=4,
joint_attention_dim=32,
caption_projection_dim=32,
pooled_projection_dim=64,
out_channels=8,
)
clip_text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=32,
)
torch.manual_seed(0)
text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)
torch.manual_seed(0)
text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)
torch.manual_seed(0)
text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=8,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
return {
"scheduler": scheduler,
"text_encoder": text_encoder,
"text_encoder_2": text_encoder_2,
"text_encoder_3": text_encoder_3,
"tokenizer": tokenizer,
"tokenizer_2": tokenizer_2,
"tokenizer_3": tokenizer_3,
"transformer": transformer,
"vae": vae,
"controlnet": controlnet,
}
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
control_image = randn_tensor(
(1, 3, 32, 32),
generator=generator,
device=torch.device(device),
dtype=torch.float16,
)
controlnet_conditioning_scale = 0.5
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
"control_image": control_image,
"controlnet_conditioning_scale": controlnet_conditioning_scale,
}
return inputs
def test_controlnet_sd3(self):
components = self.get_dummy_components()
sd_pipe = StableDiffusion3ControlNetPipeline(**components)
sd_pipe = sd_pipe.to(torch_device, dtype=torch.float16)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.5767, 0.7100, 0.5981, 0.5674, 0.5952, 0.4102, 0.5093, 0.5044, 0.6030])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f"Expected: {expected_slice}, got: {image_slice.flatten()}"
@unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention")
def test_xformers_attention_forwardGenerator_pass(self):
pass
@slow
@require_torch_gpu
class StableDiffusion3ControlNetPipelineSlowTests(unittest.TestCase):
pipeline_class = StableDiffusion3ControlNetPipeline
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_canny(self):
controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text 'InstantX' on image"
n_prompt = "NSFW, nude, naked, porn, ugly"
control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=control_image,
controlnet_conditioning_scale=0.5,
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.20947266, 0.1574707, 0.19897461, 0.15063477, 0.1418457, 0.17285156, 0.14160156, 0.13989258, 0.30810547]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
def test_pose(self):
controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Pose", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = 'Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text "InstantX" on image'
n_prompt = "NSFW, nude, naked, porn, ugly"
control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Pose/resolve/main/pose.jpg")
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=control_image,
controlnet_conditioning_scale=0.5,
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.8671875, 0.86621094, 0.91015625, 0.8491211, 0.87890625, 0.9140625, 0.8300781, 0.8334961, 0.8623047]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
def test_tile(self):
        controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Tile", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = 'Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text "InstantX" on image'
n_prompt = "NSFW, nude, naked, porn, ugly"
control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Tile/resolve/main/tile.jpg")
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=control_image,
controlnet_conditioning_scale=0.5,
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.6982422, 0.7011719, 0.65771484, 0.6904297, 0.7416992, 0.6904297, 0.6977539, 0.7080078, 0.6386719]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
def test_multi_controlnet(self):
controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
controlnet = SD3MultiControlNetModel([controlnet, controlnet])
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text 'InstantX' on image"
n_prompt = "NSFW, nude, naked, porn, ugly"
control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=[control_image, control_image],
controlnet_conditioning_scale=[0.25, 0.25],
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.7451172, 0.7416992, 0.7158203, 0.7792969, 0.7607422, 0.7089844, 0.6855469, 0.71777344, 0.7314453]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
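# --- Editorial sketch, not part of the original test file ---
# `test_multi_controlnet` above wraps two controlnets in `SD3MultiControlNetModel` and
# passes matching lists for `control_image` and `controlnet_conditioning_scale`. The sketch
# below builds the same tiny dummy controlnet used by the fast tests and wraps two copies,
# without any Hub downloads.
import torch
from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel

if __name__ == "__main__":
    torch.manual_seed(0)
    tiny_controlnet = SD3ControlNetModel(
        sample_size=32,
        patch_size=1,
        in_channels=8,
        num_layers=1,
        attention_head_dim=8,
        num_attention_heads=4,
        joint_attention_dim=32,
        caption_projection_dim=32,
        pooled_projection_dim=64,
        out_channels=8,
    )
    # One conditioning image and one scale are expected per wrapped controlnet.
    multi_controlnet = SD3MultiControlNetModel([tiny_controlnet, tiny_controlnet])
    assert isinstance(multi_controlnet, SD3MultiControlNetModel)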
|
diffusers/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py/0
|
{
"file_path": "diffusers/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py",
"repo_id": "diffusers",
"token_count": 5922
}
| 175
|
import gc
import inspect
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
LatentConsistencyModelPipeline,
LCMScheduler,
UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class LatentConsistencyModelPipelineFastTests(
IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = LatentConsistencyModelPipeline
params = TEXT_TO_IMAGE_PARAMS - {"negative_prompt", "negative_prompt_embeds"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {"negative_prompt"}
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(4, 8),
layers_per_block=1,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
norm_num_groups=2,
time_cond_proj_dim=32,
)
scheduler = LCMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[4, 8],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=64,
layer_norm_eps=1e-05,
num_attention_heads=8,
num_hidden_layers=3,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
"requires_safety_checker": False,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "np",
}
return inputs
def test_ip_adapter_single(self):
expected_pipe_slice = None
if torch_device == "cpu":
expected_pipe_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645])
return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
def test_lcm_onestep(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = LatentConsistencyModelPipeline(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["num_inference_steps"] = 1
output = pipe(**inputs)
image = output.images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1441, 0.5304, 0.5452, 0.1361, 0.4011, 0.4370, 0.5326, 0.3492, 0.3637])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_lcm_multistep(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = LatentConsistencyModelPipeline(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
output = pipe(**inputs)
image = output.images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_lcm_custom_timesteps(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = LatentConsistencyModelPipeline(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
del inputs["num_inference_steps"]
inputs["timesteps"] = [999, 499]
output = pipe(**inputs)
image = output.images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=5e-4)
    # skip because the LCM pipeline applies CFG differently
def test_callback_cfg(self):
pass
# override default test because the final latent variable is "denoised" instead of "latents"
def test_callback_inputs(self):
sig = inspect.signature(self.pipeline_class.__call__)
if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
self.assertTrue(
hasattr(pipe, "_callback_tensor_inputs"),
f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
)
def callback_inputs_test(pipe, i, t, callback_kwargs):
missing_callback_inputs = set()
for v in pipe._callback_tensor_inputs:
if v not in callback_kwargs:
missing_callback_inputs.add(v)
self.assertTrue(
len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
)
last_i = pipe.num_timesteps - 1
if i == last_i:
callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"])
return callback_kwargs
inputs = self.get_dummy_inputs(torch_device)
inputs["callback_on_step_end"] = callback_inputs_test
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
inputs["output_type"] = "latent"
output = pipe(**inputs)[0]
assert output.abs().sum() == 0
@slow
@require_torch_gpu
class LatentConsistencyModelPipelineSlowTests(unittest.TestCase):
def setUp(self):
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_lcm_onestep(self):
pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 1
image = pipe(**inputs).images
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1].flatten()
expected_slice = np.array([0.1025, 0.0911, 0.0984, 0.0981, 0.0901, 0.0918, 0.1055, 0.0940, 0.0730])
assert np.abs(image_slice - expected_slice).max() < 1e-3
def test_lcm_multistep(self):
pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1].flatten()
expected_slice = np.array([0.01855, 0.01855, 0.01489, 0.01392, 0.01782, 0.01465, 0.01831, 0.02539, 0.0])
assert np.abs(image_slice - expected_slice).max() < 1e-3
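# --- Editorial sketch, not part of the original test file ---
# The slow LCM tests seed the initial latents from NumPy (`np.random.RandomState`) rather
# than from the pipeline generator, so the same starting noise can be rebuilt independently
# of torch's RNG state. A minimal stand-alone version of that pattern:
import numpy as np
import torch


def fixed_latents(seed=0, shape=(1, 4, 64, 64), device="cpu", dtype=torch.float32):
    noise = np.random.RandomState(seed).standard_normal(shape)
    return torch.from_numpy(noise).to(device=device, dtype=dtype)


if __name__ == "__main__":
    # Deterministic across calls with the same seed, different across seeds.
    assert torch.equal(fixed_latents(0), fixed_latents(0))
    assert not torch.equal(fixed_latents(0), fixed_latents(1))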
|
diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py/0
|
{
"file_path": "diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py",
"repo_id": "diffusers",
"token_count": 4773
}
| 176
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import (
ClapAudioConfig,
ClapConfig,
ClapFeatureExtractor,
ClapModel,
ClapTextConfig,
RobertaTokenizer,
SpeechT5HifiGan,
SpeechT5HifiGanConfig,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
MusicLDMPipeline,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class MusicLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = MusicLDMPipeline
params = TEXT_TO_AUDIO_PARAMS
batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
]
)
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=(32, 64),
class_embed_type="simple_projection",
projection_class_embeddings_input_dim=32,
class_embeddings_concat=True,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=1,
out_channels=1,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_branch_config = ClapTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=16,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=2,
num_hidden_layers=2,
pad_token_id=1,
vocab_size=1000,
)
audio_branch_config = ClapAudioConfig(
spec_size=64,
window_size=4,
num_mel_bins=64,
intermediate_size=37,
layer_norm_eps=1e-05,
depths=[2, 2],
num_attention_heads=[2, 2],
num_hidden_layers=2,
hidden_size=192,
patch_size=2,
patch_stride=2,
patch_embed_input_channels=4,
)
text_encoder_config = ClapConfig.from_text_audio_configs(
text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=32
)
text_encoder = ClapModel(text_encoder_config)
tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
feature_extractor = ClapFeatureExtractor.from_pretrained(
"hf-internal-testing/tiny-random-ClapModel", hop_length=7900
)
torch.manual_seed(0)
vocoder_config = SpeechT5HifiGanConfig(
model_in_dim=8,
sampling_rate=16000,
upsample_initial_channel=16,
upsample_rates=[2, 2],
upsample_kernel_sizes=[4, 4],
resblock_kernel_sizes=[3, 7],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
normalize_before=False,
)
vocoder = SpeechT5HifiGan(vocoder_config)
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"feature_extractor": feature_extractor,
"vocoder": vocoder,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def test_musicldm_ddim(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
musicldm_pipe = MusicLDMPipeline(**components)
musicldm_pipe = musicldm_pipe.to(torch_device)
musicldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
output = musicldm_pipe(**inputs)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) == 256
audio_slice = audio[:10]
expected_slice = np.array(
[-0.0027, -0.0036, -0.0037, -0.0020, -0.0035, -0.0019, -0.0037, -0.0020, -0.0038, -0.0019]
)
assert np.abs(audio_slice - expected_slice).max() < 1e-4
def test_musicldm_prompt_embeds(self):
components = self.get_dummy_components()
musicldm_pipe = MusicLDMPipeline(**components)
        musicldm_pipe = musicldm_pipe.to(torch_device)
musicldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
inputs["prompt"] = 3 * [inputs["prompt"]]
# forward
output = musicldm_pipe(**inputs)
audio_1 = output.audios[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = 3 * [inputs.pop("prompt")]
text_inputs = musicldm_pipe.tokenizer(
prompt,
padding="max_length",
max_length=musicldm_pipe.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_inputs = text_inputs["input_ids"].to(torch_device)
prompt_embeds = musicldm_pipe.text_encoder.get_text_features(text_inputs)
inputs["prompt_embeds"] = prompt_embeds
# forward
output = musicldm_pipe(**inputs)
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2).max() < 1e-2
def test_musicldm_negative_prompt_embeds(self):
components = self.get_dummy_components()
musicldm_pipe = MusicLDMPipeline(**components)
        musicldm_pipe = musicldm_pipe.to(torch_device)
musicldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
negative_prompt = 3 * ["this is a negative prompt"]
inputs["negative_prompt"] = negative_prompt
inputs["prompt"] = 3 * [inputs["prompt"]]
# forward
output = musicldm_pipe(**inputs)
audio_1 = output.audios[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = 3 * [inputs.pop("prompt")]
embeds = []
for p in [prompt, negative_prompt]:
text_inputs = musicldm_pipe.tokenizer(
p,
padding="max_length",
max_length=musicldm_pipe.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_inputs = text_inputs["input_ids"].to(torch_device)
text_embeds = musicldm_pipe.text_encoder.get_text_features(
text_inputs,
)
embeds.append(text_embeds)
inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
# forward
output = musicldm_pipe(**inputs)
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2).max() < 1e-2
def test_musicldm_negative_prompt(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
musicldm_pipe = MusicLDMPipeline(**components)
musicldm_pipe = musicldm_pipe.to(device)
musicldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
negative_prompt = "egg cracking"
output = musicldm_pipe(**inputs, negative_prompt=negative_prompt)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) == 256
audio_slice = audio[:10]
expected_slice = np.array(
[-0.0027, -0.0036, -0.0037, -0.0019, -0.0035, -0.0018, -0.0037, -0.0021, -0.0038, -0.0018]
)
assert np.abs(audio_slice - expected_slice).max() < 1e-4
def test_musicldm_num_waveforms_per_prompt(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
musicldm_pipe = MusicLDMPipeline(**components)
musicldm_pipe = musicldm_pipe.to(device)
musicldm_pipe.set_progress_bar_config(disable=None)
prompt = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
audios = musicldm_pipe(prompt, num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
batch_size = 2
audios = musicldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
num_waveforms_per_prompt = 2
audios = musicldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
batch_size = 2
audios = musicldm_pipe(
[prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def test_musicldm_audio_length_in_s(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
musicldm_pipe = MusicLDMPipeline(**components)
musicldm_pipe = musicldm_pipe.to(torch_device)
musicldm_pipe.set_progress_bar_config(disable=None)
vocoder_sampling_rate = musicldm_pipe.vocoder.config.sampling_rate
inputs = self.get_dummy_inputs(device)
output = musicldm_pipe(audio_length_in_s=0.016, **inputs)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) / vocoder_sampling_rate == 0.016
output = musicldm_pipe(audio_length_in_s=0.032, **inputs)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) / vocoder_sampling_rate == 0.032
def test_musicldm_vocoder_model_in_dim(self):
components = self.get_dummy_components()
musicldm_pipe = MusicLDMPipeline(**components)
musicldm_pipe = musicldm_pipe.to(torch_device)
musicldm_pipe.set_progress_bar_config(disable=None)
prompt = ["hey"]
output = musicldm_pipe(prompt, num_inference_steps=1)
audio_shape = output.audios.shape
assert audio_shape == (1, 256)
config = musicldm_pipe.vocoder.config
config.model_in_dim *= 2
musicldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
output = musicldm_pipe(prompt, num_inference_steps=1)
audio_shape = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical()
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
def test_to_dtype(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
# The method component.dtype returns the dtype of the first parameter registered in the model, not the
# dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale)
model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")}
# Without the logit scale parameters, everything is float32
model_dtypes.pop("text_encoder")
self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values()))
# the CLAP sub-models are float32
model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype
self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values()))
# Once we send to fp16, all params are in half-precision, including the logit scale
pipe.to(dtype=torch.float16)
model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")}
self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values()))
@nightly
@require_torch_gpu
class MusicLDMPipelineNightlyTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
inputs = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def test_musicldm(self):
musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm")
musicldm_pipe = musicldm_pipe.to(torch_device)
musicldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 25
audio = musicldm_pipe(**inputs).audios[0]
assert audio.ndim == 1
assert len(audio) == 81952
# check the portion of the generated audio with the largest dynamic range (reduces flakiness)
audio_slice = audio[8680:8690]
expected_slice = np.array(
[-0.1042, -0.1068, -0.1235, -0.1387, -0.1428, -0.136, -0.1213, -0.1097, -0.0967, -0.0945]
)
max_diff = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-3
def test_musicldm_lms(self):
musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm")
musicldm_pipe.scheduler = LMSDiscreteScheduler.from_config(musicldm_pipe.scheduler.config)
musicldm_pipe = musicldm_pipe.to(torch_device)
musicldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
audio = musicldm_pipe(**inputs).audios[0]
assert audio.ndim == 1
assert len(audio) == 81952
# check the portion of the generated audio with the largest dynamic range (reduces flakiness)
audio_slice = audio[58020:58030]
expected_slice = np.array([0.3592, 0.3477, 0.4084, 0.4665, 0.5048, 0.5891, 0.6461, 0.5579, 0.4595, 0.4403])
max_diff = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-3
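# --- Editorial sketch, not part of the original test file ---
# `test_musicldm_audio_length_in_s` above relies on the generated waveform containing
# `audio_length_in_s * sampling_rate` samples. The helper below only makes that arithmetic
# explicit for the dummy vocoder configured with sampling_rate=16000.
def expected_num_samples(audio_length_in_s, sampling_rate=16000):
    return int(round(audio_length_in_s * sampling_rate))


if __name__ == "__main__":
    assert expected_num_samples(0.016) == 256  # matches the 256-sample waveforms above
    assert expected_num_samples(0.032) == 512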
|
diffusers/tests/pipelines/musicldm/test_musicldm.py/0
|
{
"file_path": "diffusers/tests/pipelines/musicldm/test_musicldm.py",
"repo_id": "diffusers",
"token_count": 8048
}
| 177
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import (
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
DPTConfig,
DPTForDepthEstimation,
DPTImageProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionDepth2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.utils import is_accelerate_available, is_accelerate_version
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
TEXT_TO_IMAGE_IMAGE_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionDepth2ImgPipelineFastTests(
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = StableDiffusionDepth2ImgPipeline
test_save_load_optional_components = False
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"depth_mask"})
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=5,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
attention_head_dim=(2, 4),
use_linear_projection=True,
)
scheduler = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
backbone_config = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
depth_estimator_config = DPTConfig(
image_size=32,
patch_size=16,
num_channels=3,
hidden_size=32,
num_hidden_layers=4,
backbone_out_indices=(0, 1, 2, 3),
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
is_decoder=False,
initializer_range=0.02,
is_hybrid=True,
backbone_config=backbone_config,
backbone_featmap_shape=[1, 384, 24, 24],
)
depth_estimator = DPTForDepthEstimation(depth_estimator_config).eval()
feature_extractor = DPTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-DPTForDepthEstimation")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"depth_estimator": depth_estimator,
"feature_extractor": feature_extractor,
}
return components
def get_dummy_inputs(self, device, seed=0):
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed))
image = image.cpu().permute(0, 2, 3, 1)[0]
image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32))
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "np",
}
return inputs
def test_save_load_local(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output = pipe(**inputs)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(tmpdir)
pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
pipe_loaded.to(torch_device)
pipe_loaded.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output_loaded = pipe_loaded(**inputs)[0]
max_diff = np.abs(output - output_loaded).max()
self.assertLess(max_diff, 1e-4)
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
def test_save_load_float16(self):
components = self.get_dummy_components()
for name, module in components.items():
if hasattr(module, "half"):
components[name] = module.to(torch_device).half()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output = pipe(**inputs)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(tmpdir)
pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)
pipe_loaded.to(torch_device)
pipe_loaded.set_progress_bar_config(disable=None)
for name, component in pipe_loaded.components.items():
if hasattr(component, "dtype"):
self.assertTrue(
component.dtype == torch.float16,
f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.",
)
inputs = self.get_dummy_inputs(torch_device)
output_loaded = pipe_loaded(**inputs)[0]
max_diff = np.abs(output - output_loaded).max()
self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.")
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
def test_float16_inference(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
for name, module in components.items():
if hasattr(module, "half"):
components[name] = module.half()
pipe_fp16 = self.pipeline_class(**components)
pipe_fp16.to(torch_device)
pipe_fp16.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(torch_device))[0]
output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0]
max_diff = np.abs(output - output_fp16).max()
self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.")
@unittest.skipIf(
torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
)
def test_cpu_offload_forward_pass(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output_without_offload = pipe(**inputs)[0]
pipe.enable_sequential_cpu_offload()
inputs = self.get_dummy_inputs(torch_device)
output_with_offload = pipe(**inputs)[0]
max_diff = np.abs(output_with_offload - output_without_offload).max()
self.assertLess(max_diff, 1e-4, "CPU offloading should not affect the inference results")
def test_dict_tuple_outputs_equivalent(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(torch_device))[0]
output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0]
max_diff = np.abs(output - output_tuple).max()
self.assertLess(max_diff, 1e-4)
def test_progress_bar(self):
super().test_progress_bar()
def test_stable_diffusion_depth2img_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = StableDiffusionDepth2ImgPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
if torch_device == "mps":
expected_slice = np.array([0.6071, 0.5035, 0.4378, 0.5776, 0.5753, 0.4316, 0.4513, 0.5263, 0.4546])
else:
expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_depth2img_negative_prompt(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = StableDiffusionDepth2ImgPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
negative_prompt = "french fries"
output = pipe(**inputs, negative_prompt=negative_prompt)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
if torch_device == "mps":
expected_slice = np.array([0.6296, 0.5125, 0.3890, 0.4456, 0.5955, 0.4621, 0.3810, 0.5310, 0.4626])
else:
expected_slice = np.array([0.6012, 0.4507, 0.3769, 0.4121, 0.5566, 0.4585, 0.3803, 0.5045, 0.4631])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_depth2img_multiple_init_images(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = StableDiffusionDepth2ImgPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["prompt"] = [inputs["prompt"]] * 2
inputs["image"] = 2 * [inputs["image"]]
image = pipe(**inputs).images
image_slice = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
if torch_device == "mps":
expected_slice = np.array([0.6501, 0.5150, 0.4939, 0.6688, 0.5437, 0.5758, 0.5115, 0.4406, 0.4551])
else:
expected_slice = np.array([0.6557, 0.6214, 0.6254, 0.5775, 0.4785, 0.5949, 0.5904, 0.4785, 0.4730])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_depth2img_pil(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = StableDiffusionDepth2ImgPipeline(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
if torch_device == "mps":
expected_slice = np.array([0.53232, 0.47015, 0.40868, 0.45651, 0.4891, 0.4668, 0.4287, 0.48822, 0.47439])
else:
expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@skip_mps
def test_attention_slicing_forward_pass(self):
return super().test_attention_slicing_forward_pass()
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=7e-3)
@slow
@require_torch_gpu
class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
)
inputs = {
"prompt": "two tigers",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_stable_diffusion_depth2img_pipeline_default(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs).images
image_slice = image[0, 253:256, 253:256, -1].flatten()
assert image.shape == (1, 480, 640, 3)
expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655])
assert np.abs(expected_slice - image_slice).max() < 6e-1
def test_stable_diffusion_depth2img_pipeline_k_lms(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None
)
pipe.unet.set_default_attn_processor()
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs).images
image_slice = image[0, 253:256, 253:256, -1].flatten()
assert image.shape == (1, 480, 640, 3)
expected_slice = np.array([0.6363, 0.6274, 0.6309, 0.6370, 0.6226, 0.6286, 0.6213, 0.6453, 0.6306])
assert np.abs(expected_slice - image_slice).max() < 8e-4
def test_stable_diffusion_depth2img_pipeline_ddim(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs).images
image_slice = image[0, 253:256, 253:256, -1].flatten()
assert image.shape == (1, 480, 640, 3)
expected_slice = np.array([0.6424, 0.6524, 0.6249, 0.6041, 0.6634, 0.6420, 0.6522, 0.6555, 0.6436])
assert np.abs(expected_slice - image_slice).max() < 5e-4
def test_stable_diffusion_depth2img_intermediate_state(self):
number_of_steps = 0
def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 60, 80)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[-0.7168, -1.5137, -0.1418, -2.9219, -2.7266, -2.4414, -2.1035, -3.0078, -1.7051]
)
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 60, 80)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[-0.7109, -1.5068, -0.1403, -2.9160, -2.7207, -2.4414, -2.1035, -3.0059, -1.7090]
)
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
callback_fn.has_been_called = False
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(dtype=torch.float16)
pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 2
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs(dtype=torch.float16)
_ = pipe(**inputs)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
@nightly
@require_torch_gpu
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
)
inputs = {
"prompt": "two tigers",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_depth2img_pndm(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
image = pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_depth2img/stable_diffusion_2_0_pndm.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_depth2img_ddim(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
image = pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_depth2img/stable_diffusion_2_0_ddim.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_img2img_lms(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
image = pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_depth2img/stable_diffusion_2_0_lms.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_img2img_dpm(self):
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs()
inputs["num_inference_steps"] = 30
image = pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_depth2img/stable_diffusion_2_0_dpm_multi.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
# Source: diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import numpy as np
import torch
from parameterized import parameterized
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
LCMScheduler,
MultiAdapter,
StableDiffusionXLAdapterPipeline,
T2IAdapter,
UNet2DConditionModel,
)
from diffusers.utils import logging
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
IPAdapterTesterMixin,
PipelineTesterMixin,
SDXLOptionalComponentsTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableDiffusionXLAdapterPipelineFastTests(
IPAdapterTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase
):
pipeline_class = StableDiffusionXLAdapterPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
def get_dummy_components(self, adapter_type="full_adapter_xl", time_cond_proj_dim=None):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
# SD2-specific config below
attention_head_dim=(2, 4),
use_linear_projection=True,
addition_embed_type="text_time",
addition_time_embed_dim=8,
transformer_layers_per_block=(1, 2),
projection_class_embeddings_input_dim=80, # 6 * 8 + 32
cross_attention_dim=64,
time_cond_proj_dim=time_cond_proj_dim,
)
scheduler = EulerDiscreteScheduler(
beta_start=0.00085,
beta_end=0.012,
steps_offset=1,
beta_schedule="scaled_linear",
timestep_spacing="leading",
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
# SD2-specific config below
hidden_act="gelu",
projection_dim=32,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
if adapter_type == "full_adapter_xl":
adapter = T2IAdapter(
in_channels=3,
channels=[32, 64],
num_res_blocks=2,
downscale_factor=4,
adapter_type=adapter_type,
)
elif adapter_type == "multi_adapter":
adapter = MultiAdapter(
[
T2IAdapter(
in_channels=3,
channels=[32, 64],
num_res_blocks=2,
downscale_factor=4,
adapter_type="full_adapter_xl",
),
T2IAdapter(
in_channels=3,
channels=[32, 64],
num_res_blocks=2,
downscale_factor=4,
adapter_type="full_adapter_xl",
),
]
)
else:
raise ValueError(
f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''"
)
components = {
"adapter": adapter,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_2,
"tokenizer_2": tokenizer_2,
# "safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_components_with_full_downscaling(self, adapter_type="full_adapter_xl"):
"""Get dummy components with x8 VAE downscaling and 3 UNet down blocks.
These dummy components are intended to fully-exercise the T2I-Adapter
downscaling behavior.
"""
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
# SD2-specific config below
attention_head_dim=2,
use_linear_projection=True,
addition_embed_type="text_time",
addition_time_embed_dim=8,
transformer_layers_per_block=1,
projection_class_embeddings_input_dim=80, # 6 * 8 + 32
cross_attention_dim=64,
)
scheduler = EulerDiscreteScheduler(
beta_start=0.00085,
beta_end=0.012,
steps_offset=1,
beta_schedule="scaled_linear",
timestep_spacing="leading",
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 32, 32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
# SD2-specific config below
hidden_act="gelu",
projection_dim=32,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
if adapter_type == "full_adapter_xl":
adapter = T2IAdapter(
in_channels=3,
channels=[32, 32, 64],
num_res_blocks=2,
downscale_factor=16,
adapter_type=adapter_type,
)
elif adapter_type == "multi_adapter":
adapter = MultiAdapter(
[
T2IAdapter(
in_channels=3,
channels=[32, 32, 64],
num_res_blocks=2,
downscale_factor=16,
adapter_type="full_adapter_xl",
),
T2IAdapter(
in_channels=3,
channels=[32, 32, 64],
num_res_blocks=2,
downscale_factor=16,
adapter_type="full_adapter_xl",
),
]
)
else:
raise ValueError(
f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''"
)
components = {
"adapter": adapter,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_2,
"tokenizer_2": tokenizer_2,
# "safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1):
if num_images == 1:
image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device)
else:
image = [
floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in range(num_images)
]
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
}
return inputs
def test_ip_adapter_single(self, from_multi=False, expected_pipe_slice=None):
if not from_multi:
expected_pipe_slice = None
if torch_device == "cpu":
expected_pipe_slice = np.array(
[0.5752, 0.6155, 0.4826, 0.5111, 0.5741, 0.4678, 0.5199, 0.5231, 0.4794]
)
return super().test_ip_adapter_single(expected_pipe_slice=expected_pipe_slice)
def test_stable_diffusion_adapter_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionXLAdapterPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5752, 0.6155, 0.4826, 0.5111, 0.5741, 0.4678, 0.5199, 0.5231, 0.4794])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
@parameterized.expand(
[
# (dim=144) The internal feature map will be 9x9 after initial pixel unshuffling (downscaled x16).
(((4 * 2 + 1) * 16),),
# (dim=160) The internal feature map will be 5x5 after the first T2I down block (downscaled x32).
(((4 * 1 + 1) * 32),),
]
)
def test_multiple_image_dimensions(self, dim):
"""Test that the T2I-Adapter pipeline supports any input dimension that
is divisible by the adapter's `downscale_factor`. This test was added in
response to an issue where the T2I Adapter's downscaling padding
behavior did not match the UNet's behavior.
Note that we have selected `dim` values to produce odd resolutions at
each downscaling level.
"""
components = self.get_dummy_components_with_full_downscaling()
sd_pipe = StableDiffusionXLAdapterPipeline(**components)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim)
image = sd_pipe(**inputs).images
assert image.shape == (1, dim, dim, 3)
@parameterized.expand(["full_adapter", "full_adapter_xl", "light_adapter"])
def test_total_downscale_factor(self, adapter_type):
"""Test that the T2IAdapter correctly reports its total_downscale_factor."""
batch_size = 1
in_channels = 3
out_channels = [320, 640, 1280, 1280]
in_image_size = 512
adapter = T2IAdapter(
in_channels=in_channels,
channels=out_channels,
num_res_blocks=2,
downscale_factor=8,
adapter_type=adapter_type,
)
adapter.to(torch_device)
in_image = floats_tensor((batch_size, in_channels, in_image_size, in_image_size)).to(torch_device)
adapter_state = adapter(in_image)
# Assume that the last element in `adapter_state` has been downsampled the most, and check
# that it matches the `total_downscale_factor`.
expected_out_image_size = in_image_size // adapter.total_downscale_factor
assert adapter_state[-1].shape == (
batch_size,
out_channels[-1],
expected_out_image_size,
expected_out_image_size,
)
def test_save_load_optional_components(self):
return self._test_save_load_optional_components()
def test_adapter_sdxl_lcm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionXLAdapterPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_adapter_sdxl_lcm_custom_timesteps(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionXLAdapterPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
del inputs["num_inference_steps"]
inputs["timesteps"] = [999, 499]
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
class StableDiffusionXLMultiAdapterPipelineFastTests(
StableDiffusionXLAdapterPipelineFastTests, PipelineTesterMixin, unittest.TestCase
):
def get_dummy_components(self, time_cond_proj_dim=None):
return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim)
def get_dummy_components_with_full_downscaling(self):
return super().get_dummy_components_with_full_downscaling("multi_adapter")
def get_dummy_inputs(self, device, seed=0, height=64, width=64):
inputs = super().get_dummy_inputs(device, seed, height, width, num_images=2)
inputs["adapter_conditioning_scale"] = [0.5, 0.5]
return inputs
def test_stable_diffusion_adapter_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionXLAdapterPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5617, 0.6081, 0.4807, 0.5071, 0.5665, 0.4614, 0.5165, 0.5164, 0.4786])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
def test_ip_adapter_single(self):
expected_pipe_slice = None
if torch_device == "cpu":
expected_pipe_slice = np.array([0.5617, 0.6081, 0.4807, 0.5071, 0.5665, 0.4614, 0.5165, 0.5164, 0.4786])
return super().test_ip_adapter_single(from_multi=True, expected_pipe_slice=expected_pipe_slice)
def test_inference_batch_consistent(
self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"]
):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
logger = logging.get_logger(pipe.__module__)
logger.setLevel(level=diffusers.logging.FATAL)
# batchify inputs
for batch_size in batch_sizes:
batched_inputs = {}
for name, value in inputs.items():
if name in self.batch_params:
# prompt is string
if name == "prompt":
len_prompt = len(value)
# make unequal batch sizes
batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
# make last batch super long
batched_inputs[name][-1] = 100 * "very long"
elif name == "image":
batched_images = []
for image in value:
batched_images.append(batch_size * [image])
batched_inputs[name] = batched_images
else:
batched_inputs[name] = batch_size * [value]
elif name == "batch_size":
batched_inputs[name] = batch_size
else:
batched_inputs[name] = value
for arg in additional_params_copy_to_batched_inputs:
batched_inputs[arg] = inputs[arg]
batched_inputs["output_type"] = "np"
output = pipe(**batched_inputs)
assert len(output[0]) == batch_size
batched_inputs["output_type"] = "np"
output = pipe(**batched_inputs)[0]
assert output.shape[0] == batch_size
logger.setLevel(level=diffusers.logging.WARNING)
def test_num_images_per_prompt(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
batch_sizes = [1, 2]
num_images_per_prompts = [1, 2]
for batch_size in batch_sizes:
for num_images_per_prompt in num_images_per_prompts:
inputs = self.get_dummy_inputs(torch_device)
for key in inputs.keys():
if key in self.batch_params:
if key == "image":
batched_images = []
for image in inputs[key]:
batched_images.append(batch_size * [image])
inputs[key] = batched_images
else:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
def test_inference_batch_single_identical(
self,
batch_size=3,
test_max_difference=None,
test_mean_pixel_difference=None,
relax_max_difference=False,
expected_max_diff=2e-3,
additional_params_copy_to_batched_inputs=["num_inference_steps"],
):
if test_max_difference is None:
# TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems
# make sure that batched and non-batched is identical
test_max_difference = torch_device != "mps"
if test_mean_pixel_difference is None:
# TODO same as above
test_mean_pixel_difference = torch_device != "mps"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
logger = logging.get_logger(pipe.__module__)
logger.setLevel(level=diffusers.logging.FATAL)
# batchify inputs
batched_inputs = {}
for name, value in inputs.items():
if name in self.batch_params:
# prompt is string
if name == "prompt":
len_prompt = len(value)
# make unequal batch sizes
batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
# make last batch super long
batched_inputs[name][-1] = 100 * "very long"
elif name == "image":
batched_images = []
for image in value:
batched_images.append(batch_size * [image])
batched_inputs[name] = batched_images
else:
batched_inputs[name] = batch_size * [value]
elif name == "batch_size":
batched_inputs[name] = batch_size
elif name == "generator":
batched_inputs[name] = [self.get_generator(i) for i in range(batch_size)]
else:
batched_inputs[name] = value
for arg in additional_params_copy_to_batched_inputs:
batched_inputs[arg] = inputs[arg]
output_batch = pipe(**batched_inputs)
assert output_batch[0].shape[0] == batch_size
inputs["generator"] = self.get_generator(0)
output = pipe(**inputs)
logger.setLevel(level=diffusers.logging.WARNING)
if test_max_difference:
if relax_max_difference:
# Taking the median of the largest <n> differences
# is resilient to outliers
diff = np.abs(output_batch[0][0] - output[0][0])
diff = diff.flatten()
diff.sort()
max_diff = np.median(diff[-5:])
else:
max_diff = np.abs(output_batch[0][0] - output[0][0]).max()
assert max_diff < expected_max_diff
if test_mean_pixel_difference:
assert_mean_pixel_difference(output_batch[0][0], output[0][0])
def test_adapter_sdxl_lcm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionXLAdapterPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])
debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()]
print(",".join(debug))
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_adapter_sdxl_lcm_custom_timesteps(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionXLAdapterPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
del inputs["num_inference_steps"]
inputs["timesteps"] = [999, 499]
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])
debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()]
print(",".join(debug))
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# Source: diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class OnnxPipelineTesterMixin:
"""
This mixin is designed to be used with unittest.TestCase classes.
It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline,
equivalence of dict and tuple outputs, etc.
"""
pass
# Source: diffusers/tests/pipelines/test_pipelines_onnx_common.py
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
scheduler_classes = (CMStochasticIterativeScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**kwargs)
return config
# Override test_step_shape to add CMStochasticIterativeScheduler-specific logic regarding timesteps
# Problem is that we don't know two timesteps that will always be in the timestep schedule from only the scheduler
# config; scaled sigma_max is always in the timestep schedule, but sigma_min is in the sigma schedule while scaled
# sigma_min is not in the timestep schedule
def test_step_shape(self):
num_inference_steps = 10
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timestep_0 = scheduler.timesteps[0]
timestep_1 = scheduler.timesteps[1]
sample = self.dummy_sample
residual = 0.1 * sample
output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_clip_denoised(self):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=clip_denoised)
def test_full_loop_no_noise_onestep(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 1
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(timesteps):
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample, t)
# 2. predict noise residual
residual = model(scaled_sample, t)
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 192.7614) < 1e-2
assert abs(result_mean.item() - 0.2510) < 1e-3
def test_full_loop_no_noise_multistep(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [106, 0]
scheduler.set_timesteps(timesteps=timesteps)
timesteps = scheduler.timesteps
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample, t)
# 2. predict noise residual
residual = model(scaled_sample, t)
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 347.6357) < 1e-2
assert abs(result_mean.item() - 0.4527) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
t_start = 8
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
noise = self.dummy_noise_deter
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for t in timesteps:
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample, t)
# 2. predict noise residual
residual = model(scaled_sample, t)
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 763.9186) < 1e-2, f" expected result sum 763.9186, but get {result_sum}"
assert abs(result_mean.item() - 0.9947) < 1e-3, f" expected result mean 0.9947, but get {result_mean}"
def test_custom_timesteps_increasing_order(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [39, 30, 12, 15, 0]
with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=timesteps)
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [39, 30, 12, 1, 0]
num_inference_steps = len(timesteps)
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def test_custom_timesteps_too_large(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError,
msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
):
scheduler.set_timesteps(timesteps=timesteps)
# Source: diffusers/tests/schedulers/test_scheduler_consistency_model.py
import torch
from diffusers import HeunDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class HeunDiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (HeunDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear", "exp"]:
self.check_over_configs(beta_schedule=schedule)
def test_clip_sample(self):
for clip_sample_range in [1.0, 2.0, 3.0]:
self.check_over_configs(clip_sample_range=clip_sample_range, clip_sample=True)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction", "sample"]:
self.check_over_configs(prediction_type=prediction_type)
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = self.num_inference_steps
scheduler.set_timesteps(num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
return sample
def full_loop_custom_timesteps(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = self.num_inference_steps
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
timesteps = torch.cat([timesteps[:1], timesteps[1::2]])
# reset the timesteps using `timesteps`
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps=None, timesteps=timesteps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
return sample
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07) < 1e-2
assert abs(result_mean.item() - 6.1112e-10) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
if str(torch_device).startswith("cpu"):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
elif str(torch_device).startswith("mps"):
# Larger tolerance on mps
assert abs(result_mean.item() - 0.0002) < 1e-2
else:
# CUDA
assert abs(result_sum.item() - 0.1233) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 0.00015) < 1e-2
assert abs(result_mean.item() - 1.9869554535034695e-07) < 1e-2
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
t_start = self.num_inference_steps - 2
noise = self.dummy_noise_deter
noise = noise.to(torch_device)
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 75074.8906) < 1e-2, f" expected result sum 75074.8906, but get {result_sum}"
assert abs(result_mean.item() - 97.7538) < 1e-3, f" expected result mean 97.7538, but get {result_mean}"
def test_custom_timesteps(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
for timestep_spacing in ["linspace", "leading"]:
sample = self.full_loop(
prediction_type=prediction_type,
timestep_spacing=timestep_spacing,
)
sample_custom_timesteps = self.full_loop_custom_timesteps(
prediction_type=prediction_type,
timestep_spacing=timestep_spacing,
)
assert (
torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, timestep_spacing: {timestep_spacing}"
# Source: diffusers/tests/schedulers/test_scheduler_heun.py
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
ControlNetModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
)
enable_full_determinism()
@slow
@require_torch_gpu
class ControlNetModelSingleFileTests(unittest.TestCase):
model_class = ControlNetModel
ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path)
assert model_default.config.upcast_attention is False
assert model_default.dtype == torch.float32
torch_dtype = torch.float16
upcast_attention = True
model = self.model_class.from_single_file(
self.ckpt_path,
upcast_attention=upcast_attention,
torch_dtype=torch_dtype,
)
assert model.config.upcast_attention == upcast_attention
assert model.dtype == torch_dtype
# Source: diffusers/tests/single_file/test_model_controlnet_single_file.py
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values()):
checkpoint_found = False
# source code of `config_class`
config_source = inspect.getsource(config_class)
checkpoints = _re_checkpoint.findall(config_source)
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
ckpt_name, ckpt_link = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
checkpoint_found = True
break
name = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(name)
if len(configs_without_checkpoint) > 0:
message = "\n".join(sorted(configs_without_checkpoint))
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
# Source: diffusers/utils/check_config_docstrings.py
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
# Source: diffusers/utils/print_env.py
# Video benchmark
## Questions
What is the optimal trade-off between:
- minimizing loading time with random access,
- minimizing memory space on disk,
- maximizing success rate of policies,
- compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).
How to encode videos?
- Which video codec (`-vcodec`) to use? h264, h265, AV1?
- What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
- How much compression (`-crf`)? No compression with `0`, intermediate compression with `25`, or extreme compression with `50+`?
- Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?
How to decode videos?
- Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
- Which scenarios to use for the requested timestamps during the benchmark (`timestamps_mode`)?
## Variables
**Image content & size**
We don't expect the same optimal settings for a dataset of images from a simulation, from the real world in an apartment, in a factory, outdoors, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
For these reasons, we run this benchmark on four representative datasets:
- `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
- `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
- `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
- `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera.
Note: The datasets used for this benchmark need to be image datasets, not video datasets.
**Data augmentations**
We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
### Encoding parameters
| parameter | values |
|-------------|--------------------------------------------------------------|
| **vcodec** | `libx264`, `libx265`, `libsvtav1` |
| **pix_fmt** | `yuv444p`, `yuv420p` |
| **g** | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` |
| **crf** | `0`, `5`, `10`, `15`, `20`, `25`, `30`, `40`, `50`, `None` |
Note that the `crf` value might be interpreted differently by different video codecs. In other words, the same value used with one codec doesn't necessarily translate into the same compression level with another codec. In fact, the default value (`None`) isn't the same among the different video codecs. Importantly, the same is true for many other ffmpeg arguments, such as `g`, which specifies the frequency of the key frames.
For a comprehensive list and documentation of these parameters, see the ffmpeg documentation depending on the video codec used:
- h264: https://trac.ffmpeg.org/wiki/Encode/H.264
- h265: https://trac.ffmpeg.org/wiki/Encode/H.265
- AV1: https://trac.ffmpeg.org/wiki/Encode/AV1
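For illustration, the encoding parameters above map directly to ffmpeg command-line arguments. The sketch below is not the benchmark's own code (see `run_video_benchmark.py` for that): the helper name, frame naming pattern and default values are assumptions, and flag behaviour can vary with the codec and ffmpeg version.
```python
import subprocess

def encode_frames(
    frames_dir: str,
    out_path: str,
    vcodec: str = "libsvtav1",
    pix_fmt: str = "yuv420p",
    g: int = 2,
    crf: int = 30,
    fps: int = 30,
) -> None:
    """Encode a directory of numbered PNG frames into one video (illustrative only)."""
    cmd = [
        "ffmpeg", "-y",
        "-framerate", str(fps),
        "-i", f"{frames_dir}/frame_%06d.png",  # hypothetical frame naming pattern
        "-vcodec", vcodec,    # e.g. libx264, libx265, libsvtav1
        "-pix_fmt", pix_fmt,  # e.g. yuv420p or yuv444p
        "-g", str(g),         # key frame interval
        "-crf", str(crf),     # compression level
        out_path,
    ]
    subprocess.run(cmd, check=True)
```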
### Decoding parameters
**Decoder**
We tested two video decoding backends from torchvision:
- `pyav` (default)
- `video_reader` (requires building torchvision from source)
**Requested timestamps**
Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast.
This is of course affected by the `-g` parameter used during encoding, which specifies the frequency of the keyframes. Given our typical use cases in robotics, where policies might request a few timestamps at different random places, we want to replicate these use cases with the following scenarios:
- `1_frame`: 1 frame,
- `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`),
- `6_frames`: 6 consecutive frames (e.g. `[t + i / fps for i in range(6)]`)
Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.
Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
- `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g. `[t, t + 5 / fps]`).
However, due to how video decoding is implemented with `pyav`, we don't have access to accurate seeking, so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded.
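As a concrete illustration, the scenarios above can be expressed as simple timestamp generators; the function name and signature below are only a sketch, not the benchmark's API.
```python
def get_query_timestamps(mode: str, t: float, fps: float) -> list[float]:
    """Return the timestamps (in seconds) requested for one sample (illustrative only)."""
    if mode == "1_frame":
        return [t]
    if mode == "2_frames":
        return [t, t + 1 / fps]
    if mode == "6_frames":
        return [t + i / fps for i in range(6)]
    if mode == "2_frames_4_space":
        return [t, t + 5 / fps]
    raise ValueError(f"Unknown timestamps mode: {mode}")
```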
## Metrics
**Data compression ratio (lower is better)**
`video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images.
**Loading time ratio (lower is better)**
`video_images_load_time_ratio` is the ratio of the time it takes to decode frames from the video at given timestamps over the time it takes to load the exact same original images. For instance, `video_images_load_time_ratio=200%` means that decoding from video is 2 times slower than loading the original images.
**Average Mean Square Error (lower is better)**
`avg_mse` is the average mean squared error between each decoded frame and its corresponding original image over all requested timestamps, divided by the number of pixels in the image so that it remains comparable across different image sizes.
**Average Peak Signal to Noise Ratio (higher is better)**
`avg_psnr` measures the ratio between the maximum possible power of a signal and the power of corrupting noise that affects the fidelity of its representation. Higher PSNR indicates better quality.
**Average Structural Similarity Index Measure (higher is better)**
`avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity.
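As a rough sketch of how the pixel-level metrics above can be computed (assuming frames are float arrays normalized to `[0, 1]`; the benchmark script may implement them differently):
```python
import numpy as np

def mse_and_psnr(original: np.ndarray, decoded: np.ndarray) -> tuple[float, float]:
    """Per-frame MSE (normalized by pixel count) and PSNR, assuming values in [0, 1]."""
    mse = float(np.mean((original - decoded) ** 2))
    psnr = float("inf") if mse == 0 else 10 * np.log10(1.0 / mse)  # peak signal value is 1.0
    return mse, psnr
```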
One aspect that can't be measured here with these metrics is the compatibility of the encoding across platforms, in particular in web browsers, for visualization purposes.
h264, h265 and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling (`pix_fmt`) format might affect compatibility:
- `yuv420p` is more widely supported across various platforms, including web browsers.
- `yuv444p` offers higher color fidelity but might not be supported as broadly.
<!-- **Loss of a pretrained policy (higher is better)** (not available)
`loss_pretrained` is the result of evaluating with the selected encoding/decoding settings a policy pretrained on original images. It is easier to understand than `avg_l2_error`.
**Success rate after retraining (higher is better)** (not available)
`success_rate` is the result of training and evaluating a policy with the selected encoding/decoding settings. It is the most difficult metric to get but also the very best. -->
## How the benchmark works
The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset.
**Encoding:** for each `vcodec` and `pix_fmt` pair, we start from default values for `g` and `crf` and change a single parameter (either `g` or `crf`) to one of the specified values (we don't test every combination of those, as that would be computationally too heavy).
This gives a unique set of encoding parameters which is used to encode the episode.
**Decoding:** Then, for each of those unique encodings, we iterate through every combination of the decoding parameters `backend` and `timestamps_mode`. For each of them, we record the metrics of a number of samples (given by `--num-samples`). This is parallelized for efficiency and the number of processes can be controlled with `--num-workers`. Ideally, it's best to have a `--num-samples` that is divisible by `--num-workers`.
Intermediate results are saved in CSV tables for each `vcodec` and `pix_fmt` combination.
These are then all concatenated to a single table ready for analysis.
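The following snippet sketches how the unique encodings described above can be enumerated; the default values shown are assumptions for illustration, not necessarily those used by the script.
```python
vcodecs = {
    "libx264": ["yuv420p", "yuv444p"],
    "libx265": ["yuv420p", "yuv444p"],
    "libsvtav1": ["yuv420p"],
}
g_values = [1, 2, 3, 4, 5, 6, 10, 15, 20, 40, None]
crf_values = [0, 5, 10, 15, 20, 25, 30, 40, 50, None]
default_g, default_crf = 2, 30  # hypothetical defaults

encodings = set()
for vcodec, pix_fmts in vcodecs.items():
    for pix_fmt in pix_fmts:
        # vary a single parameter at a time, keeping the other at its default
        encodings.update((vcodec, pix_fmt, g, default_crf) for g in g_values)
        encodings.update((vcodec, pix_fmt, default_g, crf) for crf in crf_values)
print(f"{len(encodings)} unique encodings to benchmark")
```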
## Caveats
We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination.
Additional encoding parameters exist that are not included in this benchmark. In particular:
- `-preset`, which selects an encoding preset: a collection of options that trades encoding speed against compression ratio. When left unspecified, it defaults to `medium` for libx264 and libx265 and to `8` for libsvtav1.
- `-tune`, which optimizes the encoding for certain aspects (e.g. film quality, fast decoding, etc.).
See the documentation mentioned above for more detailed information on these settings and for a more comprehensive list of other parameters.
Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
- `torchaudio`
- `ffmpegio`
- `decord`
- `nvc`
Note as well that since we are mostly interested in decoding performance (and because encoding is done only once before uploading a dataset), we did not measure encoding times nor report any metrics on encoding.
However, apart from requiring ffmpeg to be built from source, encoding did not pose any issue and did not take a significant amount of time during this benchmark.
## Install
Building ffmpeg from source is required to include libx265 and libaom/libsvtav1 (av1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)).
**Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another ffmpeg version, custom-built with all the video codecs, for encoding. For the script to use that version, you can prepend the benchmark command with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built.
## Adding a video decoder
Right now, we're only benchmarking the two video decoders available with torchvision: `pyav` and `video_reader`.
You can easily add a new decoder to benchmark by adding it to this function in the script:
```diff
def decode_video_frames(
video_path: str,
timestamps: list[float],
tolerance_s: float,
backend: str,
) -> torch.Tensor:
if backend in ["pyav", "video_reader"]:
return decode_video_frames_torchvision(
video_path, timestamps, tolerance_s, backend
)
+ elif backend == "your_decoder":
+ return your_decoder_function(
+ video_path, timestamps, tolerance_s, backend
+ )
else:
raise NotImplementedError(backend)
```
## Example
For a quick run, you can try these parameters:
```bash
python benchmark/video/run_video_benchmark.py \
--output-dir outputs/video_benchmark \
--repo-ids \
lerobot/pusht_image \
aliberts/aloha_mobile_shrimp_image \
--vcodec libx264 libx265 \
--pix-fmt yuv444p yuv420p \
--g 2 20 None \
--crf 10 40 None \
--timestamps-modes 1_frame 2_frames \
--backends pyav video_reader \
--num-samples 5 \
--num-workers 5 \
--save-frames 0
```
## Results
### Reproduce
We ran the benchmark with the following parameters:
```bash
# h264 and h265 encodings
python benchmark/video/run_video_benchmark.py \
--output-dir outputs/video_benchmark \
--repo-ids \
lerobot/pusht_image \
aliberts/aloha_mobile_shrimp_image \
aliberts/paris_street \
aliberts/kitchen \
--vcodec libx264 libx265 \
--pix-fmt yuv444p yuv420p \
--g 1 2 3 4 5 6 10 15 20 40 None \
--crf 0 5 10 15 20 25 30 40 50 None \
--timestamps-modes 1_frame 2_frames 6_frames \
--backends pyav video_reader \
--num-samples 50 \
--num-workers 5 \
--save-frames 1
# av1 encoding (only compatible with yuv420p and pyav decoder)
python benchmark/video/run_video_benchmark.py \
--output-dir outputs/video_benchmark \
--repo-ids \
lerobot/pusht_image \
aliberts/aloha_mobile_shrimp_image \
aliberts/paris_street \
aliberts/kitchen \
--vcodec libsvtav1 \
--pix-fmt yuv420p \
--g 1 2 3 4 5 6 10 15 20 40 None \
--crf 0 5 10 15 20 25 30 40 50 None \
--timestamps-modes 1_frame 2_frames 6_frames \
--backends pyav \
--num-samples 50 \
--num-workers 5 \
--save-frames 1
```
The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing).
### Parameters selected for LeRobotDataset
Considering these results, we chose what we think is the best set of encoding parameters:
- vcodec: `libsvtav1`
- pix-fmt: `yuv420p`
- g: `2`
- crf: `30`
Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_reader` does not support it (and `pyav` doesn't require a custom build of `torchvision`).
### Summary
These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav`.
| video_images_size_ratio | vcodec | pix_fmt | | | |
|------------------------------------|------------|---------|-----------|-----------|-----------|
| | libx264 | | libx265 | | libsvtav1 |
| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
| lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% |
| aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% |
| aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% |
| aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% |
| video_images_load_time_ratio | vcodec | pix_fmt | | | |
|------------------------------------|---------|---------|----------|---------|-----------|
| | libx264 | | libx265 | | libsvtav1 |
| repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
| lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 |
| aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** |
| aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** |
| aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** |
| | | vcodec | pix_fmt | | | |
|------------------------------------|----------|----------|--------------|----------|-----------|--------------|
| | | libx264 | | libx265 | | libsvtav1 |
| repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
| lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 |
| | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 |
| | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% |
| aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** |
| | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** |
| | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** |
| aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** |
| | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** |
| | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** |
| aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** |
| | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** |
| | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** |
|
lerobot/benchmarks/video/README.md/0
|
{
"file_path": "lerobot/benchmarks/video/README.md",
"repo_id": "lerobot",
"token_count": 6168
}
| 186
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains lists of available environments, datasets and policies to reflect the current state of the LeRobot library.
We do not want to import all the dependencies, but instead we keep it lightweight to ensure fast access to these variables.
Example:
```python
import lerobot
print(lerobot.available_envs)
print(lerobot.available_tasks_per_env)
print(lerobot.available_datasets)
print(lerobot.available_datasets_per_env)
print(lerobot.available_real_world_datasets)
print(lerobot.available_policies)
print(lerobot.available_policies_per_env)
```
When implementing a new dataset loadable with LeRobotDataset, follow these steps:
- Update `available_datasets_per_env` in `lerobot/__init__.py`
When implementing a new environment (e.g. `gym_aloha`), follow these steps:
- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
When implementing a new policy class (e.g. `DiffusionPolicy`), follow these steps:
- Update `available_policies` and `available_policies_per_env` in `lerobot/__init__.py`
- Set the required `name` class attribute.
- Update variables in `tests/test_available.py` by importing your new Policy class
"""
import itertools
from lerobot.__version__ import __version__ # noqa: F401
# TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies`
# refers to a yaml file AND a modeling name. Same for `available_envs` which refers to
# a yaml file AND an environment name. The difference should be more obvious.
available_tasks_per_env = {
"aloha": [
"AlohaInsertion-v0",
"AlohaTransferCube-v0",
],
"pusht": ["PushT-v0"],
"xarm": ["XarmLift-v0"],
"dora_aloha_real": ["DoraAloha-v0", "DoraKoch-v0", "DoraReachy2-v0"],
}
available_envs = list(available_tasks_per_env.keys())
available_datasets_per_env = {
"aloha": [
"lerobot/aloha_sim_insertion_human",
"lerobot/aloha_sim_insertion_scripted",
"lerobot/aloha_sim_transfer_cube_human",
"lerobot/aloha_sim_transfer_cube_scripted",
"lerobot/aloha_sim_insertion_human_image",
"lerobot/aloha_sim_insertion_scripted_image",
"lerobot/aloha_sim_transfer_cube_human_image",
"lerobot/aloha_sim_transfer_cube_scripted_image",
],
# TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly
# coupled with tests.
"pusht": ["lerobot/pusht", "lerobot/pusht_image"],
"xarm": [
"lerobot/xarm_lift_medium",
"lerobot/xarm_lift_medium_replay",
"lerobot/xarm_push_medium",
"lerobot/xarm_push_medium_replay",
"lerobot/xarm_lift_medium_image",
"lerobot/xarm_lift_medium_replay_image",
"lerobot/xarm_push_medium_image",
"lerobot/xarm_push_medium_replay_image",
],
"dora_aloha_real": [
"lerobot/aloha_static_battery",
"lerobot/aloha_static_candy",
"lerobot/aloha_static_coffee",
"lerobot/aloha_static_coffee_new",
"lerobot/aloha_static_cups_open",
"lerobot/aloha_static_fork_pick_up",
"lerobot/aloha_static_pingpong_test",
"lerobot/aloha_static_pro_pencil",
"lerobot/aloha_static_screw_driver",
"lerobot/aloha_static_tape",
"lerobot/aloha_static_thread_velcro",
"lerobot/aloha_static_towel",
"lerobot/aloha_static_vinh_cup",
"lerobot/aloha_static_vinh_cup_left",
"lerobot/aloha_static_ziploc_slide",
],
}
available_real_world_datasets = [
"lerobot/aloha_mobile_cabinet",
"lerobot/aloha_mobile_chair",
"lerobot/aloha_mobile_elevator",
"lerobot/aloha_mobile_shrimp",
"lerobot/aloha_mobile_wash_pan",
"lerobot/aloha_mobile_wipe_wine",
"lerobot/aloha_static_battery",
"lerobot/aloha_static_candy",
"lerobot/aloha_static_coffee",
"lerobot/aloha_static_coffee_new",
"lerobot/aloha_static_cups_open",
"lerobot/aloha_static_fork_pick_up",
"lerobot/aloha_static_pingpong_test",
"lerobot/aloha_static_pro_pencil",
"lerobot/aloha_static_screw_driver",
"lerobot/aloha_static_tape",
"lerobot/aloha_static_thread_velcro",
"lerobot/aloha_static_towel",
"lerobot/aloha_static_vinh_cup",
"lerobot/aloha_static_vinh_cup_left",
"lerobot/aloha_static_ziploc_slide",
"lerobot/umi_cup_in_the_wild",
"lerobot/unitreeh1_fold_clothes",
"lerobot/unitreeh1_rearrange_objects",
"lerobot/unitreeh1_two_robot_greeting",
"lerobot/unitreeh1_warehouse",
"lerobot/nyu_rot_dataset",
"lerobot/utokyo_saytap",
"lerobot/imperialcollege_sawyer_wrist_cam",
"lerobot/utokyo_xarm_bimanual",
"lerobot/tokyo_u_lsmo",
"lerobot/utokyo_pr2_opening_fridge",
"lerobot/cmu_franka_exploration_dataset",
"lerobot/cmu_stretch",
"lerobot/asu_table_top",
"lerobot/utokyo_pr2_tabletop_manipulation",
"lerobot/utokyo_xarm_pick_and_place",
"lerobot/ucsd_kitchen_dataset",
"lerobot/austin_buds_dataset",
"lerobot/dlr_sara_grid_clamp",
"lerobot/conq_hose_manipulation",
"lerobot/columbia_cairlab_pusht_real",
"lerobot/dlr_sara_pour",
"lerobot/dlr_edan_shared_control",
"lerobot/ucsd_pick_and_place_dataset",
"lerobot/berkeley_cable_routing",
"lerobot/nyu_franka_play_dataset",
"lerobot/austin_sirius_dataset",
"lerobot/cmu_play_fusion",
"lerobot/berkeley_gnm_sac_son",
"lerobot/nyu_door_opening_surprising_effectiveness",
"lerobot/berkeley_fanuc_manipulation",
"lerobot/jaco_play",
"lerobot/viola",
"lerobot/kaist_nonprehensile",
"lerobot/berkeley_mvp",
"lerobot/uiuc_d3field",
"lerobot/berkeley_gnm_recon",
"lerobot/austin_sailor_dataset",
"lerobot/utaustin_mutex",
"lerobot/roboturk",
"lerobot/stanford_hydra_dataset",
"lerobot/berkeley_autolab_ur5",
"lerobot/stanford_robocook",
"lerobot/toto",
"lerobot/fmb",
"lerobot/droid_100",
"lerobot/berkeley_rpt",
"lerobot/stanford_kuka_multimodal_dataset",
"lerobot/iamlab_cmu_pickup_insert",
"lerobot/taco_play",
"lerobot/berkeley_gnm_cory_hall",
"lerobot/usc_cloth_sim",
]
available_datasets = list(
itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)
)
# lists all available policies from `lerobot/common/policies` by their class attribute: `name`.
available_policies = [
"act",
"diffusion",
"tdmpc",
"vqbet",
]
# keys and values refer to yaml files
available_policies_per_env = {
"aloha": ["act"],
"pusht": ["diffusion", "vqbet"],
"xarm": ["tdmpc"],
"dora_aloha_real": ["act_real"],
}
env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
env_dataset_pairs = [
(env, dataset) for env, datasets in available_datasets_per_env.items() for dataset in datasets
]
env_dataset_policy_triplets = [
(env, dataset, policy)
for env, datasets in available_datasets_per_env.items()
for dataset in datasets
for policy in available_policies_per_env[env]
]
|
lerobot/lerobot/__init__.py/0
|
{
"file_path": "lerobot/lerobot/__init__.py",
"repo_id": "lerobot",
"token_count": 3408
}
| 187
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper code for loading PushT dataset from Diffusion Policy (https://diffusion-policy.cs.columbia.edu/)
Copied from the original Diffusion Policy repository and used in our `download_and_upload_dataset.py` script.
"""
from __future__ import annotations
import math
import numbers
import os
from functools import cached_property
import numcodecs
import numpy as np
import zarr
def check_chunks_compatible(chunks: tuple, shape: tuple):
assert len(shape) == len(chunks)
for c in chunks:
assert isinstance(c, numbers.Integral)
assert c > 0
def rechunk_recompress_array(group, name, chunks=None, chunk_length=None, compressor=None, tmp_key="_temp"):
old_arr = group[name]
if chunks is None:
chunks = (chunk_length,) + old_arr.chunks[1:] if chunk_length is not None else old_arr.chunks
check_chunks_compatible(chunks, old_arr.shape)
if compressor is None:
compressor = old_arr.compressor
if (chunks == old_arr.chunks) and (compressor == old_arr.compressor):
# no change
return old_arr
# rechunk recompress
group.move(name, tmp_key)
old_arr = group[tmp_key]
n_copied, n_skipped, n_bytes_copied = zarr.copy(
source=old_arr,
dest=group,
name=name,
chunks=chunks,
compressor=compressor,
)
del group[tmp_key]
arr = group[name]
return arr
def get_optimal_chunks(shape, dtype, target_chunk_bytes=2e6, max_chunk_length=None):
"""
Common shapes
T,D
T,N,D
T,H,W,C
T,N,H,W,C
"""
itemsize = np.dtype(dtype).itemsize
# reversed
rshape = list(shape[::-1])
if max_chunk_length is not None:
rshape[-1] = int(max_chunk_length)
split_idx = len(shape) - 1
for i in range(len(shape) - 1):
this_chunk_bytes = itemsize * np.prod(rshape[:i])
next_chunk_bytes = itemsize * np.prod(rshape[: i + 1])
if this_chunk_bytes <= target_chunk_bytes and next_chunk_bytes > target_chunk_bytes:
split_idx = i
rchunks = rshape[:split_idx]
item_chunk_bytes = itemsize * np.prod(rshape[:split_idx])
this_max_chunk_length = rshape[split_idx]
next_chunk_length = min(this_max_chunk_length, math.ceil(target_chunk_bytes / item_chunk_bytes))
rchunks.append(next_chunk_length)
len_diff = len(shape) - len(rchunks)
rchunks.extend([1] * len_diff)
chunks = tuple(rchunks[::-1])
# print(np.prod(chunks) * itemsize / target_chunk_bytes)
return chunks
class ReplayBuffer:
"""
Zarr-based temporal datastructure.
Assumes first dimension to be time. Only chunk in time dimension.
"""
def __init__(self, root: zarr.Group | dict[str, dict]):
"""
Dummy constructor. Use copy_from* and create_from* class methods instead.
"""
assert "data" in root
assert "meta" in root
assert "episode_ends" in root["meta"]
for value in root["data"].values():
assert value.shape[0] == root["meta"]["episode_ends"][-1]
self.root = root
# ============= create constructors ===============
@classmethod
def create_empty_zarr(cls, storage=None, root=None):
if root is None:
if storage is None:
storage = zarr.MemoryStore()
root = zarr.group(store=storage)
root.require_group("data", overwrite=False)
meta = root.require_group("meta", overwrite=False)
if "episode_ends" not in meta:
meta.zeros("episode_ends", shape=(0,), dtype=np.int64, compressor=None, overwrite=False)
return cls(root=root)
@classmethod
def create_empty_numpy(cls):
root = {"data": {}, "meta": {"episode_ends": np.zeros((0,), dtype=np.int64)}}
return cls(root=root)
@classmethod
def create_from_group(cls, group, **kwargs):
if "data" not in group:
# create from scratch
buffer = cls.create_empty_zarr(root=group, **kwargs)
else:
# already exist
buffer = cls(root=group, **kwargs)
return buffer
@classmethod
def create_from_path(cls, zarr_path, mode="r", **kwargs):
"""
Open an on-disk zarr directly (for datasets larger than memory).
Slower.
"""
group = zarr.open(os.path.expanduser(zarr_path), mode)
return cls.create_from_group(group, **kwargs)
# ============= copy constructors ===============
@classmethod
def copy_from_store(
cls,
src_store,
store=None,
keys=None,
chunks: dict[str, tuple] | None = None,
compressors: dict | str | numcodecs.abc.Codec | None = None,
if_exists="replace",
**kwargs,
):
"""
Load to memory.
"""
src_root = zarr.group(src_store)
if chunks is None:
chunks = {}
if compressors is None:
compressors = {}
root = None
if store is None:
# numpy backend
meta = {}
for key, value in src_root["meta"].items():
if len(value.shape) == 0:
meta[key] = np.array(value)
else:
meta[key] = value[:]
if keys is None:
keys = src_root["data"].keys()
data = {}
for key in keys:
arr = src_root["data"][key]
data[key] = arr[:]
root = {"meta": meta, "data": data}
else:
root = zarr.group(store=store)
# copy without recompression
n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
source=src_store, dest=store, source_path="/meta", dest_path="/meta", if_exists=if_exists
)
data_group = root.create_group("data", overwrite=True)
if keys is None:
keys = src_root["data"].keys()
for key in keys:
value = src_root["data"][key]
cks = cls._resolve_array_chunks(chunks=chunks, key=key, array=value)
cpr = cls._resolve_array_compressor(compressors=compressors, key=key, array=value)
if cks == value.chunks and cpr == value.compressor:
# copy without recompression
this_path = "/data/" + key
n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
source=src_store,
dest=store,
source_path=this_path,
dest_path=this_path,
if_exists=if_exists,
)
else:
# copy with recompression
n_copied, n_skipped, n_bytes_copied = zarr.copy(
source=value,
dest=data_group,
name=key,
chunks=cks,
compressor=cpr,
if_exists=if_exists,
)
buffer = cls(root=root)
return buffer
@classmethod
def copy_from_path(
cls,
zarr_path,
backend=None,
store=None,
keys=None,
chunks: dict[str, tuple] | None = None,
compressors: dict | str | numcodecs.abc.Codec | None = None,
if_exists="replace",
**kwargs,
):
"""
Copy an on-disk zarr into memory, compressed.
Recommended
"""
if chunks is None:
chunks = {}
if compressors is None:
compressors = {}
if backend == "numpy":
print("backend argument is deprecated!")
store = None
group = zarr.open(os.path.expanduser(zarr_path), "r")
return cls.copy_from_store(
src_store=group.store,
store=store,
keys=keys,
chunks=chunks,
compressors=compressors,
if_exists=if_exists,
**kwargs,
)
# ============= save methods ===============
def save_to_store(
self,
store,
chunks: dict[str, tuple] | None = None,
compressors: str | numcodecs.abc.Codec | dict | None = None,
if_exists="replace",
**kwargs,
):
root = zarr.group(store)
if chunks is None:
chunks = {}
if compressors is None:
compressors = {}
if self.backend == "zarr":
# recompression free copy
n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
source=self.root.store,
dest=store,
source_path="/meta",
dest_path="/meta",
if_exists=if_exists,
)
else:
meta_group = root.create_group("meta", overwrite=True)
# save meta, no chunking
for key, value in self.root["meta"].items():
_ = meta_group.array(name=key, data=value, shape=value.shape, chunks=value.shape)
# save data, chunk
data_group = root.create_group("data", overwrite=True)
for key, value in self.root["data"].items():
cks = self._resolve_array_chunks(chunks=chunks, key=key, array=value)
cpr = self._resolve_array_compressor(compressors=compressors, key=key, array=value)
if isinstance(value, zarr.Array):
if cks == value.chunks and cpr == value.compressor:
# copy without recompression
this_path = "/data/" + key
n_copied, n_skipped, n_bytes_copied = zarr.copy_store(
source=self.root.store,
dest=store,
source_path=this_path,
dest_path=this_path,
if_exists=if_exists,
)
else:
# copy with recompression
n_copied, n_skipped, n_bytes_copied = zarr.copy(
source=value,
dest=data_group,
name=key,
chunks=cks,
compressor=cpr,
if_exists=if_exists,
)
else:
# numpy
_ = data_group.array(name=key, data=value, chunks=cks, compressor=cpr)
return store
def save_to_path(
self,
zarr_path,
chunks: dict[str, tuple] | None = None,
compressors: str | numcodecs.abc.Codec | dict | None = None,
if_exists="replace",
**kwargs,
):
if chunks is None:
chunks = {}
if compressors is None:
compressors = {}
store = zarr.DirectoryStore(os.path.expanduser(zarr_path))
return self.save_to_store(
store, chunks=chunks, compressors=compressors, if_exists=if_exists, **kwargs
)
@staticmethod
def resolve_compressor(compressor="default"):
if compressor == "default":
compressor = numcodecs.Blosc(cname="lz4", clevel=5, shuffle=numcodecs.Blosc.NOSHUFFLE)
elif compressor == "disk":
compressor = numcodecs.Blosc("zstd", clevel=5, shuffle=numcodecs.Blosc.BITSHUFFLE)
return compressor
@classmethod
def _resolve_array_compressor(cls, compressors: dict | str | numcodecs.abc.Codec, key, array):
# allows compressor to be explicitly set to None
cpr = "nil"
if isinstance(compressors, dict):
if key in compressors:
cpr = cls.resolve_compressor(compressors[key])
elif isinstance(array, zarr.Array):
cpr = array.compressor
else:
cpr = cls.resolve_compressor(compressors)
# backup default
if cpr == "nil":
cpr = cls.resolve_compressor("default")
return cpr
@classmethod
def _resolve_array_chunks(cls, chunks: dict | tuple, key, array):
cks = None
if isinstance(chunks, dict):
if key in chunks:
cks = chunks[key]
elif isinstance(array, zarr.Array):
cks = array.chunks
elif isinstance(chunks, tuple):
cks = chunks
else:
raise TypeError(f"Unsupported chunks type {type(chunks)}")
# backup default
if cks is None:
cks = get_optimal_chunks(shape=array.shape, dtype=array.dtype)
# check
check_chunks_compatible(chunks=cks, shape=array.shape)
return cks
# ============= properties =================
@cached_property
def data(self):
return self.root["data"]
@cached_property
def meta(self):
return self.root["meta"]
def update_meta(self, data):
# sanitize data
np_data = {}
for key, value in data.items():
if isinstance(value, np.ndarray):
np_data[key] = value
else:
arr = np.array(value)
if arr.dtype == object:
raise TypeError(f"Invalid value type {type(value)}")
np_data[key] = arr
meta_group = self.meta
if self.backend == "zarr":
for key, value in np_data.items():
_ = meta_group.array(
name=key, data=value, shape=value.shape, chunks=value.shape, overwrite=True
)
else:
meta_group.update(np_data)
return meta_group
@property
def episode_ends(self):
return self.meta["episode_ends"]
def get_episode_idxs(self):
import numba
@numba.jit(nopython=True)
def _get_episode_idxs(episode_ends):
result = np.zeros((episode_ends[-1],), dtype=np.int64)
for i in range(len(episode_ends)):
start = 0
if i > 0:
start = episode_ends[i - 1]
end = episode_ends[i]
for idx in range(start, end):
result[idx] = i
return result
return _get_episode_idxs(self.episode_ends)
@property
def backend(self):
backend = "numpy"
if isinstance(self.root, zarr.Group):
backend = "zarr"
return backend
# =========== dict-like API ==============
def __repr__(self) -> str:
if self.backend == "zarr":
return str(self.root.tree())
else:
return super().__repr__()
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
def __getitem__(self, key):
return self.data[key]
def __contains__(self, key):
return key in self.data
# =========== our API ==============
@property
def n_steps(self):
if len(self.episode_ends) == 0:
return 0
return self.episode_ends[-1]
@property
def n_episodes(self):
return len(self.episode_ends)
@property
def chunk_size(self):
if self.backend == "zarr":
return next(iter(self.data.arrays()))[-1].chunks[0]
return None
@property
def episode_lengths(self):
ends = self.episode_ends[:]
ends = np.insert(ends, 0, 0)
lengths = np.diff(ends)
return lengths
def add_episode(
self,
data: dict[str, np.ndarray],
chunks: dict[str, tuple] | None = None,
compressors: str | numcodecs.abc.Codec | dict | None = None,
):
if chunks is None:
chunks = {}
if compressors is None:
compressors = {}
assert len(data) > 0
is_zarr = self.backend == "zarr"
curr_len = self.n_steps
episode_length = None
for value in data.values():
assert len(value.shape) >= 1
if episode_length is None:
episode_length = len(value)
else:
assert episode_length == len(value)
new_len = curr_len + episode_length
for key, value in data.items():
new_shape = (new_len,) + value.shape[1:]
# create array
if key not in self.data:
if is_zarr:
cks = self._resolve_array_chunks(chunks=chunks, key=key, array=value)
cpr = self._resolve_array_compressor(compressors=compressors, key=key, array=value)
arr = self.data.zeros(
name=key, shape=new_shape, chunks=cks, dtype=value.dtype, compressor=cpr
)
else:
# copy data to prevent modify
arr = np.zeros(shape=new_shape, dtype=value.dtype)
self.data[key] = arr
else:
arr = self.data[key]
assert value.shape[1:] == arr.shape[1:]
# same method for both zarr and numpy
if is_zarr:
arr.resize(new_shape)
else:
arr.resize(new_shape, refcheck=False)
# copy data
arr[-value.shape[0] :] = value
# append to episode ends
episode_ends = self.episode_ends
if is_zarr:
episode_ends.resize(episode_ends.shape[0] + 1)
else:
episode_ends.resize(episode_ends.shape[0] + 1, refcheck=False)
episode_ends[-1] = new_len
# rechunk
if is_zarr and episode_ends.chunks[0] < episode_ends.shape[0]:
rechunk_recompress_array(self.meta, "episode_ends", chunk_length=int(episode_ends.shape[0] * 1.5))
def drop_episode(self):
is_zarr = self.backend == "zarr"
episode_ends = self.episode_ends[:].copy()
assert len(episode_ends) > 0
start_idx = 0
if len(episode_ends) > 1:
start_idx = episode_ends[-2]
for value in self.data.values():
new_shape = (start_idx,) + value.shape[1:]
if is_zarr:
value.resize(new_shape)
else:
value.resize(new_shape, refcheck=False)
if is_zarr:
self.episode_ends.resize(len(episode_ends) - 1)
else:
self.episode_ends.resize(len(episode_ends) - 1, refcheck=False)
def pop_episode(self):
assert self.n_episodes > 0
episode = self.get_episode(self.n_episodes - 1, copy=True)
self.drop_episode()
return episode
def extend(self, data):
self.add_episode(data)
def get_episode(self, idx, copy=False):
idx = list(range(len(self.episode_ends)))[idx]
start_idx = 0
if idx > 0:
start_idx = self.episode_ends[idx - 1]
end_idx = self.episode_ends[idx]
result = self.get_steps_slice(start_idx, end_idx, copy=copy)
return result
def get_episode_slice(self, idx):
start_idx = 0
if idx > 0:
start_idx = self.episode_ends[idx - 1]
end_idx = self.episode_ends[idx]
return slice(start_idx, end_idx)
def get_steps_slice(self, start, stop, step=None, copy=False):
_slice = slice(start, stop, step)
result = {}
for key, value in self.data.items():
x = value[_slice]
if copy and isinstance(value, np.ndarray):
x = x.copy()
result[key] = x
return result
# =========== chunking =============
def get_chunks(self) -> dict:
assert self.backend == "zarr"
chunks = {}
for key, value in self.data.items():
chunks[key] = value.chunks
return chunks
def set_chunks(self, chunks: dict):
assert self.backend == "zarr"
for key, value in chunks.items():
if key in self.data:
arr = self.data[key]
if value != arr.chunks:
check_chunks_compatible(chunks=value, shape=arr.shape)
rechunk_recompress_array(self.data, key, chunks=value)
def get_compressors(self) -> dict:
assert self.backend == "zarr"
compressors = {}
for key, value in self.data.items():
compressors[key] = value.compressor
return compressors
def set_compressors(self, compressors: dict):
assert self.backend == "zarr"
for key, value in compressors.items():
if key in self.data:
arr = self.data[key]
compressor = self.resolve_compressor(value)
if compressor != arr.compressor:
rechunk_recompress_array(self.data, key, compressor=compressor)
|
lerobot/lerobot/common/datasets/push_dataset_to_hub/_diffusion_policy_replay_buffer.py/0
|
{
"file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/_diffusion_policy_replay_buffer.py",
"repo_id": "lerobot",
"token_count": 10692
}
| 188
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterator, Union
import torch
class EpisodeAwareSampler:
def __init__(
self,
episode_data_index: dict,
episode_indices_to_use: Union[list, None] = None,
drop_n_first_frames: int = 0,
drop_n_last_frames: int = 0,
shuffle: bool = False,
):
"""Sampler that optionally incorporates episode boundary information.
Args:
episode_data_index: Dictionary with keys 'from' and 'to' containing the start and end indices of each episode.
episode_indices_to_use: List of episode indices to use. If None, all episodes are used.
Assumes that episodes are indexed from 0 to N-1.
drop_n_first_frames: Number of frames to drop from the start of each episode.
drop_n_last_frames: Number of frames to drop from the end of each episode.
shuffle: Whether to shuffle the indices.
"""
indices = []
for episode_idx, (start_index, end_index) in enumerate(
zip(episode_data_index["from"], episode_data_index["to"], strict=True)
):
if episode_indices_to_use is None or episode_idx in episode_indices_to_use:
indices.extend(
range(start_index.item() + drop_n_first_frames, end_index.item() - drop_n_last_frames)
)
self.indices = indices
self.shuffle = shuffle
def __iter__(self) -> Iterator[int]:
if self.shuffle:
for i in torch.randperm(len(self.indices)):
yield self.indices[i]
else:
for i in self.indices:
yield i
def __len__(self) -> int:
return len(self.indices)
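# Example usage (hypothetical values, for illustration only; `dataset` stands for any
# indexable dataset whose items match `episode_data_index`):
#   sampler = EpisodeAwareSampler(
#       episode_data_index={"from": torch.tensor([0, 10]), "to": torch.tensor([10, 25])},
#       drop_n_last_frames=2,
#       shuffle=True,
#   )
#   dataloader = torch.utils.data.DataLoader(dataset, batch_size=8, sampler=sampler)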
|
lerobot/lerobot/common/datasets/sampler.py/0
|
{
"file_path": "lerobot/lerobot/common/datasets/sampler.py",
"repo_id": "lerobot",
"token_count": 952
}
| 189
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
def populate_queues(queues, batch):
for key in batch:
# Ignore keys not in the queues already (leaving the responsibility to the caller to make sure the
# queues have the keys they want).
if key not in queues:
continue
if len(queues[key]) != queues[key].maxlen:
# initialize by copying the first observation several times until the queue is full
while len(queues[key]) != queues[key].maxlen:
queues[key].append(batch[key])
else:
# add latest observation to the queue
queues[key].append(batch[key])
return queues
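# Example (hypothetical keys and shapes, for illustration only):
#   from collections import deque
#   queues = {"observation.state": deque(maxlen=2)}
#   batch = {"observation.state": torch.zeros(1, 7), "action": torch.zeros(1, 7)}
#   queues = populate_queues(queues, batch)
#   # "observation.state" now holds two copies of the first observation;
#   # "action" is ignored because it is not a key of `queues`.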
def get_device_from_parameters(module: nn.Module) -> torch.device:
"""Get a module's device by checking one of its parameters.
Note: assumes that all parameters have the same device
"""
return next(iter(module.parameters())).device
def get_dtype_from_parameters(module: nn.Module) -> torch.dtype:
"""Get a module's parameter dtype by checking one of its parameters.
Note: assumes that all parameters have the same dtype.
"""
return next(iter(module.parameters())).dtype
|
lerobot/lerobot/common/policies/utils.py/0
|
{
"file_path": "lerobot/lerobot/common/policies/utils.py",
"repo_id": "lerobot",
"token_count": 590
}
| 190
|
defaults:
- _self_
- env: pusht
- policy: diffusion
hydra:
run:
# Set `dir` to where you would like to save all of the run outputs. If you run another training session
# with the same value for `dir` its contents will be overwritten unless you set `resume` to true.
dir: outputs/train/${now:%Y-%m-%d}/${now:%H-%M-%S}_${env.name}_${policy.name}_${hydra.job.name}
job:
name: default
# Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure
# `hydra.run.dir` is the directory of an existing run with at least one checkpoint in it.
# Note that when resuming a run, the default behavior is to use the configuration from the checkpoint,
# regardless of what's provided with the training command at the time of resumption.
resume: false
device: cuda # cpu
# `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
# automatic gradient scaling is used.
use_amp: false
# `seed` is used for training (eg: model initialization, dataset shuffling)
# AND for the evaluation environments.
seed: ???
# You may provide a list of datasets here. `train.py` creates them all and concatenates them. Note: only data
# keys common between the datasets are kept. Each dataset gets an additional transform that inserts the
# "dataset_index" into the returned item. The index mapping is made according to the order in which the
# datasets are provided.
dataset_repo_id: lerobot/pusht
video_backend: pyav
training:
offline_steps: ???
# Number of workers for the offline training dataloader.
num_workers: 4
batch_size: ???
eval_freq: ???
log_freq: 200
save_checkpoint: true
# Checkpoint is saved every `save_freq` training iterations and after the last training step.
save_freq: ???
# Online training. Note that the online training loop adopts most of the options above apart from the
# dataloader options, unless otherwise specified.
# The online training loop looks something like:
#
# for i in range(online_steps):
# do_online_rollout_and_update_online_buffer()
# for j in range(online_steps_between_rollouts):
# batch = next(dataloader_with_offline_and_online_data)
# loss = policy(batch)
# loss.backward()
# optimizer.step()
#
online_steps: ???
# How many episodes to collect at once when we reach the online rollout part of the training loop.
online_rollout_n_episodes: 1
# The number of environments to use in the gym.vector.VectorEnv. This ends up also being the batch size for
# the policy. Ideally you should set this to be an even divisor of online_rollout_n_episodes.
online_rollout_batch_size: 1
# How many optimization steps (forward, backward, optimizer step) to do between running rollouts.
online_steps_between_rollouts: null
# The proportion of online samples (vs offline samples) to include in the online training batches.
online_sampling_ratio: 0.5
# First seed to use for the online rollout environment. Seeds for subsequent rollouts are incremented by 1.
online_env_seed: null
# Sets the maximum number of frames that are stored in the online buffer for online training. The buffer is
# FIFO.
online_buffer_capacity: null
# The minimum number of frames to have in the online buffer before commencing online training.
# If online_buffer_seed_size > online_rollout_n_episodes, the rollout will be run multiple times until the
# seed size condition is satisfied.
online_buffer_seed_size: 0
# Whether to run the online rollouts asynchronously. This means we can run the online training steps in
# parallel with the rollouts. This might be advised if your GPU has the bandwidth to handle training
# + eval + environment rendering simultaneously.
do_online_rollout_async: false
image_transforms:
# These transforms are all using standard torchvision.transforms.v2
# You can find out how these transformations affect images here:
# https://pytorch.org/vision/0.18/auto_examples/transforms/plot_transforms_illustrations.html
# We use a custom RandomSubsetApply container to sample them.
# For each transform, the following parameters are available:
# weight: This represents the multinomial probability (with no replacement)
# used for sampling the transform. If the sum of the weights is not 1,
# they will be normalized.
# min_max: Lower & upper bound respectively used for sampling the transform's parameter
# (following uniform distribution) when it's applied.
# Set this flag to `true` to enable transforms during training
enable: false
# This is the maximum number of transforms (sampled from these below) that will be applied to each frame.
# It's an integer in the interval [1, number of available transforms].
max_num_transforms: 3
# By default, transforms are applied in Torchvision's suggested order (shown below).
# Set this to True to apply them in a random order.
random_order: false
brightness:
weight: 1
min_max: [0.8, 1.2]
contrast:
weight: 1
min_max: [0.8, 1.2]
saturation:
weight: 1
min_max: [0.5, 1.5]
hue:
weight: 1
min_max: [-0.05, 0.05]
sharpness:
weight: 1
min_max: [0.8, 1.2]
eval:
n_episodes: 1
# `batch_size` specifies the number of environments to use in a gym.vector.VectorEnv.
batch_size: 1
# `use_async_envs` specifies whether to use asynchronous environments (multiprocessing).
use_async_envs: true
wandb:
enable: false
# Set to true to disable saving an artifact despite save_checkpoint == True
disable_artifact: false
project: lerobot
notes: ""
|
lerobot/lerobot/configs/default.yaml/0
|
{
"file_path": "lerobot/lerobot/configs/default.yaml",
"repo_id": "lerobot",
"token_count": 1759
}
| 191
|
_target_: lerobot.common.robot_devices.robots.koch.KochRobot
calibration_path: .cache/calibration/koch_bimanual.pkl
leader_arms:
left:
_target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
port: /dev/tty.usbmodem585A0085511
motors:
# name: (index, model)
shoulder_pan: [1, "xl330-m077"]
shoulder_lift: [2, "xl330-m077"]
elbow_flex: [3, "xl330-m077"]
wrist_flex: [4, "xl330-m077"]
wrist_roll: [5, "xl330-m077"]
gripper: [6, "xl330-m077"]
right:
_target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
port: /dev/tty.usbmodem575E0031751
motors:
# name: (index, model)
shoulder_pan: [1, "xl330-m077"]
shoulder_lift: [2, "xl330-m077"]
elbow_flex: [3, "xl330-m077"]
wrist_flex: [4, "xl330-m077"]
wrist_roll: [5, "xl330-m077"]
gripper: [6, "xl330-m077"]
follower_arms:
left:
_target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
port: /dev/tty.usbmodem585A0076891
motors:
# name: (index, model)
shoulder_pan: [1, "xl430-w250"]
shoulder_lift: [2, "xl430-w250"]
elbow_flex: [3, "xl330-m288"]
wrist_flex: [4, "xl330-m288"]
wrist_roll: [5, "xl330-m288"]
gripper: [6, "xl330-m288"]
right:
_target_: lerobot.common.robot_devices.motors.dynamixel.DynamixelMotorsBus
port: /dev/tty.usbmodem575E0032081
motors:
# name: (index, model)
shoulder_pan: [1, "xl430-w250"]
shoulder_lift: [2, "xl430-w250"]
elbow_flex: [3, "xl330-m288"]
wrist_flex: [4, "xl330-m288"]
wrist_roll: [5, "xl330-m288"]
gripper: [6, "xl330-m288"]
cameras:
laptop:
_target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
camera_index: 0
fps: 30
width: 640
height: 480
phone:
_target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
camera_index: 1
fps: 30
width: 640
height: 480
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
# the number of motors in your follower arms.
max_relative_target: null
# Sets the leader arm in torque mode with the gripper motor set to this angle. This makes it possible
# to squeeze the gripper and have it spring back to an open position on its own.
gripper_open_degree: 35.156
|
lerobot/lerobot/configs/robot/koch_bimanual.yaml/0
|
{
"file_path": "lerobot/lerobot/configs/robot/koch_bimanual.yaml",
"repo_id": "lerobot",
"token_count": 1135
}
| 192
|
version https://git-lfs.github.com/spec/v1
oid sha256:d394d451929b805f2d94f9fc5b12d15c31cfc494df76d7d642b63378b8ba0131
size 247
|
lerobot/tests/data/lerobot/aloha_mobile_elevator/train/state.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_mobile_elevator/train/state.json",
"repo_id": "lerobot",
"token_count": 66
}
| 193
|
version https://git-lfs.github.com/spec/v1
oid sha256:0a2c1f98c816728136291fcb7530cd0ebcf4ea47b0f6750836da56b8324d64c1
size 435600
|
lerobot/tests/data/lerobot/aloha_mobile_wash_pan/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_mobile_wash_pan/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 66
}
| 194
|
version https://git-lfs.github.com/spec/v1
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188
|
lerobot/tests/data/lerobot/aloha_sim_insertion_human/meta_data/info.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_sim_insertion_human/meta_data/info.json",
"repo_id": "lerobot",
"token_count": 68
}
| 195
|
version https://git-lfs.github.com/spec/v1
oid sha256:3948750400c273b7e3ef2998fa84ca7a520d8972c8759c2428ff6fbdc2bd8fb7
size 11505144
|
lerobot/tests/data/lerobot/aloha_sim_transfer_cube_scripted_image/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_sim_transfer_cube_scripted_image/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 66
}
| 196
|
version https://git-lfs.github.com/spec/v1
oid sha256:e03257da37ff6c6c37551bd2edf7bfaf832bbd9856d074978633a80cb32ee1d0
size 280536
|
lerobot/tests/data/lerobot/aloha_static_candy/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_candy/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 65
}
| 197
|
version https://git-lfs.github.com/spec/v1
oid sha256:8d97986c8775f069c508fee38129b9707c6ad8bf44dda7de54829e3685c1c0cb
size 247
|
lerobot/tests/data/lerobot/aloha_static_cups_open/train/state.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_cups_open/train/state.json",
"repo_id": "lerobot",
"token_count": 65
}
| 198
|
version https://git-lfs.github.com/spec/v1
oid sha256:3ee1bdb3085dc4554f58ccad0db48ca9ad153d866faa4b2ba42afebb3e3206b6
size 280536
|
lerobot/tests/data/lerobot/aloha_static_tape/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_tape/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 66
}
| 199
|
version https://git-lfs.github.com/spec/v1
oid sha256:6ab094bd9de1a90273cefa1e02a84b3c3916e7812b9c73932f8f92ab2f1b9ba9
size 3432
|
lerobot/tests/data/lerobot/pusht_image/meta_data/episode_data_index.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/pusht_image/meta_data/episode_data_index.safetensors",
"repo_id": "lerobot",
"token_count": 71
}
| 200
|
version https://git-lfs.github.com/spec/v1
oid sha256:ffb9f0f0c5001460cfe1d49ba84ad2722be5ee7885de97dd733cf9b36104a702
size 1245
|
lerobot/tests/data/lerobot/umi_cup_in_the_wild/train/dataset_info.json/0
|
{
"file_path": "lerobot/tests/data/lerobot/umi_cup_in_the_wild/train/dataset_info.json",
"repo_id": "lerobot",
"token_count": 64
}
| 201
|
version https://git-lfs.github.com/spec/v1
oid sha256:1d6f6342fa2e42703d1584c19e018c2f1254d24beb15a5ac54623c72e6e45e28
size 136
|
lerobot/tests/data/lerobot/xarm_lift_medium_replay/meta_data/episode_data_index.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/xarm_lift_medium_replay/meta_data/episode_data_index.safetensors",
"repo_id": "lerobot",
"token_count": 67
}
| 202
|
version https://git-lfs.github.com/spec/v1
oid sha256:791cbb9702e4b7313939600d64658c51e929c4e6d3945ed65fae25e0f9c27484
size 4928
|
lerobot/tests/data/lerobot/xarm_push_medium/train/data-00000-of-00001.arrow/0
|
{
"file_path": "lerobot/tests/data/lerobot/xarm_push_medium/train/data-00000-of-00001.arrow",
"repo_id": "lerobot",
"token_count": 67
}
| 203
|
version https://git-lfs.github.com/spec/v1
oid sha256:c8d9f9cc9e232820760fe4a46b47000c921fa5d868420e55d8dbc05dae56e8bd
size 111338
|
lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_160.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_160.safetensors",
"repo_id": "lerobot",
"token_count": 69
}
| 204
|
version https://git-lfs.github.com/spec/v1
oid sha256:d796577863740e8fd643a056e9eff891e51a858ff66019eba11f0a982cb9e9c0
size 16904
|
lerobot/tests/data/save_policy_to_safetensors/xarm_tdmpcuse_mpc/grad_stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_policy_to_safetensors/xarm_tdmpcuse_mpc/grad_stats.safetensors",
"repo_id": "lerobot",
"token_count": 68
}
| 205
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import numpy as np
import pytest
import torch
from PIL import Image
from safetensors.torch import load_file
from torchvision.transforms import v2
from torchvision.transforms.v2 import functional as F # noqa: N812
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.transforms import RandomSubsetApply, SharpnessJitter, get_image_transforms
from lerobot.common.utils.utils import init_hydra_config, seeded_context
from lerobot.scripts.visualize_image_transforms import visualize_transforms
from tests.utils import DEFAULT_CONFIG_PATH, require_x86_64_kernel
ARTIFACT_DIR = Path("tests/data/save_image_transforms_to_safetensors")
DATASET_REPO_ID = "lerobot/aloha_mobile_shrimp"
def load_png_to_tensor(path: Path):
return torch.from_numpy(np.array(Image.open(path).convert("RGB"))).permute(2, 0, 1)
@pytest.fixture
def img():
dataset = LeRobotDataset(DATASET_REPO_ID)
return dataset[0][dataset.camera_keys[0]]
@pytest.fixture
def img_random():
return torch.rand(3, 480, 640)
@pytest.fixture
def color_jitters():
return [
v2.ColorJitter(brightness=0.5),
v2.ColorJitter(contrast=0.5),
v2.ColorJitter(saturation=0.5),
]
@pytest.fixture
def single_transforms():
return load_file(ARTIFACT_DIR / "single_transforms.safetensors")
@pytest.fixture
def default_transforms():
return load_file(ARTIFACT_DIR / "default_transforms.safetensors")
def test_get_image_transforms_no_transform(img):
tf_actual = get_image_transforms(brightness_min_max=(0.5, 0.5), max_num_transforms=0)
torch.testing.assert_close(tf_actual(img), img)
@pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)])
def test_get_image_transforms_brightness(img, min_max):
tf_actual = get_image_transforms(brightness_weight=1.0, brightness_min_max=min_max)
tf_expected = v2.ColorJitter(brightness=min_max)
torch.testing.assert_close(tf_actual(img), tf_expected(img))
@pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)])
def test_get_image_transforms_contrast(img, min_max):
tf_actual = get_image_transforms(contrast_weight=1.0, contrast_min_max=min_max)
tf_expected = v2.ColorJitter(contrast=min_max)
torch.testing.assert_close(tf_actual(img), tf_expected(img))
@pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)])
def test_get_image_transforms_saturation(img, min_max):
tf_actual = get_image_transforms(saturation_weight=1.0, saturation_min_max=min_max)
tf_expected = v2.ColorJitter(saturation=min_max)
torch.testing.assert_close(tf_actual(img), tf_expected(img))
@pytest.mark.parametrize("min_max", [(-0.25, -0.25), (0.25, 0.25)])
def test_get_image_transforms_hue(img, min_max):
tf_actual = get_image_transforms(hue_weight=1.0, hue_min_max=min_max)
tf_expected = v2.ColorJitter(hue=min_max)
torch.testing.assert_close(tf_actual(img), tf_expected(img))
@pytest.mark.parametrize("min_max", [(0.5, 0.5), (2.0, 2.0)])
def test_get_image_transforms_sharpness(img, min_max):
tf_actual = get_image_transforms(sharpness_weight=1.0, sharpness_min_max=min_max)
tf_expected = SharpnessJitter(sharpness=min_max)
torch.testing.assert_close(tf_actual(img), tf_expected(img))
def test_get_image_transforms_max_num_transforms(img):
tf_actual = get_image_transforms(
brightness_min_max=(0.5, 0.5),
contrast_min_max=(0.5, 0.5),
saturation_min_max=(0.5, 0.5),
hue_min_max=(0.5, 0.5),
sharpness_min_max=(0.5, 0.5),
random_order=False,
)
tf_expected = v2.Compose(
[
v2.ColorJitter(brightness=(0.5, 0.5)),
v2.ColorJitter(contrast=(0.5, 0.5)),
v2.ColorJitter(saturation=(0.5, 0.5)),
v2.ColorJitter(hue=(0.5, 0.5)),
SharpnessJitter(sharpness=(0.5, 0.5)),
]
)
torch.testing.assert_close(tf_actual(img), tf_expected(img))
@require_x86_64_kernel
def test_get_image_transforms_random_order(img):
out_imgs = []
tf = get_image_transforms(
brightness_min_max=(0.5, 0.5),
contrast_min_max=(0.5, 0.5),
saturation_min_max=(0.5, 0.5),
hue_min_max=(0.5, 0.5),
sharpness_min_max=(0.5, 0.5),
random_order=True,
)
with seeded_context(1337):
for _ in range(10):
out_imgs.append(tf(img))
for i in range(1, len(out_imgs)):
with pytest.raises(AssertionError):
torch.testing.assert_close(out_imgs[0], out_imgs[i])
@pytest.mark.parametrize(
"transform, min_max_values",
[
("brightness", [(0.5, 0.5), (2.0, 2.0)]),
("contrast", [(0.5, 0.5), (2.0, 2.0)]),
("saturation", [(0.5, 0.5), (2.0, 2.0)]),
("hue", [(-0.25, -0.25), (0.25, 0.25)]),
("sharpness", [(0.5, 0.5), (2.0, 2.0)]),
],
)
def test_backward_compatibility_torchvision(transform, min_max_values, img, single_transforms):
for min_max in min_max_values:
kwargs = {
f"{transform}_weight": 1.0,
f"{transform}_min_max": min_max,
}
tf = get_image_transforms(**kwargs)
actual = tf(img)
key = f"{transform}_{min_max[0]}_{min_max[1]}"
expected = single_transforms[key]
torch.testing.assert_close(actual, expected)
@require_x86_64_kernel
def test_backward_compatibility_default_config(img, default_transforms):
cfg = init_hydra_config(DEFAULT_CONFIG_PATH)
cfg_tf = cfg.training.image_transforms
default_tf = get_image_transforms(
brightness_weight=cfg_tf.brightness.weight,
brightness_min_max=cfg_tf.brightness.min_max,
contrast_weight=cfg_tf.contrast.weight,
contrast_min_max=cfg_tf.contrast.min_max,
saturation_weight=cfg_tf.saturation.weight,
saturation_min_max=cfg_tf.saturation.min_max,
hue_weight=cfg_tf.hue.weight,
hue_min_max=cfg_tf.hue.min_max,
sharpness_weight=cfg_tf.sharpness.weight,
sharpness_min_max=cfg_tf.sharpness.min_max,
max_num_transforms=cfg_tf.max_num_transforms,
random_order=cfg_tf.random_order,
)
with seeded_context(1337):
actual = default_tf(img)
expected = default_transforms["default"]
torch.testing.assert_close(actual, expected)
@pytest.mark.parametrize("p", [[0, 1], [1, 0]])
def test_random_subset_apply_single_choice(p, img):
flips = [v2.RandomHorizontalFlip(p=1), v2.RandomVerticalFlip(p=1)]
random_choice = RandomSubsetApply(flips, p=p, n_subset=1, random_order=False)
actual = random_choice(img)
p_horz, _ = p
if p_horz:
torch.testing.assert_close(actual, F.horizontal_flip(img))
else:
torch.testing.assert_close(actual, F.vertical_flip(img))
def test_random_subset_apply_random_order(img):
flips = [v2.RandomHorizontalFlip(p=1), v2.RandomVerticalFlip(p=1)]
random_order = RandomSubsetApply(flips, p=[0.5, 0.5], n_subset=2, random_order=True)
# We can't really check whether the transforms are actually applied in random order. However,
# horizontal and vertical flip are commutative. Meaning, even under the assumption that the transform
# applies them in random order, we can use a fixed order to compute the expected value.
actual = random_order(img)
expected = v2.Compose(flips)(img)
torch.testing.assert_close(actual, expected)
def test_random_subset_apply_valid_transforms(color_jitters, img):
transform = RandomSubsetApply(color_jitters)
output = transform(img)
assert output.shape == img.shape
def test_random_subset_apply_probability_length_mismatch(color_jitters):
with pytest.raises(ValueError):
RandomSubsetApply(color_jitters, p=[0.5, 0.5])
@pytest.mark.parametrize("n_subset", [0, 5])
def test_random_subset_apply_invalid_n_subset(color_jitters, n_subset):
with pytest.raises(ValueError):
RandomSubsetApply(color_jitters, n_subset=n_subset)
def test_sharpness_jitter_valid_range_tuple(img):
tf = SharpnessJitter((0.1, 2.0))
output = tf(img)
assert output.shape == img.shape
def test_sharpness_jitter_valid_range_float(img):
tf = SharpnessJitter(0.5)
output = tf(img)
assert output.shape == img.shape
def test_sharpness_jitter_invalid_range_min_negative():
with pytest.raises(ValueError):
SharpnessJitter((-0.1, 2.0))
def test_sharpness_jitter_invalid_range_max_smaller():
with pytest.raises(ValueError):
SharpnessJitter((2.0, 0.1))
@pytest.mark.parametrize(
"repo_id, n_examples",
[
("lerobot/aloha_sim_transfer_cube_human", 3),
],
)
def test_visualize_image_transforms(repo_id, n_examples):
cfg = init_hydra_config(DEFAULT_CONFIG_PATH, overrides=[f"dataset_repo_id={repo_id}"])
output_dir = Path(__file__).parent / "outputs" / "image_transforms"
visualize_transforms(cfg, output_dir=output_dir, n_examples=n_examples)
output_dir = output_dir / repo_id.split("/")[-1]
# Check if the original frame image exists
assert (output_dir / "original_frame.png").exists(), "Original frame image was not saved."
# Check if the transformed images exist for each transform type
transforms = ["brightness", "contrast", "saturation", "hue", "sharpness"]
for transform in transforms:
transform_dir = output_dir / transform
assert transform_dir.exists(), f"{transform} directory was not created."
assert any(transform_dir.iterdir()), f"No transformed images found in {transform} directory."
# Check for specific files within each transform directory
expected_files = [f"{i}.png" for i in range(1, n_examples + 1)] + ["min.png", "max.png", "mean.png"]
for file_name in expected_files:
assert (
transform_dir / file_name
).exists(), f"{file_name} was not found in {transform} directory."
# Check if the combined transforms directory exists and contains the right files
combined_transforms_dir = output_dir / "all"
assert combined_transforms_dir.exists(), "Combined transforms directory was not created."
assert any(
combined_transforms_dir.iterdir()
), "No transformed images found in combined transforms directory."
for i in range(1, n_examples + 1):
assert (
combined_transforms_dir / f"{i}.png"
).exists(), f"Combined transform image {i}.png was not found."
|
lerobot/tests/test_image_transforms.py/0
|
{
"file_path": "lerobot/tests/test_image_transforms.py",
"repo_id": "lerobot",
"token_count": 4575
}
| 206
|
from transformers import AutoFeatureExtractor, AutoTokenizer
from parler_tts import ParlerTTSForConditionalGeneration
path = "TODO"
repo_id = "parler_tts_600M"
AutoFeatureExtractor.from_pretrained("ylacombe/dac_44khZ_8kbps").push_to_hub(repo_id)
AutoTokenizer.from_pretrained("google/t5-v1_1-base").push_to_hub(repo_id)
ParlerTTSForConditionalGeneration.from_pretrained(path).push_to_hub(repo_id)
|
parler-tts/helpers/push_to_hub_scripts/push_trained_parler_tts_to_hub.py/0
|
{
"file_path": "parler-tts/helpers/push_to_hub_scripts/push_trained_parler_tts_to_hub.py",
"repo_id": "parler-tts",
"token_count": 158
}
| 207
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import Seq2SeqTrainingArguments
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
feature_extractor_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained feature extractor name or path if not the same as model_name"}
)
description_tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained description tokenizer name or path if not the same as model_name"}
)
prompt_tokenizer_name: Optional[str] = field(
default=None,
metadata={"help": "Pretrained prompt tokenizer name or path if not the same as description_tokenizer_name"},
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
pad_token_id: int = field(
default=None,
metadata={"help": "If specified, change the model pad token id."},
)
decoder_start_token_id: int = field(
default=None,
metadata={"help": "If specified, change the model decoder start token id."},
)
freeze_text_encoder: bool = field(
default=False,
metadata={"help": "Whether to freeze the text encoder."},
)
do_sample: bool = field(
default=True,
metadata={"help": "Whether to do sampling or greedy decoding."},
)
temperature: float = field(
default=1.0,
metadata={"help": "Temperature if sampling."},
)
max_length: int = field(
default=2580,
metadata={"help": "Generation max length."},
)
bandwidth: float = field(
default=6,
metadata={"help": "Audio encoder bandwidth."},
)
asr_model_name_or_path: str = field(
default="distil-whisper/distil-large-v2",
metadata={
"help": "Used to compute WER during evaluation. Path to pretrained model or model identifier from huggingface.co/models"
},
)
clap_model_name_or_path: str = field(
default="laion/larger_clap_music_and_speech",
metadata={
"help": "Used to compute audio similarity during evaluation. Path to pretrained model or model identifier from huggingface.co/models"
},
)
attn_implementation: str = field(
default="eager",
metadata={"help": "Attention implementation used. One of `eager`, `sdpa`, `flash_attention_2`"},
)
cross_attention_implementation_strategy: str = field(
default=None,
metadata={
"help": "If not specified, the cross-attention implementation will be the same as `_attn_implementation`. If `always_eager`, it will always be the eager implementation. If `always_sdpa`, it will always be the sdpa implementation."
},
)
prompt_padding_side: Optional[str] = field(
default="left",
metadata={
"help": "Prompt tokenizer padding side. Defaults to `left`. If the prompt is pre-pended to the codebooks hidden states, it should be padded on the left."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
train_dataset_name: str = field(
default=None,
metadata={
"help": "The name of the training dataset to use (via the datasets library). Load and combine "
"multiple datasets by separating dataset ids by a '+' symbol. For example, to load and combine "
" librispeech and common voice, set `train_dataset_name='librispeech_asr+common_voice'`."
},
)
train_dataset_config_name: Optional[str] = field(
default=None,
metadata={
"help": "The configuration name of the training dataset to use (via the datasets library). Load and combine "
"multiple datasets by separating dataset configs by a '+' symbol."
},
)
train_split_name: str = field(
default="train",
metadata={
"help": ("The name of the training data set split to use (via the datasets library). Defaults to 'train'")
},
)
train_dataset_samples: str = field(
default=None,
metadata={
"help": "Number of samples in the training data. Load and combine "
"multiple datasets by separating dataset samples by a '+' symbol."
},
)
train_metadata_dataset_name: str = field(
default=None,
metadata={
"help": "The name of the metadata training dataset to use (via the datasets library). Load and combine "
"multiple datasets by separating dataset ids by a '+' symbol. For example, to load and combine "
" librispeech and common voice, set `train_dataset_name='librispeech_asr+common_voice'`."
},
)
eval_dataset_name: str = field(
default=None,
metadata={
"help": "The name of the evaluation dataset to use (via the datasets library). Defaults to the training dataset name if unspecified."
},
)
eval_dataset_config_name: Optional[str] = field(
default=None,
metadata={
"help": "The configuration name of the evaluation dataset to use (via the datasets library). Defaults to the training dataset config name if unspecified"
},
)
eval_split_name: str = field(
default="test",
metadata={
"help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
},
)
eval_metadata_dataset_name: str = field(
default=None,
metadata={
"help": "The name of the metadata training dataset to use (via the datasets library). Load and combine "
"multiple datasets by separating dataset ids by a '+' symbol. For example, to load and combine "
" librispeech and common voice, set `train_dataset_name='librispeech_asr+common_voice'`."
},
)
target_audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the target audio data. Defaults to 'audio'"},
)
description_column_name: str = field(
default=None,
metadata={"help": "The name of the dataset column containing the description text data. Defaults to 'None'."},
)
prompt_column_name: str = field(
default=None,
metadata={"help": "The name of the dataset column containing the prompt text data. Defaults to 'None'."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
},
)
max_duration_in_seconds: float = field(
default=35.0,
metadata={
"help": (
"Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`."
"Also, used to set maximum audio length if `pad_to_max_length=True`."
)
},
)
min_duration_in_seconds: float = field(
default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
)
max_text_length: int = field(
default=500, metadata={"help": "If set, max description lengths in number of characters."}
)
max_prompt_token_length: int = field(
default=None,
metadata={
"help": (
"If set, filter samples with prompts that are longer than `max_prompt_token_length` tokens."
"Also, used to set maximum prompt token length if `pad_to_max_length=True`."
)
},
)
max_description_token_length: int = field(
default=None,
metadata={
"help": (
"If set, filter samples with descriptions that are longer than `max_description_token_length` tokens."
"Also, used to set maximum description token length if `pad_to_max_length=True`."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"If `True`, pad audio, prompt and description to a maximum length set with respectively "
"`max_duration_in_seconds`, `max_prompt_token_length`, `max_description_token_length`."
)
},
)
preprocessing_only: bool = field(
default=False,
metadata={
"help": (
"Whether to only do data preprocessing and skip training. This is especially useful when data"
" preprocessing errors out in distributed training due to timeout. In this case, one should run the"
" preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets"
" can consequently be loaded in distributed training."
" In this training script, `save_to_disk` must be set to the path in which the dataset should be saved. "
)
},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
)
},
)
use_auth_token: bool = field(
default=None,
metadata={
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
"should only be set to `True` for repositories you trust and in which you have read the code, as it will "
"execute code present on the Hub on your local machine."
)
},
)
add_audio_samples_to_wandb: bool = field(
default=False,
metadata={"help": "If set and if `wandb` in args.report_to, will add generated audio samples to wandb logs."},
)
id_column_name: str = field(default=None, metadata={"help": "id column name."})
wandb_project: str = field(
default="parler-speech",
metadata={"help": "The name of the wandb project."},
)
wandb_run_name: str = field(
default=None,
metadata={
"help": "If specified, the name of the run. If not specified, wandb will give a random name to this run."
},
)
save_to_disk: str = field(
default=None,
metadata={
"help": "If set, will save the dataset to this path if this is an empyt folder. If not empty, will load the datasets from it."
},
)
temporary_save_to_disk: str = field(default=None, metadata={"help": "Temporarily save audio labels here."})
save_codec_steps: Optional[int] = field(
default=500,
metadata={"help": "Temporarily save the audio labels every `save_steps`."},
)
pad_to_multiple_of: Optional[int] = field(
default=2,
metadata={"help": ("Pad to multiple of for tokenizers.")},
)
@dataclass
class ParlerTTSTrainingArguments(Seq2SeqTrainingArguments):
dtype: Optional[str] = field(
default="float32",
metadata={
"help": (
"The data type (dtype) in which to run training. One of `float32` (full-precision), "
"`float16` or `bfloat16` (both half-precision)."
)
},
)
audio_encoder_per_device_batch_size: int = field(
default=8,
metadata={"help": ("Specify the batch size of the audio encoding pre-processing steps.")},
)
eval_dataloader_num_workers: Optional[int] = field(
default=0,
metadata={
"help": (
"Number of subprocesses to use for evaluation data loading (PyTorch only). 0 means that the data will be loaded in the main process."
)
},
)
compute_clap_similarity_metric: bool = field(
default=True,
metadata={
"help": (
"Whether or not to compute the clap similarity metric between the description and the generation during evalution."
)
},
)
compute_noise_level_metric: bool = field(
default=True,
metadata={"help": ("Whether or not to compute the squim si-sdr measure of the generations.")},
)
noise_level_to_compute_clean_wer: float = field(
default=25,
metadata={
"help": (
"if `compute_noise_level_metric=True`, will compute a 'clean' WER on samples with generated noise higher than `noise_level_to_compute_clean_wer`."
"This is a proxy measure to compute WER on clean audios, provided that the model learn to generate clean audios."
)
},
)
|
parler-tts/training/arguments.py/0
|
{
"file_path": "parler-tts/training/arguments.py",
"repo_id": "parler-tts",
"token_count": 5930
}
| 208
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# IA3
This conceptual guide gives a brief overview of [IA3](https://arxiv.org/abs/2205.05638), a parameter-efficient fine-tuning technique that is
intended to improve over [LoRA](./lora).
To make fine-tuning more efficient, IA3 (Infused Adapter by Inhibiting and Amplifying Inner Activations)
rescales inner activations with learned vectors. These learned vectors are injected in the attention and feedforward modules
in a typical transformer-based architecture. These learned vectors are the only trainable parameters during fine-tuning, and thus the original
weights remain frozen. Dealing with learned vectors (as opposed to learned low-rank updates to a weight matrix like LoRA)
keeps the number of trainable parameters much smaller.
Being similar to LoRA, IA3 carries many of the same advantages:
* IA3 makes fine-tuning more efficient by drastically reducing the number of trainable parameters. (For T0, an IA3 model only has about 0.01% trainable parameters, while even LoRA has > 0.1%)
* The original pre-trained weights are kept frozen, which means you can have multiple lightweight and portable IA3 models for various downstream tasks built on top of them.
* Performance of models fine-tuned using IA3 is comparable to the performance of fully fine-tuned models.
* IA3 does not add any inference latency because adapter weights can be merged with the base model.
In principle, IA3 can be applied to any subset of weight matrices in a neural network to reduce the number of trainable
parameters. Following the authors' implementation, IA3 weights are added to the key, value and feedforward layers
of a Transformer model. To be specific, for transformer models, IA3 weights are added to the outputs of key and value layers, and to the input of the second feedforward layer
in each transformer block.
Given the target layers for injecting IA3 parameters, the number of trainable parameters
can be determined based on the size of the weight matrices.
## Common IA3 parameters in PEFT
As with other methods supported by PEFT, to fine-tune a model using IA3, you need to:
1. Instantiate a base model.
2. Create a configuration (`IA3Config`) where you define IA3-specific parameters.
3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`.
4. Train the `PeftModel` as you normally would train the base model.
`IA3Config` allows you to control how IA3 is applied to the base model through the following parameters:
- `target_modules`: The modules (for example, attention blocks) to apply the IA3 vectors.
- `feedforward_modules`: The list of modules to be treated as feedforward layers in `target_modules`. While learned vectors are multiplied with
the output activation for attention blocks, the vectors are multiplied with the input for classic feedforward layers. Note that `feedforward_modules` must be a subset of `target_modules`.
- `modules_to_save`: List of modules apart from IA3 layers to be set as trainable and saved in the final checkpoint. These typically include model's custom head that is randomly initialized for the fine-tuning task.
## Example Usage
For the task of sequence classification, one can initialize the IA3 config for a Llama model as follows:
```py
peft_config = IA3Config(
task_type=TaskType.SEQ_CLS, target_modules=["k_proj", "v_proj", "down_proj"], feedforward_modules=["down_proj"]
)
```
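To complete steps 1, 3 and 4 from the list above, wrap a base model with this config. The snippet below is a minimal sketch; the checkpoint name is only a placeholder:

```py
from transformers import AutoModelForSequenceClassification
from peft import get_peft_model

base_model = AutoModelForSequenceClassification.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder checkpoint
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()  # only the IA3 vectors (and any `modules_to_save`) are trainable
```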
|
peft/docs/source/conceptual_guides/ia3.md/0
|
{
"file_path": "peft/docs/source/conceptual_guides/ia3.md",
"repo_id": "peft",
"token_count": 1030
}
| 209
|
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Models
[`PeftModel`] is the base model class for specifying the base Transformer model and configuration to apply a PEFT method to. The base `PeftModel` contains methods for loading and saving models from the Hub.
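As a quick illustration of that loading/saving flow (the model and adapter IDs below are placeholders, not part of the API reference):

```py
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-id")          # placeholder base model
model = PeftModel.from_pretrained(base, "username/my-peft-adapter")   # placeholder adapter repo on the Hub
model.save_pretrained("./my-peft-adapter")                            # writes only the adapter weights
```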
## PeftModel
[[autodoc]] PeftModel
- all
## PeftModelForSequenceClassification
A `PeftModel` for sequence classification tasks.
[[autodoc]] PeftModelForSequenceClassification
- all
## PeftModelForTokenClassification
A `PeftModel` for token classification tasks.
[[autodoc]] PeftModelForTokenClassification
- all
## PeftModelForCausalLM
A `PeftModel` for causal language modeling.
[[autodoc]] PeftModelForCausalLM
- all
## PeftModelForSeq2SeqLM
A `PeftModel` for sequence-to-sequence language modeling.
[[autodoc]] PeftModelForSeq2SeqLM
- all
## PeftModelForQuestionAnswering
A `PeftModel` for question answering.
[[autodoc]] PeftModelForQuestionAnswering
- all
## PeftModelForFeatureExtraction
A `PeftModel` for extracting features/embeddings from transformer models.
[[autodoc]] PeftModelForFeatureExtraction
- all
## PeftMixedModel
A `PeftModel` for mixing different adapter types (e.g. LoRA and LoHa).
[[autodoc]] PeftMixedModel
- all
## Utilities
[[autodoc]] utils.cast_mixed_precision_params
[[autodoc]] get_peft_model
[[autodoc]] inject_adapter_in_model
[[autodoc]] utils.get_peft_model_state_dict
[[autodoc]] utils.prepare_model_for_kbit_training
[[autodoc]] get_layer_status
[[autodoc]] get_model_status
|
peft/docs/source/package_reference/peft_model.md/0
|
{
"file_path": "peft/docs/source/package_reference/peft_model.md",
"repo_id": "peft",
"token_count": 564
}
| 210
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The implementation is based on "Parameter-Efficient Orthogonal Finetuning
# via Butterfly Factorization" (https://arxiv.org/abs/2311.06243) in ICLR 2024.
import glob
import os
from pathlib import Path
import cv2
import face_alignment
import numpy as np
import torch
from accelerate import Accelerator
from skimage.io import imread
from torchvision.utils import save_image
from tqdm import tqdm
from transformers import AutoTokenizer
from utils.args_loader import parse_args
from utils.dataset import make_dataset
detect_model = face_alignment.FaceAlignment(face_alignment.LandmarksType.TWO_D, device="cuda:0", flip_input=False)
# with open('./data/celebhq-text/prompt_val_blip_full.json', 'rt') as f: # fill50k, COCO
# for line in f:
# val_data = json.loads(line)
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype=np.int32) - 1
def count_txt_files(directory):
pattern = os.path.join(directory, "*.txt")
txt_files = glob.glob(pattern)
return len(txt_files)
def plot_kpts(image, kpts, color="g"):
"""Draw 68 key points
Args:
image: the input image
kpt: (68, 3).
"""
if color == "r":
c = (255, 0, 0)
elif color == "g":
c = (0, 255, 0)
elif color == "b":
        c = (0, 0, 255)
image = image.copy()
kpts = kpts.copy()
radius = max(int(min(image.shape[0], image.shape[1]) / 200), 1)
for i in range(kpts.shape[0]):
st = kpts[i, :2]
if kpts.shape[1] == 4:
if kpts[i, 3] > 0.5:
c = (0, 255, 0)
else:
c = (0, 0, 255)
image = cv2.circle(image, (int(st[0]), int(st[1])), radius, c, radius * 2)
if i in end_list:
continue
ed = kpts[i + 1, :2]
image = cv2.line(image, (int(st[0]), int(st[1])), (int(ed[0]), int(ed[1])), (255, 255, 255), radius)
return image
def generate_landmark2d(dataset, input_dir, pred_lmk_dir, gt_lmk_dir, vis=False):
print("Generate 2d landmarks ...")
os.makedirs(pred_lmk_dir, exist_ok=True)
imagepath_list = sorted(glob.glob(f"{input_dir}/pred*.png"))
for imagepath in tqdm(imagepath_list):
name = Path(imagepath).stem
idx = int(name.split("_")[-1])
pred_txt_path = os.path.join(pred_lmk_dir, f"{idx}.txt")
gt_lmk_path = os.path.join(gt_lmk_dir, f"{idx}_gt_lmk.jpg")
gt_txt_path = os.path.join(gt_lmk_dir, f"{idx}.txt")
gt_img_path = os.path.join(gt_lmk_dir, f"{idx}_gt_img.jpg")
if (not os.path.exists(pred_txt_path)) or (not os.path.exists(gt_txt_path)):
image = imread(imagepath) # [:, :, :3]
out = detect_model.get_landmarks(image)
if out is None:
continue
pred_kpt = out[0].squeeze()
np.savetxt(pred_txt_path, pred_kpt)
# Your existing code for obtaining the image tensor
gt_lmk_img = dataset[idx]["conditioning_pixel_values"]
save_image(gt_lmk_img, gt_lmk_path)
gt_img = (dataset[idx]["pixel_values"]) * 0.5 + 0.5
save_image(gt_img, gt_img_path)
gt_img = (gt_img.permute(1, 2, 0) * 255).type(torch.uint8).cpu().numpy()
out = detect_model.get_landmarks(gt_img)
if out is None:
continue
gt_kpt = out[0].squeeze()
np.savetxt(gt_txt_path, gt_kpt)
# gt_image = cv2.resize(cv2.imread(gt_lmk_path), (512, 512))
if vis:
gt_lmk_image = cv2.imread(gt_lmk_path)
# visualize predicted landmarks
vis_path = os.path.join(pred_lmk_dir, f"{idx}_overlay.jpg")
image = cv2.imread(imagepath)
image_point = plot_kpts(image, pred_kpt)
cv2.imwrite(vis_path, np.concatenate([image_point, gt_lmk_image], axis=1))
# visualize gt landmarks
vis_path = os.path.join(gt_lmk_dir, f"{idx}_overlay.jpg")
image = cv2.imread(gt_img_path)
image_point = plot_kpts(image, gt_kpt)
cv2.imwrite(vis_path, np.concatenate([image_point, gt_lmk_image], axis=1))
def landmark_comparison(val_dataset, lmk_dir, gt_lmk_dir):
print("Calculating reprojection error")
lmk_err = []
pbar = tqdm(range(len(val_dataset)))
for i in pbar:
# line = val_dataset[i]
# img_name = line["image"].split(".")[0]
lmk1_path = os.path.join(gt_lmk_dir, f"{i}.txt")
lmk1 = np.loadtxt(lmk1_path)
lmk2_path = os.path.join(lmk_dir, f"{i}.txt")
if not os.path.exists(lmk2_path):
print(f"{lmk2_path} not exist")
continue
lmk2 = np.loadtxt(lmk2_path)
lmk_err.append(np.mean(np.linalg.norm(lmk1 - lmk2, axis=1)))
pbar.set_description(f"lmk_err: {np.mean(lmk_err):.5f}")
print("Reprojection error:", np.mean(lmk_err))
np.save(os.path.join(lmk_dir, "lmk_err.npy"), lmk_err)
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_dir=logging_dir,
)
# Load the tokenizer
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
val_dataset = make_dataset(args, tokenizer, accelerator, "test")
gt_lmk_dir = os.path.join(args.output_dir, "gt_lmk")
if not os.path.exists(gt_lmk_dir):
os.makedirs(gt_lmk_dir, exist_ok=True)
pred_lmk_dir = os.path.join(args.output_dir, "pred_lmk")
if not os.path.exists(pred_lmk_dir):
os.makedirs(pred_lmk_dir, exist_ok=True)
input_dir = os.path.join(args.output_dir, "results")
generate_landmark2d(val_dataset, input_dir, pred_lmk_dir, gt_lmk_dir, args.vis_overlays)
if count_txt_files(pred_lmk_dir) == len(val_dataset) and count_txt_files(gt_lmk_dir) == len(val_dataset):
landmark_comparison(val_dataset, pred_lmk_dir, gt_lmk_dir)
if __name__ == "__main__":
args = parse_args()
main(args)
|
peft/examples/boft_controlnet/eval.py/0
|
{
"file_path": "peft/examples/boft_controlnet/eval.py",
"repo_id": "peft",
"token_count": 3390
}
| 211
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DreamBooth fine-tuning with BOFT
This guide demonstrates how to use BOFT, an orthogonal fine-tuning method, to fine-tune Dreambooth with either `stabilityai/stable-diffusion-2-1` or `runwayml/stable-diffusion-v1-5` model.
By using BOFT from 🤗 PEFT, we can significantly reduce the number of trainable parameters while still achieving impressive results in various fine-tuning tasks across different foundation models. BOFT enhances model efficiency by integrating full-rank orthogonal matrices with a butterfly structure into specific model blocks, such as attention blocks, mirroring the approach used in LoRA. During fine-tuning, only these inserted matrices are trained, leaving the original model parameters untouched. During inference, the trainable BOFT parameters can be merged into the original model, eliminating any additional computational costs.
As a member of the **orthogonal finetuning** class, BOFT presents a systematic and principled method for fine-tuning. It possesses several unique properties and has demonstrated superior performance compared to LoRA in a variety of scenarios. For further details on BOFT, please consult the [PEFT concept guide on OFT/BOFT](https://huggingface.co/docs/peft/index), the [original BOFT paper](https://arxiv.org/abs/2311.06243) and the [original OFT paper](https://arxiv.org/abs/2306.07280).
In this guide we provide a Dreambooth fine-tuning script that is available in [PEFT's GitHub repo examples](https://github.com/huggingface/peft/tree/main/examples/boft_dreambooth). This implementation is adapted from [peft's lora_dreambooth](https://github.com/huggingface/peft/tree/main/examples/lora_dreambooth). You can try it out and finetune on your custom images.
## Set up your environment
Start by cloning the PEFT repository:
```bash
git clone --recursive https://github.com/huggingface/peft
```
Navigate to the directory containing the training scripts for fine-tuning Dreambooth with BOFT:
```bash
cd peft/examples/boft_dreambooth
```
Set up your environment: install PEFT, and all the required libraries. At the time of writing this guide we recommend installing PEFT from source. The following environment setup should work on A100 and H100:
```bash
conda create --name peft python=3.10
conda activate peft
conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=11.8 -c pytorch -c nvidia
conda install xformers -c xformers
pip install -r requirements.txt
pip install git+https://github.com/huggingface/peft
```
## Download the data
The [dreambooth](https://github.com/google/dreambooth) dataset is cloned automatically into the following structure when you run the training script.
```
boft_dreambooth
├── data
│ ├── data_dir
│ └── dreambooth
│ └── data
│ ├── backpack
│ └── backpack_dog
│ ...
```
You can also put your custom images into `boft_dreambooth/data/dreambooth`.
## Finetune Dreambooth with BOFT
```bash
./train_dreambooth.sh
```
or using the following script arguments:
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
```
Here:
- `INSTANCE_DIR`: The directory containing the images that you intend to use for training your model.
- `CLASS_DIR`: The directory containing class-specific images. In this example, we use prior preservation to avoid overfitting and language-drift. For prior preservation, you need other images of the same class as part of the training process. However, these images can be generated and the training script will save them to a local path you specify here.
- `OUTPUT_DIR`: The destination folder for storing the trained model's weights.
To learn more about DreamBooth fine-tuning with prior-preserving loss, check out the [Diffusers documentation](https://huggingface.co/docs/diffusers/training/dreambooth#finetuning-with-priorpreserving-loss).
Launch the training script with `accelerate` and pass hyperparameters, as well as BOFT-specific arguments, such as the following (a Python sketch of the corresponding `BOFTConfig` follows the list):
- `use_boft`: Enables BOFT in the training script.
- `boft_block_size`: the BOFT matrix block size across different layers, expressed in `int`. A smaller block size results in sparser update matrices with fewer trainable parameters. **Note**: choose a value that divides most layers' `in_features` dimension, e.g., 4, 8, 16. Also, you can only specify either `boft_block_size` or `boft_block_num`, but not both simultaneously, because `boft_block_size` x `boft_block_num` = layer dimension.
- `boft_block_num`: the number of BOFT matrix blocks across different layers, expressed in `int`. Fewer blocks result in sparser update matrices with fewer trainable parameters. **Note**: choose a value that divides most layers' `in_features` dimension, e.g., 4, 8, 16. Also, you can only specify either `boft_block_size` or `boft_block_num`, but not both simultaneously, because `boft_block_size` x `boft_block_num` = layer dimension.
- `boft_n_butterfly_factor`: the number of butterfly factors. **Note**: for `boft_n_butterfly_factor=1`, BOFT is the same as vanilla OFT; for `boft_n_butterfly_factor=2`, the effective block size of OFT becomes twice as big and the number of blocks becomes half.
- `bias`: specify whether the `bias` parameters should be trained. Can be `none`, `all` or `boft_only`.
- `boft_dropout`: specify the probability of multiplicative dropout.
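These script flags map onto PEFT's `BOFTConfig`. The following is a minimal Python sketch of an equivalent configuration; the target module names are illustrative and depend on the model being wrapped:

```py
from peft import BOFTConfig, get_peft_model

boft_config = BOFTConfig(
    boft_block_size=8,                # or set boft_block_num instead -- only one of the two
    boft_n_butterfly_factor=1,
    boft_dropout=0.1,
    bias="boft_only",
    target_modules=["to_q", "to_v", "query", "value"],  # illustrative names
)
# model = get_peft_model(base_model, boft_config)  # wrap the module you want to fine-tune
```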
Here's what the full set of script arguments may look like:
```bash
PEFT_TYPE="boft"
BLOCK_NUM=8
BLOCK_SIZE=0
N_BUTTERFLY_FACTOR=1
VALIDATION_PROMPT=${PROMPT_LIST[@]}
INSTANCE_PROMPT="a photo of ${UNIQUE_TOKEN} ${CLASS_TOKEN}"
CLASS_PROMPT="a photo of ${CLASS_TOKEN}"
export MODEL_NAME="stabilityai/stable-diffusion-2-1"
# export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export PROJECT_NAME="dreambooth_${PEFT_TYPE}"
export RUN_NAME="${SELECTED_SUBJECT}_${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}"
export INSTANCE_DIR="./data/dreambooth/dataset/${SELECTED_SUBJECT}"
export CLASS_DIR="./data/class_data/${CLASS_TOKEN}"
export OUTPUT_DIR="./data/output/${PEFT_TYPE}"
accelerate launch train_dreambooth.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--instance_data_dir=$INSTANCE_DIR \
--class_data_dir="$CLASS_DIR" \
--output_dir=$OUTPUT_DIR \
--wandb_project_name=$PROJECT_NAME \
--wandb_run_name=$RUN_NAME \
--with_prior_preservation --prior_loss_weight=1.0 \
--instance_prompt="$INSTANCE_PROMPT" \
--validation_prompt="$VALIDATION_PROMPT" \
--class_prompt="$CLASS_PROMPT" \
--resolution=512 \
--train_batch_size=1 \
--num_dataloader_workers=2 \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--num_class_images=200 \
--use_boft \
--boft_block_num=$BLOCK_NUM \
--boft_block_size=$BLOCK_SIZE \
--boft_n_butterfly_factor=$N_BUTTERFLY_FACTOR \
--boft_dropout=0.1 \
--boft_bias="boft_only" \
--learning_rate=3e-5 \
--max_train_steps=1010 \
--checkpointing_steps=200 \
--validation_steps=200 \
--enable_xformers_memory_efficient_attention \
--report_to="wandb" \
```
or use this training script:
```bash
./train_dreambooth.sh $idx
```
where `$idx` corresponds to a different subject.
If you are running this script on Windows, you may need to set `--num_dataloader_workers` to 0.
## Inference with a single adapter
To run inference with the fine-tuned model, launch `jupyter notebook` under `./examples/boft_dreambooth` and open the `dreambooth_inference.ipynb` notebook for visualization.
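For a scripted alternative, the sketch below loads the trained UNet adapter into a Stable Diffusion pipeline. It assumes the checkpoint directory contains a `unet` subfolder, as in the LoRA DreamBooth example; adjust the path to your `OUTPUT_DIR`:

```py
import torch
from diffusers import StableDiffusionPipeline
from peft import PeftModel

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")
# "./data/output/boft/unet" is a placeholder; point it at your trained checkpoint
pipe.unet = PeftModel.from_pretrained(pipe.unet, "./data/output/boft/unet")
image = pipe("a photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("boft_dreambooth_sample.png")
```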
|
peft/examples/boft_dreambooth/boft_dreambooth.md/0
|
{
"file_path": "peft/examples/boft_dreambooth/boft_dreambooth.md",
"repo_id": "peft",
"token_count": 2634
}
| 212
|
import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorWithPadding,
Trainer,
TrainingArguments,
)
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
def train_model(
base_model: str,
data_path: str,
output_dir: str,
batch_size: int,
num_epochs: int,
learning_rate: float,
cutoff_len: int,
val_set_size: int,
use_dora: bool,
quantize: bool,
eval_step: int,
save_step: int,
device: str,
lora_r: int,
lora_alpha: int,
lora_dropout: float,
lora_target_modules: str,
hub_model_id: str,
push_to_hub: bool,
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
hf_token = os.getenv("HF_TOKEN")
# Setup device
device = torch.device(device)
print(f"Using device: {device}")
# load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
    # QDoRA (quantized DoRA): load the base model in 4-bit when --quantize is passed
if quantize:
model = AutoModelForCausalLM.from_pretrained(
base_model,
token=hf_token,
quantization_config=BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=(
torch.bfloat16 if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else torch.float16
),
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
),
)
# setup for quantized training
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
else:
model = AutoModelForCausalLM.from_pretrained(base_model, token=hf_token)
# LoRa config for the PEFT model
lora_config = LoraConfig(
        use_dora=use_dora,  # pass --use_dora to enable DoRA; omit it to train a plain LoRA baseline
r=lora_r, # Rank of matrix
lora_alpha=lora_alpha,
target_modules=(
lora_target_modules.split(",")
if lora_target_modules
else ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
),
lora_dropout=lora_dropout,
bias="none",
)
# get the peft model with LoRa config
model = get_peft_model(model, lora_config)
model.to(device) # MODEL TO GPU/CUDA
tokenizer.pad_token = tokenizer.eos_token
# Load the dataset
dataset = load_dataset(data_path)
def tokenize_function(examples):
inputs = tokenizer(examples["text"], padding="max_length", truncation=True, max_length=cutoff_len)
inputs["labels"] = inputs["input_ids"].copy() # setting labels for a language modeling task
return inputs
# Tokenize the dataset and prepare for training
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
# Data collator to dynamically pad the batched examples
data_collator = DataCollatorWithPadding(tokenizer)
# Define training arguments
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_steps=100,
weight_decay=0.01,
logging_dir="./logs",
logging_steps=eval_step,
save_steps=save_step,
save_total_limit=2,
push_to_hub=push_to_hub,
hub_model_id=hub_model_id,
gradient_accumulation_steps=16,
fp16=True,
learning_rate=learning_rate,
hub_token=hf_token,
)
# Clear CUDA cache to free memory
torch.cuda.empty_cache()
# Initialize the Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
)
# Start model training
trainer.train()
# Save and push the trained model and tokenizer
if push_to_hub:
# Push the main model to the hub
trainer.push_to_hub(commit_message="Fine-tuned model")
# Save the model and tokenizer locally
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Fine-tune LLaMA with DoRA and PEFT")
parser.add_argument("--base_model", type=str, default="huggyllama/llama-7b", help="Base model path or name")
parser.add_argument(
"--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name"
)
parser.add_argument(
"--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model"
)
parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=3e-4, help="Learning rate")
parser.add_argument("--cutoff_len", type=int, default=512, help="Cutoff length for tokenization")
parser.add_argument("--val_set_size", type=int, default=500, help="Validation set size")
parser.add_argument("--use_dora", action="store_true", help="Apply Dora")
parser.add_argument("--quantize", action="store_true", help="Use quantization")
parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval")
parser.add_argument("--save_step", type=int, default=100, help="Save step interval")
parser.add_argument("--device", type=str, default="cuda:0", help="Device to use for training")
parser.add_argument("--lora_r", type=int, default=8, help="LoRA rank")
parser.add_argument("--lora_alpha", type=int, default=16, help="LoRA alpha")
parser.add_argument("--lora_dropout", type=float, default=0.05, help="LoRA dropout rate")
parser.add_argument(
"--lora_target_modules", type=str, default=None, help="Comma-separated list of target modules for LoRA"
)
parser.add_argument(
"--hub_model_id",
type=str,
default="path/to/repo",
help="Repository name to push the model on the Hugging Face Hub",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
args = parser.parse_args()
train_model(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
use_dora=args.use_dora,
quantize=args.quantize,
eval_step=args.eval_step,
save_step=args.save_step,
device=args.device,
lora_r=args.lora_r,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
lora_target_modules=args.lora_target_modules,
hub_model_id=args.hub_model_id,
push_to_hub=args.push_to_hub,
)
|
peft/examples/dora_finetuning/dora_finetuning.py/0
|
{
"file_path": "peft/examples/dora_finetuning/dora_finetuning.py",
"repo_id": "peft",
"token_count": 3104
}
| 213
|
# Fine-tuning for image classification using LoRA and 🤗 PEFT
## Vision Transformer model from transformers
[](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_peft_lora.ipynb)
We provide a notebook (`image_classification_peft_lora.ipynb`) where we learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune an image classification model by ONLY using **0.7%** of the original trainable parameters of the model.
LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685).
## PoolFormer model from timm
[](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb)
The notebook `image_classification_timm_peft_lora.ipynb` showcases fine-tuning an image classification model from the [timm](https://huggingface.co/docs/timm/index) library. Again, LoRA is used to reduce the number of trainable parameters to a fraction of the total.
|
peft/examples/image_classification/README.md/0
|
{
"file_path": "peft/examples/image_classification/README.md",
"repo_id": "peft",
"token_count": 457
}
| 214
|
<jupyter_start><jupyter_code>import argparse
import gc
import hashlib
import itertools
import logging
import math
import os
import threading
import warnings
from pathlib import Path
from typing import Optional
import psutil
import json
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.utils.data import Dataset
import datasets
import diffusers
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers import DDPMScheduler, PNDMScheduler, StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import HfFolder, Repository, whoami
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig, CLIPFeatureExtractor
from peft import PeftModel, LoraConfig, get_peft_model_state_dict, set_peft_model_state_dict
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.10.0.dev0")
logger = get_logger(__name__)
MODEL_NAME = "CompVis/stable-diffusion-v1-4" # "stabilityai/stable-diffusion-2-1-base"
INSTANCE_PROMPT = "a photo of sks dog"
base_path = "/home/sourab/temp/"
def get_lora_sd_pipeline(
ckpt_dir, base_model_name_or_path=None, dtype=torch.float16, device="cuda", adapter_name="default"
):
unet_sub_dir = os.path.join(ckpt_dir, "unet")
text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
if os.path.exists(text_encoder_sub_dir) and base_model_name_or_path is None:
config = LoraConfig.from_pretrained(text_encoder_sub_dir)
base_model_name_or_path = config.base_model_name_or_path
if base_model_name_or_path is None:
raise ValueError("Please specify the base model name or path")
pipe = StableDiffusionPipeline.from_pretrained(
base_model_name_or_path, torch_dtype=dtype, requires_safety_checker=False
).to(device)
pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
if os.path.exists(text_encoder_sub_dir):
pipe.text_encoder = PeftModel.from_pretrained(
pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
)
if dtype in (torch.float16, torch.bfloat16):
pipe.unet.half()
pipe.text_encoder.half()
pipe.to(device)
return pipe
def load_adapter(pipe, ckpt_dir, adapter_name):
unet_sub_dir = os.path.join(ckpt_dir, "unet")
text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name)
if os.path.exists(text_encoder_sub_dir):
pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name)
def set_adapter(pipe, adapter_name):
pipe.unet.set_adapter(adapter_name)
if isinstance(pipe.text_encoder, PeftModel):
pipe.text_encoder.set_adapter(adapter_name)
def merging_lora_with_base(pipe, ckpt_dir, adapter_name="default"):
unet_sub_dir = os.path.join(ckpt_dir, "unet")
text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
if isinstance(pipe.unet, PeftModel):
pipe.unet.set_adapter(adapter_name)
else:
pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
pipe.unet = pipe.unet.merge_and_unload()
if os.path.exists(text_encoder_sub_dir):
if isinstance(pipe.text_encoder, PeftModel):
pipe.text_encoder.set_adapter(adapter_name)
else:
pipe.text_encoder = PeftModel.from_pretrained(
pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
)
pipe.text_encoder = pipe.text_encoder.merge_and_unload()
return pipe
def create_weighted_lora_adapter(pipe, adapters, weights, adapter_name="default"):
pipe.unet.add_weighted_adapter(adapters, weights, adapter_name)
if isinstance(pipe.text_encoder, PeftModel):
pipe.text_encoder.add_weighted_adapter(adapters, weights, adapter_name)
return pipe
%%time
pipe = get_lora_sd_pipeline(os.path.join(base_path, "dog_dreambooth_updated"), adapter_name="dog")
%%time
load_adapter(pipe, os.path.join(base_path, "toy_dreambooth"), adapter_name="toy")
pipe = create_weighted_lora_adapter(pipe, ["toy", "dog"], [1.0, 1.05], adapter_name="toy_dog")
%%time
set_adapter(pipe, adapter_name="dog")
prompt = "sks dog playing fetch in the park"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
%%time
set_adapter(pipe, adapter_name="toy")
prompt = "narendra modi rendered in the style of <1>"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
set_adapter(pipe, adapter_name="dog")
prompt = "sks dog in a big red bucket"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
set_adapter(pipe, adapter_name="toy")
prompt = "superman rendered in the style of <1>, close up potrait"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
set_adapter(pipe, adapter_name="toy_dog")
prompt = "sks dog rendered in the style of <1>, close up potrait, 4K HD"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image<jupyter_output><empty_output>
|
peft/examples/lora_dreambooth/lora_dreambooth_inference.ipynb/0
|
{
"file_path": "peft/examples/lora_dreambooth/lora_dreambooth_inference.ipynb",
"repo_id": "peft",
"token_count": 2282
}
| 215
|
from mistralrs import ChatCompletionRequest, Runner, Which
runner = Runner(
which=Which.XLora(
tok_model_id=None, # Automatically determine from ordering file
        model_id=..., # Model ID of the base model (local path or HF model ID)
        xlora_model_id=..., # X-LoRA Model ID of the base model (local path or HF model ID)
        order=..., # Ordering file to ensure compatibility with PEFT
tgt_non_granular_index=3, # Only generate scalings for the first 3 decoding tokens, and then use the last generated one
)
)
res = runner.send_chat_completion_request(
ChatCompletionRequest(
model="mistral",
messages=[{"role": "user", "content": "Tell me a story about 2 low rank matrices."}],
max_tokens=256,
presence_penalty=1.0,
top_p=0.1,
temperature=0.5,
)
)
print(res.choices[0].message.content)
print(res.usage)
|
peft/examples/xlora/xlora_inference_mistralrs.py/0
|
{
"file_path": "peft/examples/xlora/xlora_inference_mistralrs.py",
"repo_id": "peft",
"token_count": 356
}
| 216
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import collections
import inspect
import os
import warnings
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Literal, Optional, Union
import packaging.version
import torch
import transformers
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
from accelerate.utils import get_balanced_memory, named_module_tensors
from huggingface_hub import HfFileSystem, ModelCard, ModelCardData, hf_hub_download
from safetensors import safe_open
from safetensors.torch import save_file as safe_save_file
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import PreTrainedModel
from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from transformers.utils import PushToHubMixin
from peft.utils.constants import DUMMY_MODEL_CONFIG
from . import __version__
from .config import PeftConfig
from .tuners import (
AdaLoraModel,
AdaptionPromptModel,
BOFTModel,
FourierFTModel,
HRAModel,
IA3Model,
LNTuningModel,
LoHaModel,
LoKrModel,
LoraModel,
MultitaskPromptEmbedding,
OFTModel,
PolyModel,
PrefixEncoder,
PromptEmbedding,
PromptEncoder,
VeraModel,
XLoraConfig,
XLoraModel,
)
from .tuners.tuners_utils import BaseTuner, BaseTunerLayer
from .utils import (
SAFETENSORS_WEIGHTS_NAME,
TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
WEIGHTS_NAME,
PeftType,
TaskType,
_get_batch_size,
_prepare_prompt_learning_config,
_set_adapter,
_set_trainable,
get_peft_model_state_dict,
id_tensor_storage,
infer_device,
load_peft_weights,
set_peft_model_state_dict,
shift_tokens_right,
)
PEFT_TYPE_TO_MODEL_MAPPING = {
PeftType.LORA: LoraModel,
PeftType.LOHA: LoHaModel,
PeftType.LOKR: LoKrModel,
PeftType.PROMPT_TUNING: PromptEmbedding,
PeftType.P_TUNING: PromptEncoder,
PeftType.PREFIX_TUNING: PrefixEncoder,
PeftType.ADALORA: AdaLoraModel,
PeftType.BOFT: BOFTModel,
PeftType.ADAPTION_PROMPT: AdaptionPromptModel,
PeftType.IA3: IA3Model,
PeftType.OFT: OFTModel,
PeftType.POLY: PolyModel,
PeftType.LN_TUNING: LNTuningModel,
PeftType.VERA: VeraModel,
PeftType.FOURIERFT: FourierFTModel,
PeftType.XLORA: XLoraModel,
PeftType.HRA: HRAModel,
}
class PeftModel(PushToHubMixin, torch.nn.Module):
"""
Base model encompassing various Peft methods.
Args:
model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.
peft_config ([`PeftConfig`]): The configuration of the Peft model.
adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect
select PEFT tuners.
**Attributes**:
- **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft.
- **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.
- **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when
saving the model.
- **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if
using [`PromptLearningConfig`].
- **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if
using [`PromptLearningConfig`].
- **transformer_backbone_name** (`str`) -- The name of the transformer
backbone in the base model if using [`PromptLearningConfig`].
- **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone
in the base model if using [`PromptLearningConfig`].
"""
def __init__(
self,
model: PreTrainedModel,
peft_config: PeftConfig,
adapter_name: str = "default",
autocast_adapter_dtype: bool = True,
) -> None:
super().__init__()
self.modules_to_save = None
self.active_adapter = adapter_name
self.peft_type = peft_config.peft_type
# These args are special PEFT arguments that users can pass. They need to be removed before passing them to
# forward.
self.special_peft_forward_args = {"adapter_names"}
self._is_prompt_learning = peft_config.is_prompt_learning
if self._is_prompt_learning:
self._peft_config = {adapter_name: peft_config}
self.base_model = model
self.add_adapter(adapter_name, peft_config)
else:
self._peft_config = None
cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]
self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
self.set_additional_trainable_modules(peft_config, adapter_name)
if hasattr(self.base_model, "_cast_adapter_dtype"):
self.base_model._cast_adapter_dtype(
adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype
)
if getattr(model, "is_gradient_checkpointing", True):
model = self._prepare_model_for_gradient_checkpointing(model)
# the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
# numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
# behavior we disable that in this line.
if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
self.base_model.config.pretraining_tp = 1
@property
def peft_config(self) -> dict[str, PeftConfig]:
if self._is_prompt_learning:
return self._peft_config
return self.base_model.peft_config
@property
def active_adapters(self) -> list[str]:
try:
adapters = self.base_model.active_adapters
if not isinstance(adapters, list):
# Base model is probably a transformers model, see:
# https://github.com/huggingface/transformers/pull/30790#issuecomment-2253808249
# Unfortunately, transformers models also have an active_adapters method but it's 1) not a property and
# 2) calling it fails because the base model (usually) has no loaded adapter. The base model can be a
# transformers model for prompt learning, where the base model is not wrapped in a LoraModel or similar.
adapters = self.active_adapter
if isinstance(adapters, str):
adapters = [adapters]
except AttributeError:
adapters = self.active_adapter
if isinstance(adapters, str):
adapters = [adapters]
return adapters
@peft_config.setter
def peft_config(self, value: dict[str, PeftConfig]):
if self._is_prompt_learning:
self._peft_config = value
else:
self.base_model.peft_config = value
def save_pretrained(
self,
save_directory: str,
safe_serialization: bool = True,
selected_adapters: Optional[list[str]] = None,
save_embedding_layers: Union[str, bool] = "auto",
is_main_process: bool = True,
convert_pissa_to_lora: Optional[str] = None,
path_initial_model_for_weight_conversion: Optional[str] = None,
**kwargs: Any,
) -> None:
r"""
This function saves the adapter model and the adapter configuration files to a directory, so that it can be
reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`]
method.
Args:
save_directory (`str`):
Directory where the adapter model and configuration files will be saved (will be created if it does not
exist).
safe_serialization (`bool`, *optional*):
Whether to save the adapter files in safetensors format, defaults to `True`.
selected_adapters (`List[str]`, *optional*):
A list of adapters to be saved. If `None`, will default to all adapters.
save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`):
If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common
                embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available
and automatically sets the boolean flag. This only works for 🤗 transformers models.
is_main_process (`bool`, *optional*):
Whether the process calling this is the main process or not. Will default to `True`. Will not save the
checkpoint if not on the main process, which is important for multi device setups (e.g. DDP).
            convert_pissa_to_lora (`str`, *optional*):
Deprecated. Use `path_initial_model_for_weight_conversion` instead.
            path_initial_model_for_weight_conversion (`str`, *optional*):
The path to the initialized adapter, which is obtained after initializing the model with PiSSA or OLoRA
and before performing any training. When `path_initial_model_for_weight_conversion` is not None, the
difference in adapter before and after fine-tuning is calculated. This difference can be represented as
the parameters of a standard LoRA adapter. Using this converted adapter does not require changes to the
base model, thus conveniently allowing the use of multiple PiSSA or OLoRA adapters with LoRA adapters,
and the activation or deactivation of any adapters. Note that this conversion is not supported if
`rslora` is used in combination with `rank_pattern` or `alpha_pattern`.
kwargs (additional keyword arguments, *optional*):
Additional keyword arguments passed along to the `push_to_hub` method.
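        Example (a minimal sketch; the base model and output path below are placeholders):
        ```py
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import LoraConfig, get_peft_model
        >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        >>> peft_model = get_peft_model(base_model, LoraConfig(task_type="CAUSAL_LM"))
        >>> # ... train the adapter ...
        >>> peft_model.save_pretrained("./my-lora-adapter")  # writes adapter_config.json and adapter_model.safetensors
        ```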
"""
if os.path.isfile(save_directory):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
if selected_adapters is None:
selected_adapters = list(self.peft_config.keys())
else:
if any(
selected_adapter_name not in list(self.peft_config.keys())
for selected_adapter_name in selected_adapters
):
raise ValueError(
f"You passed an invalid `selected_adapters` arguments, current supported adapter names are"
f" {list(self.peft_config.keys())} - got {selected_adapters}."
)
# TODO: remove deprecated parameter in PEFT v0.14.0
if convert_pissa_to_lora is not None:
warnings.warn(
"`convert_pissa_to_lora` is deprecated and will be removed in a future version. "
"Use `path_initial_model_for_weight_conversion` instead."
)
path_initial_model_for_weight_conversion = convert_pissa_to_lora
def save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs):
if peft_config.use_rslora and (peft_config.rank_pattern or peft_config.alpha_pattern):
msg = (
"Passing `path_initial_model_for_weight_conversion` to `save_pretrained` is not supported when "
"using `rank_pattern` or `alpha_pattern` at the same time as `use_rslora=True`."
)
raise ValueError(msg)
if not any(
str(peft_config.init_lora_weights).lower().startswith(prefix) for prefix in ["pissa", "olora", "true"]
):
warnings.warn(
"`path_initial_model_for_weight_conversion` only works for converting a PiSSA or OLoRA adapter to "
"a LoRA adapter"
)
initial_adapter_name = os.path.basename(path_initial_model_for_weight_conversion)
try:
self.load_adapter(
os.path.dirname(path_initial_model_for_weight_conversion),
subfolder=initial_adapter_name,
adapter_name=initial_adapter_name,
)
is_pissa = str(self.peft_config[initial_adapter_name].init_lora_weights).lower().startswith("pissa")
is_olora = str(self.peft_config[initial_adapter_name].init_lora_weights).lower() == "olora"
if is_pissa or is_olora:
raise ValueError(
"The `init_lora_weights` parameter of the initial adapter should be set to `True`. "
"Otherwise, `self.load_adapter` will subtract the decomposed values again based on the "
"residual model."
)
output_state_dict = self.base_model.subtract_mutated_init(
output_state_dict, initial_adapter_name, kwargs
)
finally:
self.delete_adapter(initial_adapter_name)
return output_state_dict
if is_main_process:
os.makedirs(save_directory, exist_ok=True)
self.create_or_update_model_card(save_directory)
for adapter_name in selected_adapters:
peft_config = self.peft_config[adapter_name]
# save only the trainable weights
output_state_dict = get_peft_model_state_dict(
self,
state_dict=kwargs.get("state_dict", None),
adapter_name=adapter_name,
save_embedding_layers=save_embedding_layers,
)
output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory
os.makedirs(output_dir, exist_ok=True)
if is_main_process and safe_serialization:
# Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134
# Safetensors does not allow tensor aliasing.
# We're going to remove aliases before saving
ptrs = collections.defaultdict(list)
for name, tensor in output_state_dict.items():
# Sometimes in the state_dict we have non-tensor objects.
# e.g. in bitsandbytes we have some `str` objects in the state_dict
if isinstance(tensor, torch.Tensor):
ptrs[id_tensor_storage(tensor)].append(name)
else:
# In the non-tensor case, fall back to the pointer of the object itself
ptrs[id(tensor)].append(name)
# These are all the pointers of shared tensors.
shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
for _, names in shared_ptrs.items():
# Here we just clone the shared tensors to avoid tensor aliasing which is
# not supported in safetensors.
for shared_tensor_name in names[1:]:
output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone()
if path_initial_model_for_weight_conversion is not None:
peft_config.init_lora_weights = True
peft_config.save_pretrained(path_initial_model_for_weight_conversion)
output_state_dict = save_mutated_as_lora(
peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs
)
safe_save_file(
output_state_dict,
os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME),
metadata={"format": "pt"},
)
elif is_main_process:
if path_initial_model_for_weight_conversion is not None:
peft_config.init_lora_weights = True
peft_config.save_pretrained(path_initial_model_for_weight_conversion)
output_state_dict = save_mutated_as_lora(
peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs
)
torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
# save the config and change the inference mode to `True`
if peft_config.base_model_name_or_path is None:
peft_config.base_model_name_or_path = (
self.base_model.__dict__.get("name_or_path", None)
if peft_config.is_prompt_learning
else self.base_model.model.__dict__.get("name_or_path", None)
)
inference_mode = peft_config.inference_mode
peft_config.inference_mode = True
if peft_config.task_type is None:
# deal with auto mapping
base_model_class = self._get_base_model_class(
is_prompt_tuning=peft_config.is_prompt_learning,
)
parent_library = base_model_class.__module__
auto_mapping_dict = {
"base_model_class": base_model_class.__name__,
"parent_library": parent_library,
}
else:
auto_mapping_dict = None
if is_main_process:
if path_initial_model_for_weight_conversion is not None:
peft_config.init_lora_weights = True
peft_config.r *= 2
if not peft_config.use_rslora:
peft_config.lora_alpha *= 2
else:
# with rslora, we have scaling = alpha / sqrt(r), we thus adjust alpha to keep the same scaling
peft_config.lora_alpha *= 2**0.5
if peft_config.rank_pattern:
peft_config.rank_pattern = {key: 2 * val for key, val in peft_config.rank_pattern.items()}
if peft_config.alpha_pattern:
peft_config.alpha_pattern = {key: 2 * val for key, val in peft_config.alpha_pattern.items()}
peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict)
peft_config.inference_mode = inference_mode
@classmethod
def from_pretrained(
cls,
model: torch.nn.Module,
model_id: Union[str, os.PathLike],
adapter_name: str = "default",
is_trainable: bool = False,
config: Optional[PeftConfig] = None,
autocast_adapter_dtype: bool = True,
ephemeral_gpu_offload: bool = False,
**kwargs: Any,
) -> PeftModel:
r"""
Instantiate a PEFT model from a pretrained model and loaded PEFT weights.
        Note that the passed `model` may be modified in place.
Args:
model ([`torch.nn.Module`]):
The model to be adapted. For 🤗 Transformers models, the model should be initialized with the
[`~transformers.PreTrainedModel.from_pretrained`].
model_id (`str` or `os.PathLike`):
The name of the PEFT configuration to use. Can be either:
- A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
Hub.
- A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
method (`./my_peft_config_directory/`).
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter to be loaded. This is useful for loading multiple adapters.
is_trainable (`bool`, *optional*, defaults to `False`):
Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
used for inference.
config ([`~peft.PeftConfig`], *optional*):
The configuration object to use instead of an automatically loaded configuration. This configuration
object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
loaded before calling `from_pretrained`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Only relevant for specific adapter types.
ephemeral_gpu_offload (`bool`, *optional*):
Whether to use ephemeral GPU offloading for partially loaded modules. Defaults to `False`. This is
useful when parts of the model and/or components (such as adapters) are kept in CPU memory until they
are needed. Rather than perform expensive operations on small data, the data is transferred to the GPU
on-demand, the operation(s) performed, and the results moved back to CPU memory. This brings a slight
momentary VRAM overhead but gives orders of magnitude speedup in certain cases.
torch_device (`str`, *optional*, defaults to None):
The device to load the adapter on. If `None`, the device will be inferred.
            kwargs (`optional`):
Additional keyword arguments passed along to the specific PEFT configuration class.
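        Example (a minimal sketch; the base model and adapter path below are placeholders):
        ```py
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import PeftModel
        >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        >>> peft_model = PeftModel.from_pretrained(base_model, "./my-lora-adapter")
        >>> # pass `is_trainable=True` if the loaded adapter should be trained further
        ```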
"""
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
# load the config
if config is None:
config = PEFT_TYPE_TO_CONFIG_MAPPING[
PeftConfig._get_peft_type(
model_id,
subfolder=kwargs.get("subfolder", None),
revision=kwargs.get("revision", None),
cache_dir=kwargs.get("cache_dir", None),
use_auth_token=kwargs.get("use_auth_token", None),
token=kwargs.get("token", None),
)
].from_pretrained(model_id, **kwargs)
elif isinstance(config, PeftConfig):
config.inference_mode = not is_trainable
else:
raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
# Runtime configuration, if supported
if hasattr(config, "runtime_config"):
config.runtime_config.ephemeral_gpu_offload = ephemeral_gpu_offload
else:
if ephemeral_gpu_offload:
warnings.warn("Ephemeral GPU offloading is not supported for this model. Ignoring.")
if hasattr(model, "hf_device_map"):
weight_map = dict(named_module_tensors(model, recurse=True))
# recreate the offload_index for disk-offloaded modules: we need to know the location in storage of each weight
# before the offload hook is removed from the model
disk_modules = set()
index = None
for name, module in model.named_modules():
if hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "original_devices"):
if hasattr(module._hf_hook.weights_map, "dataset"):
index = module._hf_hook.weights_map.dataset.index
for key in module._hf_hook.original_devices.keys():
if module._hf_hook.original_devices[key] == torch.device("meta"):
disk_modules.add(str(name) + "." + str(key))
if disk_modules and not kwargs.get("use_safetensors", True):
raise ValueError("Disk offloading currently only supported for safetensors")
if index:
offload_index = {
p: {
"safetensors_file": index[p]["safetensors_file"],
"weight_name": p,
"dtype": str(weight_map[p].dtype).replace("torch.", ""),
}
for p in weight_map.keys()
if p in disk_modules
}
kwargs["offload_index"] = offload_index
if (getattr(model, "hf_device_map", None) is not None) and len(
set(model.hf_device_map.values()).intersection({"cpu", "disk"})
) > 0:
remove_hook_from_submodules(model)
if config.is_prompt_learning and is_trainable:
raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
else:
config.inference_mode = not is_trainable
if isinstance(getattr(model, "base_model", None), XLoraModel):
if not isinstance(config, XLoraConfig):
raise TypeError(f"Expected 'XLoraConfig', got '{type(config)}' instead.")
if "adapters" in kwargs:
config.adapters = kwargs["adapters"]
else:
# If the path is on HF hub, then we get the adapter names to create a subfolders list which tells
# `load_adapter` where the adapters are.
if not os.path.exists(model_id):
s = HfFileSystem()
# The names of the adapters which must be in folders
adapter_names = [
file["name"][len(model_id) + 1 :] for file in s.ls(model_id) if file["type"] == "directory"
]
# Prepare a dict of adapter paths, which really just point to the hf id; we will use the subfolders
adapter_paths = {}
for adapter_name in adapter_names:
adapter_paths[adapter_name] = os.path.join(model_id, model_id)
config.adapters = adapter_paths
config._subfolders = adapter_names
else:
if "adapters" not in kwargs:
raise ValueError("If model_id is a local path, then `adapters` must be passed in kwargs.")
if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():
model = cls(model, config, adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
else:
model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](
model, config, adapter_name, autocast_adapter_dtype=autocast_adapter_dtype
)
model.load_adapter(
model_id, adapter_name, is_trainable=is_trainable, autocast_adapter_dtype=autocast_adapter_dtype, **kwargs
)
return model
def _setup_prompt_encoder(self, adapter_name: str):
config = self.peft_config[adapter_name]
if not hasattr(self, "prompt_encoder"):
self.prompt_encoder = torch.nn.ModuleDict({})
self.prompt_tokens = {}
transformer_backbone = None
for name, module in self.base_model.named_children():
for param in module.parameters():
param.requires_grad = False
if isinstance(module, PreTrainedModel):
                # Make sure to freeze the Transformers model
if transformer_backbone is None:
transformer_backbone = module
self.transformer_backbone_name = name
if transformer_backbone is None:
transformer_backbone = self.base_model
if config.num_transformer_submodules is None:
config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
for named_param, value in list(transformer_backbone.named_parameters()):
# for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0]
# the actual unsharded shape is stored in "ds_shape" attribute
# special handling is needed in case the model is initialized in deepspeed.zero.Init() context or HfDeepSpeedConfig
# has been called before
# For reference refer to issue: https://github.com/huggingface/peft/issues/996
deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None)
if value.shape[0] == self.base_model.config.vocab_size or (
deepspeed_distributed_tensor_shape is not None
and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size
):
self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", ""))
break
if config.peft_type == PeftType.PROMPT_TUNING:
prompt_encoder = PromptEmbedding(config, self.word_embeddings)
elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings)
elif config.peft_type == PeftType.P_TUNING:
prompt_encoder = PromptEncoder(config)
elif config.peft_type == PeftType.PREFIX_TUNING:
prompt_encoder = PrefixEncoder(config)
else:
raise ValueError("Not supported")
prompt_encoder = prompt_encoder.to(self.device)
self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))
self.prompt_tokens[adapter_name] = torch.arange(
config.num_virtual_tokens * config.num_transformer_submodules
).long()
def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel):
r"""
Prepares the model for gradient checkpointing if necessary
"""
if not (
getattr(model, "is_loaded_in_8bit", False)
or getattr(model, "is_loaded_in_4bit", False)
or getattr(model, "is_quantized", False)
):
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
elif hasattr(model, "get_input_embeddings"):
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
return model
def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor:
"""
Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning
method.
"""
prompt_encoder = self.prompt_encoder[adapter_name]
prompt_tokens = (
self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device)
)
if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:
prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]
if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING:
prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens)
else:
prompt_embeddings = prompt_encoder(prompt_tokens)
return prompt_embeddings[0].detach().cpu()
def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method.
"""
peft_config = self.active_peft_config
prompt_encoder = self.prompt_encoder[self.active_adapter]
prompt_tokens = (
self.prompt_tokens[self.active_adapter]
.unsqueeze(0)
.expand(batch_size, -1)
.to(prompt_encoder.embedding.weight.device)
)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]
if peft_config.inference_mode:
past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
else:
past_key_values = prompt_encoder(prompt_tokens)
if self.base_model_torch_dtype is not None:
past_key_values = past_key_values.to(self.base_model_torch_dtype)
past_key_values = past_key_values.view(
batch_size,
peft_config.num_virtual_tokens,
peft_config.num_layers * 2,
peft_config.num_attention_heads,
peft_config.token_dim // peft_config.num_attention_heads,
)
if peft_config.num_transformer_submodules == 2:
past_key_values = torch.cat([past_key_values, past_key_values], dim=2)
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(
peft_config.num_transformer_submodules * 2
)
if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:
post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]
past_key_values = post_process_fn(past_key_values)
return past_key_values
else:
if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
prompts = prompt_encoder(prompt_tokens, task_ids)
else:
if peft_config.inference_mode:
prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)
else:
prompts = prompt_encoder(prompt_tokens)
return prompts
def get_nb_trainable_parameters(self) -> tuple[int, int]:
r"""
Returns the number of trainable parameters and the number of all parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in self.named_parameters():
num_params = param.numel()
# if using DS Zero 3 and the weights are initialized empty
if num_params == 0 and hasattr(param, "ds_numel"):
num_params = param.ds_numel
# Due to the design of 4bit linear layers from bitsandbytes
# one needs to multiply the number of parameters by 2 to get
# the correct number of parameters
if param.__class__.__name__ == "Params4bit":
if hasattr(param, "element_size"):
num_bytes = param.element_size()
elif not hasattr(param, "quant_storage"):
num_bytes = 1
else:
num_bytes = param.quant_storage.itemsize
num_params = num_params * 2 * num_bytes
all_param += num_params
if param.requires_grad:
trainable_params += num_params
return trainable_params, all_param
def print_trainable_parameters(self) -> None:
"""
Prints the number of trainable parameters in the model.
        Note: print_trainable_parameters() uses get_nb_trainable_parameters(), which is different from
        num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
        (trainable parameters, all parameters) of the PEFT model, which includes the modified backbone transformer
        model. For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules.
        However, for prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True)
        returns the number of trainable parameters of the backbone transformer model, which can therefore differ.
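        Example (illustrative numbers only):
        ```py
        >>> peft_model.print_trainable_parameters()
        trainable params: 294,912 || all params: 125,534,208 || trainable%: 0.2349
        ```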
"""
trainable_params, all_param = self.get_nb_trainable_parameters()
print(
f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param:.4f}"
)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "base_model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.base_model, name)
@contextmanager
def _enable_peft_forward_hooks(self, *args, **kwargs):
# If the base model has a method called _enable_peft_forward_hooks, it is invoked as a context. Otherwise, this
# runs without any changes
if hasattr(self.base_model, "_enable_peft_forward_hooks"):
with self.base_model._enable_peft_forward_hooks(*args, **kwargs):
yield
return
else:
# nothing to enable
yield
return
def forward(self, *args: Any, **kwargs: Any):
"""
Forward pass of the model.
"""
with self._enable_peft_forward_hooks(*args, **kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
return self.get_base_model()(*args, **kwargs)
def generate(self, *args, **kwargs):
with self._enable_peft_forward_hooks(*args, **kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
return self.get_base_model().generate(*args, **kwargs)
def _get_base_model_class(self, is_prompt_tuning=False):
"""
Returns the base model class.
"""
if not is_prompt_tuning:
return self.base_model.model.__class__
return self.base_model.__class__
@contextmanager
def disable_adapter(self):
"""
Context manager that disables the adapter module. Use this to run inference on the base model.
Example:
```py
>>> with model.disable_adapter():
... model(inputs)
```
"""
if self.peft_config[self.active_adapter].is_prompt_learning:
try:
# TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and
# letting the underlying methods deal with it, same as how LoRA does it.
old_forward = self.forward
self.forward = self.base_model.forward
old_prepare_inputs_for_generation = self.prepare_inputs_for_generation
self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
yield
finally:
self.forward = old_forward
self.prepare_inputs_for_generation = old_prepare_inputs_for_generation
elif self.peft_config[self.active_adapter].is_adaption_prompt:
try:
self.base_model.disable_adapter_layers()
yield
finally:
self.base_model.enable_adapter_layers()
else: # LoRA, LoHa, etc.
model_status = self.get_model_status()
if model_status.enabled == "irregular":
warnings.warn(
"The model contains some adapter layers that are enabled and others that are disabled. "
"This is most likely unintentional. After exiting the disable_adapter context, all adapters "
"will be enabled"
)
try:
self.base_model.disable_adapter_layers()
yield
finally:
if model_status.enabled is not False:
# model_status.enabled is `True` or `"irregular"`
self.base_model.enable_adapter_layers()
def get_base_model(self) -> torch.nn.Module:
"""
Returns the base model.
"""
return (
self.base_model
if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY)
else self.base_model.model
)
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
"""
Add an adapter to the model based on the passed configuration.
This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
The name for the new adapter should be unique.
The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
adapter.
Args:
adapter_name (`str`):
The name of the adapter to be added.
peft_config ([`PeftConfig`]):
The configuration of the adapter to be added.
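        Example (a minimal sketch; the adapter name and config values are placeholders):
        ```py
        >>> from peft import LoraConfig
        >>> second_config = LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM")
        >>> peft_model.add_adapter("second_adapter", second_config)
        >>> peft_model.set_adapter("second_adapter")  # the new adapter must be activated explicitly
        ```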
"""
if peft_config.peft_type != self.peft_type:
raise ValueError(
f"Cannot combine adapters with different peft types. "
f"Found {self.peft_type} and {peft_config.peft_type}."
)
try:
if peft_config.is_prompt_learning:
self.peft_config[adapter_name] = peft_config
if hasattr(self.config, "to_dict"):
dict_config = self.config.to_dict()
else:
dict_config = self.config
peft_config = _prepare_prompt_learning_config(peft_config, dict_config)
self._setup_prompt_encoder(adapter_name)
elif peft_config.is_adaption_prompt:
self.base_model.add_adapter(adapter_name, peft_config)
else:
self.peft_config[adapter_name] = peft_config
self.base_model.inject_adapter(self.base_model.model, adapter_name)
except Exception: # something went wrong, roll back
if adapter_name in self.peft_config:
del self.peft_config[adapter_name]
raise
self.set_additional_trainable_modules(peft_config, adapter_name)
def set_additional_trainable_modules(self, peft_config, adapter_name):
if getattr(peft_config, "modules_to_save", None) is not None:
if self.modules_to_save is None:
self.modules_to_save = set(peft_config.modules_to_save)
else:
self.modules_to_save.update(peft_config.modules_to_save)
_set_trainable(self, adapter_name) # this may add a new ModulesToSaveWrapper
def get_layer_status(self) -> list[TunerLayerStatus]:
"""Get the status of each adapter layer in the model.
This method returns a list of `TunerLayerStatus` dataclass instances, each of which contains the following
attributes:
- `name` (`str`):
The name of the adapter layer, e.g. `model.encoder.block.0.layer.0.SelfAttention.q`.
- `module_type` (`str`):
The type of the adapter layer, e.g. `lora.Linear`.
- `enabled` (`bool`):
Whether the adapter layer is enabled.
- `active_adapters` (`list[str]`):
The names of the active adapters, if any, e.g. `["default"]`.
- `merged_adapters` (`list[str]`):
The names of the merged adapters, if any, e.g. `["default"]`.
- `available_adapters` (`list[str]`):
The names of the available adapters, e.g. `["default"]`.
Args:
model ([`~PeftModel`]):
The model to get the adapter layer status from.
Returns:
list[`peft.peft_model.TunerLayerStatus`]:
A list of dataclasses, each containing the status of the corresponding adapter layer.
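        Example (a minimal sketch; the layer name and values shown are illustrative):
        ```py
        >>> layer_status = peft_model.get_layer_status()
        >>> layer_status[0].name, layer_status[0].enabled, layer_status[0].active_adapters
        ('model.model.decoder.layers.0.self_attn.q_proj', True, ['default'])
        ```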
"""
return get_layer_status(self)
def get_model_status(self) -> TunerModelStatus:
"""Get the status of tuners of the model.
This method returns a `TunerModelStatus` dataclass instance, which contains the following attributes:
- `base_model_type` (`str`):
The type of the base model, e.g. `T5Model`.
- `adapter_model_type` (`str`):
The type of the adapter model, e.g. `LoraModel`.
- `peft_types` (`dict[str, str]`):
The mapping of adapter name to adapter type, e.g. `{"default": "LORA"}`.
- `trainable_params` (`int`):
The number of trainable parameters in the model.
- `total_params` (`int`):
The total number of parameters in the model.
- `num_adapter_layers` (`int`):
The number of adapter layers in the model.
- `enabled` (`bool`, `Literal["irregular"]`):
Whether all adapter layers are enabled. If some are enabled and some are not, this will be `"irregular"`.
This means that your model is in an inconsistent state and might not work as expected.
- `active_adapters` (`list[str]`, `Literal["irregular"]`):
The names of the active adapters. If the active adapters are not consistent across all layers, this will be
`"irregular"`, which means that your model is in an inconsistent state and might not work as expected.
- `merged_adapters` (`list[str]`, `Literal["irregular"]`):
The names of the merged adapters. If the merged adapters are not consistent across all layers, this will be
`"irregular"`, which means that your model is in an inconsistent state and might not work as expected.
- `available_adapters` (`list[str]`):
The names of the available adapters, e.g. `["default"]`.
Args:
model ([`~PeftModel`]):
The model to get the adapter layer status from.
Returns:
`peft.peft_model.TunerModelStatus`:
A dataclass containing the status of the model.
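        Example (a minimal sketch; the values shown are illustrative):
        ```py
        >>> status = peft_model.get_model_status()
        >>> status.adapter_model_type, status.enabled, status.active_adapters
        ('LoraModel', True, ['default'])
        ```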
"""
return get_model_status(self)
@classmethod
def _split_kwargs(cls, kwargs: dict[str, Any]):
_kwargs_not_in_hf_hub_download_signature = ("use_auth_token",)
hf_hub_download_kwargs = {}
other_kwargs = {}
for key, value in kwargs.items():
if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature:
hf_hub_download_kwargs[key] = value
else:
other_kwargs[key] = value
return hf_hub_download_kwargs, other_kwargs
def _update_offload(self, offload_index: dict[str, dict[str, str]], adapters_weights: dict[str, torch.tensor]):
"""
        Update the offload_index and safetensors files for loading and merging PeftModels with disk-offloaded modules.
Args:
offload_index (Dict[str: str]):
Dictionary of disk-offloaded modules with their metadata and safetensors filenames
adapters_weights (Dict[str: torch.tensor]):
Dictionary of Peft adapter module names and weights
"""
if not offload_index:
return offload_index
prefix = "base_model.model."
# rename offload index weight and model names
adapter_names = list(self.peft_config.keys())
for adapter_name in adapter_names:
keys = list(offload_index.keys())
block_id = keys[0].split(".")[0] + "." # for writing safetensors key,
# replace original offload index keys with PeftModel keys
for key in keys:
suffix_pos = key.rfind(".")
extended_prefix = prefix + key[:suffix_pos]
module = dict(self.named_modules())[extended_prefix]
if isinstance(module, BaseTunerLayer):
new_key = prefix + key[:suffix_pos] + ".base_layer" + key[suffix_pos:]
else:
new_key = prefix + key
offload_index[key]["weight_name"] = new_key
offload_index[new_key] = offload_index[key]
del offload_index[key]
files_seen = set()
# rename safetensors for dispatch
for new_key in list(offload_index.keys()):
fname = offload_index[new_key]["safetensors_file"]
# make a new file name
new_fname_list = list(fname.split(os.sep))
for i, name in enumerate(new_fname_list):
if "--" in name:
new_fname_list[i] += "-peft"
break
new_fname = os.path.join(*new_fname_list)
if fname in files_seen:
continue
safe_dict = {}
with safe_open(fname, framework="pt") as f:
for safe_key in f.keys():
safe_tensor = f.get_tensor(safe_key)
metadata = f.metadata()
suffix_pos = safe_key.rfind(".")
extended_prefix = prefix + block_id + safe_key[:suffix_pos]
safe_module = dict(self.named_modules())[extended_prefix]
if isinstance(safe_module, BaseTunerLayer):
final_key = extended_prefix + ".base_layer" + safe_key[suffix_pos:]
lora_dict = {key: val for key, val in adapters_weights.items() if extended_prefix in key}
# add LoRA keys and values to disk offload
for lora_key, lora_val in lora_dict.items():
divide = lora_key.rfind(".")
new_key = lora_key[:divide] + f".{adapter_name}" + lora_key[divide:]
safe_dict[new_key] = lora_val
else:
final_key = prefix + block_id + safe_key
safe_dict[final_key] = safe_tensor
files_seen.add(new_fname)
# avoid overwriting original safetensors
for key in safe_dict.keys():
offload_index[key] = {"safetensors_file": new_fname, "weight_name": key}
base_name = os.path.dirname(new_fname)
if not os.path.exists(base_name):
os.makedirs(base_name)
safe_save_file(safe_dict, new_fname, metadata=metadata)
def load_adapter(
self,
model_id: str,
adapter_name: str,
is_trainable: bool = False,
torch_device: Optional[str] = None,
autocast_adapter_dtype: bool = True,
ephemeral_gpu_offload: bool = False,
**kwargs: Any,
):
"""
Load a trained adapter into the model.
The name for the new adapter should be unique.
The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
adapter.
Args:
            model_id (`str` or `os.PathLike`):
                The name of the PEFT adapter to load. Can be a model id on the Hugging Face Hub or a path to a
                directory containing adapter weights saved with [`PeftModel.save_pretrained`].
            adapter_name (`str`):
                The name under which the loaded adapter will be registered.
is_trainable (`bool`, *optional*, defaults to `False`):
Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
used for inference.
torch_device (`str`, *optional*, defaults to None):
The device to load the adapter on. If `None`, the device will be inferred.
autocast_adapter_dtype (`bool`, *optional*, defaults to `True`):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter
weights using float16 and bfloat16 to float32, as this is typically required for stable training, and
only affect select PEFT tuners.
ephemeral_gpu_offload (`bool`, *optional*, defaults to `False`):
Whether to use ephemeral GPU offloading for partially loaded modules. Defaults to `False`.
            kwargs (`optional`):
Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub.
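        Example (a minimal sketch; the adapter path and name below are placeholders):
        ```py
        >>> # assumes `peft_model` is an existing PeftModel, e.g. created via `PeftModel.from_pretrained`
        >>> peft_model.load_adapter("./my-second-adapter", adapter_name="other")
        >>> peft_model.set_adapter("other")  # activate the newly loaded adapter
        ```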
"""
from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs)
if torch_device is None:
torch_device = infer_device()
if adapter_name not in self.peft_config:
# load the config
peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[
PeftConfig._get_peft_type(
model_id,
**hf_hub_download_kwargs,
)
].from_pretrained(
model_id,
ephemeral_gpu_offload=ephemeral_gpu_offload,
**hf_hub_download_kwargs,
)
if peft_config.is_prompt_learning and is_trainable:
raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
else:
peft_config.inference_mode = not is_trainable
self.add_adapter(adapter_name, peft_config)
adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs)
# load the weights into the model
ignore_mismatched_sizes = kwargs.get("ignore_mismatched_sizes", False)
load_result = set_peft_model_state_dict(
self, adapters_weights, adapter_name=adapter_name, ignore_mismatched_sizes=ignore_mismatched_sizes
)
if (
(getattr(self, "hf_device_map", None) is not None)
and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
and len(self.peft_config) == 1
):
device_map = kwargs.get("device_map", "auto")
max_memory = kwargs.get("max_memory", None)
offload_dir = kwargs.get("offload_folder", None)
offload_index = kwargs.get("offload_index", None)
dispatch_model_kwargs = {}
# Safety checker for previous `accelerate` versions
# `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
if "offload_index" in inspect.signature(dispatch_model).parameters:
dispatch_model_kwargs["offload_index"] = offload_index
no_split_module_classes = self._no_split_modules
if device_map != "sequential":
max_memory = get_balanced_memory(
self,
max_memory=max_memory,
no_split_module_classes=no_split_module_classes,
low_zero=(device_map == "balanced_low_0"),
)
if isinstance(device_map, str):
device_map = infer_auto_device_map(
self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
)
self._update_offload(offload_index, adapters_weights)
dispatch_model_kwargs["offload_index"] = offload_index
dispatch_model(
self,
device_map=device_map,
offload_dir=offload_dir,
**dispatch_model_kwargs,
)
hook = AlignDevicesHook(io_same_device=True)
if self.peft_config[adapter_name].is_prompt_learning:
remove_hook_from_submodules(self.prompt_encoder)
add_hook_to_module(self.get_base_model(), hook)
if hasattr(self.base_model, "_cast_adapter_dtype"):
self.base_model._cast_adapter_dtype(
adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype
)
# Set model in evaluation mode to deactivate Dropout modules by default
if not is_trainable:
self.eval()
return load_result
def set_adapter(self, adapter_name: str) -> None:
"""
Sets the active adapter.
Only one adapter can be active at a time.
Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is
not desired, use the following code.
```py
>>> for name, param in model_peft.named_parameters():
... if ...: # some check on name (ex. if 'lora' in name)
... param.requires_grad = False
```
Args:
adapter_name (`str`):
The name of the adapter to be set as active. The adapter must be loaded first.
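        Example (a minimal sketch; adapter paths and names are placeholders):
        ```py
        >>> peft_model.load_adapter("./my-second-adapter", adapter_name="other")
        >>> peft_model.set_adapter("other")
        >>> peft_model.active_adapter
        'other'
        ```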
"""
if adapter_name not in self.peft_config:
raise ValueError(f"Adapter {adapter_name} not found.")
self.active_adapter = adapter_name
if not self.peft_config[adapter_name].is_prompt_learning:
self.base_model.set_adapter(adapter_name)
_set_adapter(self, adapter_name)
@property
def base_model_torch_dtype(self):
return getattr(self.base_model, "dtype", None)
@property
def active_peft_config(self):
return self.peft_config[self.active_adapter]
def create_or_update_model_card(self, output_dir: str):
"""
        Updates or creates the model card to include information about peft:
1. Adds `peft` library tag
2. Adds peft version
3. Adds base model info
4. Adds quantization information if it was used
"""
filename = os.path.join(output_dir, "README.md")
card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData())
card.data["library_name"] = "peft"
model_config = BaseTuner.get_model_config(self)
model_config = None if model_config == DUMMY_MODEL_CONFIG else model_config
if model_config is not None and "_name_or_path" in model_config:
card.data["base_model"] = model_config["_name_or_path"]
lines = card.text.splitlines()
quantization_config = None
if hasattr(model_config, "quantization_config"):
quantization_config = self.config.quantization_config.to_dict()
training_config_text = ""
quantization_prefix = "The following `bitsandbytes` quantization config was used during training:"
# Adds quantization information if it was used
if quantization_config is not None:
training_config_text += f"\n{quantization_prefix}\n"
training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()])
training_config_text += "\n"
training_procedure_heading = "## Training procedure"
if quantization_prefix not in lines and bool(training_config_text):
if training_procedure_heading in lines:
lines.insert(lines.index(training_procedure_heading) + 2, training_config_text)
else:
lines.append(f"{training_procedure_heading}\n{training_config_text}")
# Adds peft version
framework_block_heading = "### Framework versions"
if f"- PEFT {__version__}" not in lines:
if framework_block_heading in lines:
lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}")
else:
lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}")
card.text = "\n".join(lines)
card.save(filename)
class PeftModelForSequenceClassification(PeftModel):
"""
Peft model for sequence classification tasks.
Args:
model ([`~transformers.PreTrainedModel`]): Base transformer model.
peft_config ([`PeftConfig`]): Peft config.
adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect
select PEFT tuners.
**Attributes**:
- **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
- **cls_layer_name** (`str`) -- The name of the classification layer.
Example:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> from peft import PeftModelForSequenceClassification, get_peft_config
>>> config = {
... "peft_type": "PREFIX_TUNING",
... "task_type": "SEQ_CLS",
... "inference_mode": False,
... "num_virtual_tokens": 20,
... "token_dim": 768,
... "num_transformer_submodules": 1,
... "num_attention_heads": 12,
... "num_layers": 12,
... "encoder_hidden_size": 768,
... "prefix_projection": False,
... "postprocess_past_key_value_function": None,
... }
>>> peft_config = get_peft_config(config)
>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")
>>> peft_model = PeftModelForSequenceClassification(model, peft_config)
>>> peft_model.print_trainable_parameters()
trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
```
"""
def __init__(
self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs
) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
classifier_module_names = ["classifier", "score"]
if self.modules_to_save is None:
self.modules_to_save = set(classifier_module_names)
else:
self.modules_to_save.update(classifier_module_names)
if hasattr(peft_config, "modules_to_save"):
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
for name, _ in self.base_model.named_children():
if any(module_name in name for module_name in self.modules_to_save):
self.cls_layer_name = name
break
# to make sure classifier layer is trainable; this may add a new ModulesToSaveWrapper
_set_trainable(self, adapter_name)
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
"""
Add an adapter to the model based on the passed configuration.
This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
The name for the new adapter should be unique.
The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
adapter.
Args:
adapter_name (`str`):
The name of the adapter to be added.
peft_config ([`PeftConfig`]):
The configuration of the adapter to be added.
"""
# ensure that additional adapters also add the classifier layer to modules_to_save
if hasattr(peft_config, "modules_to_save"):
classifier_module_names = ["classifier", "score"]
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
return super().add_adapter(adapter_name, peft_config)
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
task_ids=None,
**kwargs,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
if peft_config.peft_type == PeftType.POLY:
kwargs["task_ids"] = task_ids
return self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get("position_ids", None) is not None:
warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
kwargs["position_ids"] = None
kwargs.update(
{
"attention_mask": attention_mask,
"labels": labels,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
}
)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
else:
if kwargs.get("token_type_ids", None) is not None:
kwargs["token_type_ids"] = torch.cat(
(
torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
kwargs["token_type_ids"],
),
dim=1,
).long()
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def _prefix_tuning_forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
batch_size = _get_batch_size(input_ids, inputs_embeds)
past_key_values = self.get_prompt(batch_size)
fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
kwargs.update(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"inputs_embeds": inputs_embeds,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
"past_key_values": past_key_values,
}
)
if "past_key_values" in fwd_params:
return self.base_model(labels=labels, **kwargs)
else:
transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
if "past_key_values" not in fwd_params:
raise ValueError("Model does not support past key values which are required for prefix tuning.")
outputs = transformer_backbone_name(**kwargs)
pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]
if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
pooled_output = self.base_model.dropout(pooled_output)
logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.base_model.num_labels == 1:
self.config.problem_type = "regression"
elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.base_model.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class PeftModelForCausalLM(PeftModel):
"""
Peft model for causal language modeling.
Args:
model ([`~transformers.PreTrainedModel`]): Base transformer model.
peft_config ([`PeftConfig`]): Peft config.
adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect
select PEFT tuners.
Example:
```py
>>> from transformers import AutoModelForCausalLM
>>> from peft import PeftModelForCausalLM, get_peft_config
>>> config = {
... "peft_type": "PREFIX_TUNING",
... "task_type": "CAUSAL_LM",
... "inference_mode": False,
... "num_virtual_tokens": 20,
... "token_dim": 1280,
... "num_transformer_submodules": 1,
... "num_attention_heads": 20,
... "num_layers": 36,
... "encoder_hidden_size": 1280,
... "prefix_projection": False,
... "postprocess_past_key_value_function": None,
... }
>>> peft_config = get_peft_config(config)
>>> model = AutoModelForCausalLM.from_pretrained("gpt2-large")
>>> peft_model = PeftModelForCausalLM(model, peft_config)
>>> peft_model.print_trainable_parameters()
trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544
```
"""
def __init__(
self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs
) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
task_ids=None,
**kwargs,
):
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
if self.base_model.config.model_type == "mpt":
if inputs_embeds is not None:
raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds")
return self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
if peft_config.peft_type == PeftType.POLY:
kwargs["task_ids"] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get("position_ids", None) is not None:
warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
kwargs["position_ids"] = None
if kwargs.get("token_type_ids", None) is not None:
warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
kwargs["token_type_ids"] = None
kwargs.update(
{
"attention_mask": attention_mask,
"labels": labels,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
}
)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
# overwrite past_kv in kwargs
kwargs["past_key_values"] = self.get_prompt(batch_size)
return self.base_model(input_ids=input_ids, inputs_embeds=inputs_embeds, **kwargs)
else:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# concat prompt labels
if labels is not None:
prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def generate(self, *args, **kwargs):
peft_config = self.active_peft_config
self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
if hasattr(self.base_model, "model"):
self.base_model.model.generation_config = self.generation_config
else:
self.base_model.generation_config = self.generation_config
try:
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(*args, **kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
outputs = self.base_model.generate(*args, **kwargs)
else:
outputs = self.base_model.generate(**kwargs)
except:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
raise
else:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
return outputs
def prepare_inputs_for_generation(self, *args, task_ids: Optional[torch.Tensor] = None, **kwargs):
peft_config = self.active_peft_config
model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
# https://github.com/huggingface/transformers/pull/26681/ introduced new cache format
        # for some architectures which require a special fix for prompt tuning etc.
# TODO: starting with transformers 4.38, all architectures should support caching.
uses_transformers_4_38 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.38.0")
uses_transformers_4_36 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.36.0")
transformers_new_cache_archs = ["llama", "mistral", "persimmon", "phi"]
if packaging.version.parse(transformers.__version__) > packaging.version.parse("4.43.3"):
# https://github.com/huggingface/transformers/pull/31445
transformers_new_cache_archs.append("bloom")
uses_cache = uses_transformers_4_38 or (
uses_transformers_4_36 and self.base_model.config.model_type in transformers_new_cache_archs
)
if peft_config.peft_type == PeftType.POLY:
model_kwargs["task_ids"] = task_ids
if peft_config.is_prompt_learning:
if uses_cache and (model_kwargs["past_key_values"] is not None):
# change in the logic of `prepare_inputs_for_generation` makes the below code necessary
# In prompt learning methods, past key values are longer when compared to the `input_ids`.
                # As such, only consider the last input ids in the autoregressive generation phase.
past_key_values = model_kwargs["past_key_values"]
if isinstance(past_key_values, (tuple, list)):
seq_len = past_key_values[0][0].shape[-2]
else: # using transformers kv cache
seq_len = past_key_values.get_seq_length()
if seq_len >= model_kwargs["input_ids"].shape[1]:
model_kwargs["input_ids"] = model_kwargs["input_ids"][:, -1:]
if model_kwargs.get("attention_mask", None) is not None:
size = model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens
prefix_attention_mask = torch.ones(size).to(model_kwargs["input_ids"].device)
model_kwargs["attention_mask"] = torch.cat(
(prefix_attention_mask, model_kwargs["attention_mask"]), dim=1
)
if model_kwargs.get("position_ids", None) is not None:
warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
model_kwargs["position_ids"] = None
if kwargs.get("token_type_ids", None) is not None:
warnings.warn(
"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
)
kwargs["token_type_ids"] = None
# no past_key_values or past_key_values empty cache
requires_prompt_injection = (model_kwargs["past_key_values"] is None) or (
isinstance(model_kwargs["past_key_values"], transformers.Cache) and not model_kwargs["past_key_values"]
)
if requires_prompt_injection and peft_config.peft_type == PeftType.PREFIX_TUNING:
new_past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0])
model_kwargs["past_key_values"] = new_past_key_values
elif requires_prompt_injection:
inputs_embeds = self.word_embeddings(model_kwargs["input_ids"])
prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1)
model_kwargs["input_ids"] = None
# For transformers>=4.38.0 - for some architectures such as Llama, `cache_position` is
# passed in the forward pass to keep track of the position ids of the cache. We have to
# pop that from `model_kwargs` as `cache_position` is properly created by the model, using the passed
# `inputs_embeds`: https://github.com/huggingface/transformers/blob/593230f0a1150ea9c0477b9d859f25daf73c8c33/src/transformers/models/llama/modeling_llama.py#L956
_ = model_kwargs.pop("cache_position", None)
return model_kwargs
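# Illustrative generation sketch (a hedged example; tensor names are placeholders): with a
# prompt-learning adapter, `generate` temporarily swaps in the `prepare_inputs_for_generation`
# defined above so that the virtual tokens are injected on the first decoding step only -- as
# `past_key_values` for prefix tuning, or as `inputs_embeds` for prompt/P-tuning:
#
#     peft_model = PeftModelForCausalLM(base_model, peft_config)
#     output_ids = peft_model.generate(
#         input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=20
#     )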
class PeftModelForSeq2SeqLM(PeftModel):
"""
Peft model for sequence-to-sequence language modeling.
Args:
model ([`~transformers.PreTrainedModel`]): Base transformer model.
peft_config ([`PeftConfig`]): Peft config.
adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect
select PEFT tuners.
Example:
```py
>>> from transformers import AutoModelForSeq2SeqLM
>>> from peft import PeftModelForSeq2SeqLM, get_peft_config
>>> config = {
... "peft_type": "LORA",
... "task_type": "SEQ_2_SEQ_LM",
... "inference_mode": False,
... "r": 8,
... "target_modules": ["q", "v"],
... "lora_alpha": 32,
... "lora_dropout": 0.1,
... "fan_in_fan_out": False,
... "enable_lora": None,
... "bias": "none",
... }
>>> peft_config = get_peft_config(config)
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)
>>> peft_model.print_trainable_parameters()
trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566
```
"""
def __init__(
self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs
) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation
self.base_model_prepare_encoder_decoder_kwargs_for_generation = (
self.base_model._prepare_encoder_decoder_kwargs_for_generation
)
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
task_ids=None,
**kwargs,
):
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
if peft_config.peft_type == PeftType.POLY:
kwargs["task_ids"] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_inputs_embeds=decoder_inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if decoder_attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
decoder_attention_mask.device
)
if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)
if kwargs.get("position_ids", None) is not None:
warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
kwargs["position_ids"] = None
if kwargs.get("token_type_ids", None) is not None:
warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
kwargs["token_type_ids"] = None
kwargs.update(
{
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"labels": labels,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
}
)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
# overwrite past_kv in kwargs
kwargs["past_key_values"] = self.get_prompt(batch_size)
return self.base_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
decoder_inputs_embeds=decoder_inputs_embeds,
**kwargs,
)
elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
attention_mask.device
)
kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
prompts = self.get_prompt(batch_size=batch_size)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
return self.base_model(
inputs_embeds=inputs_embeds,
decoder_input_ids=decoder_input_ids,
decoder_inputs_embeds=decoder_inputs_embeds,
**kwargs,
)
else:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if decoder_inputs_embeds is None and decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)
if attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
attention_mask.device
)
kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)
# concat prompt labels
if labels is not None:
if peft_config.num_transformer_submodules == 1:
kwargs["labels"] = labels
elif peft_config.num_transformer_submodules == 2:
prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)
kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
if peft_config.num_transformer_submodules == 1:
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
elif peft_config.num_transformer_submodules == 2:
decoder_inputs_embeds = torch.cat(
(prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1
)
return self.base_model(
inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs
)
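    # Note on the prompt-learning branches above: for encoder-decoder models with two transformer
    # submodules, `get_prompt` returns 2 * num_virtual_tokens prompt vectors per example; the first
    # half is prepended to the encoder `inputs_embeds`, the second half to `decoder_inputs_embeds`.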
def generate(self, **kwargs):
peft_config = self.active_peft_config
self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation
self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
self._prepare_encoder_decoder_kwargs_for_generation
)
try:
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
outputs = self.base_model.generate(**kwargs)
else:
if "input_ids" not in kwargs:
raise ValueError("input_ids must be provided for Peft model generation")
if kwargs.get("position_ids", None) is not None:
warnings.warn(
"Position ids are not supported for parameter efficient tuning. Ignoring position ids."
)
kwargs["position_ids"] = None
if kwargs.get("token_type_ids", None) is not None:
warnings.warn(
"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
)
kwargs["token_type_ids"] = None
if peft_config.peft_type == PeftType.PREFIX_TUNING:
outputs = self.base_model.generate(**kwargs)
elif peft_config.peft_type in [
PeftType.PROMPT_TUNING,
PeftType.P_TUNING,
PeftType.MULTITASK_PROMPT_TUNING,
]:
kwargs = deepcopy(kwargs)
if "encoder_outputs" in kwargs:
del kwargs["encoder_outputs"]
warnings.warn(
"`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it."
)
input_ids = kwargs.pop("input_ids")
inputs_embeds = self.word_embeddings(input_ids)
batch_size = inputs_embeds.shape[0]
prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None))
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)
kwargs["inputs_embeds"] = inputs_embeds
if "attention_mask" in kwargs:
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(
kwargs["attention_mask"].device
)
kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1)
return self.base_model.generate(**kwargs)
else:
raise NotImplementedError
except:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
self.base_model_prepare_encoder_decoder_kwargs_for_generation
)
raise
else:
self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation
self.base_model._prepare_encoder_decoder_kwargs_for_generation = (
self.base_model_prepare_encoder_decoder_kwargs_for_generation
)
return outputs
def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs):
peft_config = self.active_peft_config
model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)
if peft_config.peft_type == PeftType.POLY:
model_kwargs["task_ids"] = task_ids
if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:
batch_size = model_kwargs["decoder_input_ids"].shape[0]
past_key_values = self.get_prompt(batch_size)
model_kwargs["past_key_values"] = past_key_values
return model_kwargs
class PeftModelForTokenClassification(PeftModel):
"""
Peft model for token classification tasks.
Args:
model ([`~transformers.PreTrainedModel`]): Base transformer model.
peft_config ([`PeftConfig`]): Peft config.
adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect
select PEFT tuners.
**Attributes**:
- **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
- **cls_layer_name** (`str`) -- The name of the classification layer.
Example:
```py
    >>> from transformers import AutoModelForTokenClassification
>>> from peft import PeftModelForTokenClassification, get_peft_config
>>> config = {
... "peft_type": "PREFIX_TUNING",
... "task_type": "TOKEN_CLS",
... "inference_mode": False,
... "num_virtual_tokens": 20,
... "token_dim": 768,
... "num_transformer_submodules": 1,
... "num_attention_heads": 12,
... "num_layers": 12,
... "encoder_hidden_size": 768,
... "prefix_projection": False,
... "postprocess_past_key_value_function": None,
... }
>>> peft_config = get_peft_config(config)
>>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased")
>>> peft_model = PeftModelForTokenClassification(model, peft_config)
>>> peft_model.print_trainable_parameters()
trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117
```
"""
def __init__(
self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default", **kwargs
) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
classifier_module_names = ["classifier", "score"]
if self.modules_to_save is None:
self.modules_to_save = set(classifier_module_names)
else:
self.modules_to_save.update(classifier_module_names)
if hasattr(peft_config, "modules_to_save"):
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
for name, _ in self.base_model.named_children():
if any(module_name in name for module_name in self.modules_to_save):
self.cls_layer_name = name
break
# to make sure classifier layer is trainable; this may add a new ModulesToSaveWrapper
_set_trainable(self, adapter_name)
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
"""
Add an adapter to the model based on the passed configuration.
This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
The name for the new adapter should be unique.
The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
adapter.
Args:
adapter_name (`str`):
The name of the adapter to be added.
peft_config ([`PeftConfig`]):
The configuration of the adapter to be added.
"""
# ensure that additional adapters also add the classifier layer to modules_to_save
if hasattr(peft_config, "modules_to_save"):
classifier_module_names = ["classifier", "score"]
if peft_config.modules_to_save is None:
peft_config.modules_to_save = classifier_module_names[:]
else:
peft_config.modules_to_save.extend(classifier_module_names)
return super().add_adapter(adapter_name, peft_config)
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
task_ids=None,
**kwargs,
):
peft_config = self.active_peft_config
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if not peft_config.is_prompt_learning:
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
if peft_config.peft_type == PeftType.POLY:
kwargs["task_ids"] = task_ids
return self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get("position_ids", None) is not None:
warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
kwargs["position_ids"] = None
kwargs.update(
{
"attention_mask": attention_mask,
"labels": labels,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
}
)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
else:
if kwargs.get("token_type_ids", None) is not None:
kwargs["token_type_ids"] = torch.cat(
(
torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
kwargs["token_type_ids"],
),
dim=1,
).long()
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def _prefix_tuning_forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
batch_size = _get_batch_size(input_ids, inputs_embeds)
past_key_values = self.get_prompt(batch_size)
fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
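        # `fwd_params` lists the arguments accepted by the classification model's forward. If it
        # accepts `past_key_values`, the prefix can be passed straight through; otherwise we call the
        # transformer backbone directly and apply the dropout + classification head ourselves below.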
kwargs.update(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"inputs_embeds": inputs_embeds,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
"past_key_values": past_key_values,
}
)
if "past_key_values" in fwd_params:
return self.base_model(labels=labels, **kwargs)
else:
transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
if "past_key_values" not in fwd_params:
raise ValueError("Model does not support past key values which are required for prefix tuning.")
outputs = transformer_backbone_name(**kwargs)
sequence_output = outputs[0]
if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
sequence_output = self.base_model.dropout(sequence_output)
logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class PeftModelForQuestionAnswering(PeftModel):
"""
Peft model for extractive question answering.
Args:
model ([`~transformers.PreTrainedModel`]): Base transformer model.
peft_config ([`PeftConfig`]): Peft config.
adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect
select PEFT tuners.
**Attributes**:
- **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
- **cls_layer_name** (`str`) -- The name of the classification layer.
Example:
```py
>>> from transformers import AutoModelForQuestionAnswering
>>> from peft import PeftModelForQuestionAnswering, get_peft_config
>>> config = {
... "peft_type": "LORA",
... "task_type": "QUESTION_ANS",
... "inference_mode": False,
... "r": 16,
... "target_modules": ["query", "value"],
... "lora_alpha": 32,
... "lora_dropout": 0.05,
... "fan_in_fan_out": False,
... "bias": "none",
... }
>>> peft_config = get_peft_config(config)
>>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased")
>>> peft_model = PeftModelForQuestionAnswering(model, peft_config)
>>> peft_model.print_trainable_parameters()
trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013
```
"""
def __init__(
self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs
) -> None:
super().__init__(model, peft_config, adapter_name, **kwargs)
qa_module_names = ["qa_outputs"]
if self.modules_to_save is None:
self.modules_to_save = set(qa_module_names)
else:
self.modules_to_save.update(qa_module_names)
if hasattr(peft_config, "modules_to_save"):
if peft_config.modules_to_save is None:
peft_config.modules_to_save = qa_module_names[:]
else:
peft_config.modules_to_save.extend(qa_module_names)
for name, _ in self.base_model.named_children():
if any(module_name in name for module_name in self.modules_to_save):
self.cls_layer_name = name
break
# to make sure classifier layer is trainable; this may add a new ModulesToSaveWrapper
_set_trainable(self, adapter_name)
def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None:
"""
Add an adapter to the model based on the passed configuration.
This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
The name for the new adapter should be unique.
The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
adapter.
Args:
adapter_name (`str`):
The name of the adapter to be added.
peft_config ([`PeftConfig`]):
The configuration of the adapter to be added.
"""
# ensure that additional adapters also add the classifier layer to modules_to_save
if hasattr(peft_config, "modules_to_save"):
qa_module_names = ["qa_outputs"]
if peft_config.modules_to_save is None:
peft_config.modules_to_save = qa_module_names[:]
else:
peft_config.modules_to_save.extend(qa_module_names)
return super().add_adapter(adapter_name, peft_config)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
task_ids=None,
**kwargs,
):
peft_config = self.active_peft_config
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if not peft_config.is_prompt_learning:
if peft_config.peft_type == PeftType.POLY:
kwargs["task_ids"] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
start_positions=start_positions,
end_positions=end_positions,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get("position_ids", None) is not None:
warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
kwargs["position_ids"] = None
kwargs.update(
{
"attention_mask": attention_mask,
"start_positions": start_positions,
"end_positions": end_positions,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
}
)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)
else:
if kwargs.get("token_type_ids", None) is not None:
kwargs["token_type_ids"] = torch.cat(
(
torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device),
kwargs["token_type_ids"],
),
dim=1,
).long()
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
def _prefix_tuning_forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
batch_size = _get_batch_size(input_ids, inputs_embeds)
past_key_values = self.get_prompt(batch_size)
fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())
kwargs.update(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"inputs_embeds": inputs_embeds,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
"past_key_values": past_key_values,
}
)
if "past_key_values" in fwd_params:
return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs)
else:
transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)
fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())
if "past_key_values" not in fwd_params:
raise ValueError("Model does not support past key values which are required for prefix tuning.")
outputs = transformer_backbone_name(**kwargs)
sequence_output = outputs[0]
if "dropout" in [name for name, _ in list(self.base_model.named_children())]:
sequence_output = self.base_model.dropout(sequence_output)
logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
                # If we are on multi-GPU, split adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class PeftModelForFeatureExtraction(PeftModel):
"""
Peft model for extracting features/embeddings from transformer models
Args:
model ([`~transformers.PreTrainedModel`]): Base transformer model.
peft_config ([`PeftConfig`]): Peft config.
adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`.
autocast_adapter_dtype (`bool`, *optional*):
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
using float16 and bfloat16 to float32, as this is typically required for stable training, and only affect
select PEFT tuners.
**Attributes**:
- **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.
Example:
```py
>>> from transformers import AutoModel
>>> from peft import PeftModelForFeatureExtraction, get_peft_config
>>> config = {
... "peft_type": "LORA",
... "task_type": "FEATURE_EXTRACTION",
... "inference_mode": False,
... "r": 16,
... "target_modules": ["query", "value"],
... "lora_alpha": 32,
... "lora_dropout": 0.05,
... "fan_in_fan_out": False,
... "bias": "none",
... }
>>> peft_config = get_peft_config(config)
>>> model = AutoModel.from_pretrained("bert-base-cased")
>>> peft_model = PeftModelForFeatureExtraction(model, peft_config)
>>> peft_model.print_trainable_parameters()
```
"""
def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default", **kwargs):
super().__init__(model, peft_config, adapter_name, **kwargs)
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
task_ids=None,
**kwargs,
):
peft_config = self.active_peft_config
if not peft_config.is_prompt_learning:
if peft_config.peft_type == PeftType.POLY:
kwargs["task_ids"] = task_ids
with self._enable_peft_forward_hooks(**kwargs):
kwargs = {k: v for k, v in kwargs.items() if k not in self.special_peft_forward_args}
return self.base_model(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
batch_size = _get_batch_size(input_ids, inputs_embeds)
if attention_mask is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
if kwargs.get("position_ids", None) is not None:
warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.")
kwargs["position_ids"] = None
if kwargs.get("token_type_ids", None) is not None:
warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids")
kwargs["token_type_ids"] = None
kwargs.update(
{
"attention_mask": attention_mask,
"output_attentions": output_attentions,
"output_hidden_states": output_hidden_states,
"return_dict": return_dict,
}
)
if peft_config.peft_type == PeftType.PREFIX_TUNING:
# overwrite past_kv in kwargs
kwargs["past_key_values"] = self.get_prompt(batch_size)
return self.base_model(input_ids=input_ids, **kwargs)
else:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
prompts = self.get_prompt(batch_size=batch_size)
prompts = prompts.to(inputs_embeds.dtype)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)
return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
@dataclass
class TunerLayerStatus:
name: str
module_type: str
enabled: bool
active_adapters: list[str]
merged_adapters: list[str]
requires_grad: dict[str, bool | Literal["irregular"]]
available_adapters: list[str]
devices: dict[str, list[str]]
def get_layer_status(model: torch.nn.Module) -> list[TunerLayerStatus]:
"""Get the status of each adapter layer in the model.
This function returns a list of `TunerLayerStatus` dataclass instances, each of which contains the following
attributes:
- `name` (`str`):
The name of the adapter layer, e.g. `model.encoder.block.0.layer.0.SelfAttention.q`.
- `module_type` (`str`):
The type of the adapter layer, e.g. `lora.Linear`.
- `enabled` (`bool`):
Whether the adapter layer is enabled.
- `active_adapters` (`list[str]`):
The names of the active adapters, if any, e.g. `["default"]`.
- `merged_adapters` (`list[str]`):
The names of the merged adapters, if any, e.g. `["default"]`.
    - `requires_grad` (`dict[str, bool | Literal["irregular"]]`):
        The requires_grad status of the parameters for each adapter module. Ideally, it should be either `True` or
        `False`. If the requires_grad status is not consistent across all parameters, the value will be set to
        `"irregular"`.
- `available_adapters` (`list[str]`):
The names of the available adapters, e.g. `["default"]`.
- `devices` (`dict[str, list[str]]`):
The devices where the parameters of the given adapter are stored, e.g. `["cuda"]`.
Args:
model ([Union[`~PeftModel`, `~transformers.PreTrainedModel`, `nn.Module`]]):
The model to get the adapter layer status from.
Returns:
list[`peft.peft_model.TunerLayerStatus`]:
A list of dataclasses, each containing the status of the corresponding adapter layer.
"""
if isinstance(model, PeftModel):
base_model = model.base_model
if not isinstance(base_model, BaseTuner):
raise TypeError(
"get_layer_status() got an invalid PeftModel instance; prefix tuning and adaption prompt are not "
"supported."
)
else:
base_model = model
layer_status: list[TunerLayerStatus] = []
for name, module in base_model.named_modules():
if not isinstance(module, BaseTunerLayer):
continue
        # determine if all submodules/parameters of this module require grad or not
mapping_requires_grad_list: dict[str, list[bool]] = collections.defaultdict(list)
for adapter_module_name in module.adapter_layer_names:
adapter_module = getattr(module, adapter_module_name)
if isinstance(adapter_module, torch.nn.ModuleDict):
for key, submodule in adapter_module.items():
for param in submodule.parameters():
mapping_requires_grad_list[key].append(param.requires_grad)
elif isinstance(adapter_module, torch.nn.ParameterDict):
for key, param in adapter_module.items():
mapping_requires_grad_list[key].append(param.requires_grad)
else:
# strange, we don't know how to handle this, ignore for now
pass
        def check_irregular(vals: list[bool]) -> bool | Literal["irregular"]:
if all(vals):
return True
if not any(vals):
return False
return "irregular"
        requires_grad = {key: check_irregular(vals) for key, vals in mapping_requires_grad_list.items()}
devices_dd = collections.defaultdict(list)
for adapter_module_name in module.adapter_layer_names + module.other_param_names:
adapter_module = getattr(module, adapter_module_name)
if isinstance(adapter_module, torch.nn.ModuleDict):
for key, submodule in adapter_module.items():
devices_dd[key].extend([param.device.type for param in submodule.parameters()])
elif (
isinstance(adapter_module, torch.nn.ParameterDict)
or (adapter_module.__class__.__name__ == "BufferDict") # VeRA
):
for key, param in adapter_module.items():
devices_dd[key].append(param.device.type)
devices = {key: sorted(set(val)) for key, val in devices_dd.items()}
status = TunerLayerStatus(
name=name,
module_type=repr(module).partition("(")[0],
enabled=not module.disable_adapters,
active_adapters=module.active_adapters,
merged_adapters=module.merged_adapters,
requires_grad=requires_grad,
available_adapters=sorted(module._get_available_adapters()),
devices=devices,
)
layer_status.append(status)
if not layer_status:
raise ValueError(
"No adapter layers found in the model, please ensure that it's a PEFT model or that you have PEFT adapters "
"injected in the model."
)
return layer_status
@dataclass
class TunerModelStatus:
base_model_type: str
adapter_model_type: str
peft_types: dict[str, str]
trainable_params: int
total_params: int
num_adapter_layers: int
enabled: bool | Literal["irregular"]
active_adapters: list[str] | Literal["irregular"]
merged_adapters: list[str] | Literal["irregular"]
requires_grad: dict[str, bool | Literal["irregular"]]
available_adapters: list[str]
devices: dict[str, list[str]]
def get_model_status(model: torch.nn.Module) -> TunerModelStatus:
"""Get the status of tuners of the model.
This function returns a `TunerModelStatus` dataclass instance, which contains the following attributes:
- `base_model_type` (`str`):
The type of the base model, e.g. `T5Model`.
- `adapter_model_type` (`str`):
The type of the adapter model, e.g. `LoraModel`.
- `peft_types` (`dict[str, str]`):
The mapping of adapter name to adapter type, e.g. `{"default": "LORA"}`.
- `trainable_params` (`int`):
The number of trainable parameters in the model.
- `total_params` (`int`):
The total number of parameters in the model.
- `num_adapter_layers` (`int`):
The number of adapter layers in the model.
- `enabled` (`bool`, `Literal["irregular"]`):
Whether all adapter layers are enabled. If some are enabled and some are not, this will be `"irregular"`. This
means that your model is in an inconsistent state and might not work as expected.
- `active_adapters` (`list[str]`, `Literal["irregular"]`):
The names of the active adapters. If the active adapters are not consistent across all layers, this will be
`"irregular"`, which means that your model is in an inconsistent state and might not work as expected.
- `merged_adapters` (`list[str]`, `Literal["irregular"]`):
The names of the merged adapters. If the merged adapters are not consistent across all layers, this will be
`"irregular"`, which means that your model is in an inconsistent state and might not work as expected.
- `requires_grad` (`dict[str, bool | Literal["irregular"]]`):
Whether for the given adapter, all adapter layers have `requires_grad` set to `True` or `False`. If there is a
mix, this will be set to `"irregular"`, which means that your model is in an inconsistent state and might not
work as expected.
- `available_adapters` (`list[str]`):
The names of the available adapters, e.g. `["default"]`.
- `devices` (`dict[str, list[str]]`):
The devices where the parameters of the given adapter are stored, e.g. `["cuda"]`.
Args:
model ([Union[`~PeftModel`, `~transformers.PreTrainedModel`, `nn.Module`]]):
The model to get the adapter layer status from.
Returns:
`peft.peft_model.TunerModelStatus`:
A dataclass containing the status of the model.
"""
if isinstance(model, PeftModel):
if not isinstance(model.base_model, BaseTuner):
raise TypeError(
"get_model_status() got an invalid PeftModel instance; prefix tuning and adaption prompt are not "
"supported."
)
base_model_type = model.get_base_model().__class__.__name__
trainable_params, total_params = model.get_nb_trainable_parameters()
base_model = model.base_model
peft_types = {key: str(config.peft_type).partition(".")[-1] for key, config in base_model.peft_config.items()}
adapter_model_type = base_model.__class__.__name__
elif isinstance(model, PreTrainedModel):
base_model_type = model.__class__.__name__
trainable_params, total_params = PeftModel.get_nb_trainable_parameters(model)
base_model = model
peft_types = {}
adapter_model_type = "None"
else:
base_model_type = "other"
trainable_params, total_params = PeftModel.get_nb_trainable_parameters(model)
base_model = model
peft_types = {}
adapter_model_type = "None"
layer_status = get_layer_status(model)
num_adapter_layers = len(layer_status)
enabled_set: set[bool] = {status.enabled for status in layer_status} # must be {True}, {False}, or {True, False}
enabled: bool | Literal["irregular"]
if len(enabled_set) == 1:
enabled = enabled_set.pop()
else:
enabled = "irregular"
available_adapters: list[str] = sorted(set().union(*(status.available_adapters for status in layer_status)))
# ideally, active adapters should be consistent across all layers of the model, but we cannot guarantee it
all_active_adapters: set[tuple[str, ...]] = {tuple(status.active_adapters) for status in layer_status}
active_adapters: list[str] | Literal["irregular"]
if not all_active_adapters:
active_adapters = []
elif len(all_active_adapters) == 1:
active_adapters = list(all_active_adapters.pop())
else:
active_adapters = "irregular"
# Here we determine what adapters are merged. This is not trivial because multiple adapters can be merged or not at
# the same time. Some layers may only have adapter A, some only adapter B, so it's not as easy as just checking
# which adapters are merged on each layer.
    # First, determine all adapters that are merged on at least one module.
merged_all: set[str] = set()
for status in layer_status:
merged_all.update(status.merged_adapters)
    # Next, check if, on any layer, one of these adapters is not merged.
merged_adapters: list[str] | Literal["irregular"] = sorted(merged_all)
for status in layer_status:
unmerged = set(status.available_adapters) - set(status.merged_adapters)
if unmerged & merged_all:
# there is overlap between unmerged adapters and adapters that should be merged
merged_adapters = "irregular"
break
# check status of requires_grad
# first, merge the values for all layers
requires_grad_all: dict[str, list[bool | Literal["irregular"]]] = collections.defaultdict(list)
for status in layer_status:
for key, val in status.requires_grad.items():
requires_grad_all[key].append(val)
# then, check if the values are consistent
    def check_irregular(vals: list[bool | Literal["irregular"]]) -> bool | Literal["irregular"]:
if all(val is True for val in vals):
return True
if all(val is False for val in vals):
return False
return "irregular"
    requires_grad = {key: check_irregular(vals) for key, vals in requires_grad_all.items()}
devices_dd = collections.defaultdict(list)
for status in layer_status:
for key, val in status.devices.items():
devices_dd[key].extend(val)
devices = {key: sorted(set(val)) for key, val in devices_dd.items()}
adapter_model_status = TunerModelStatus(
base_model_type=base_model_type,
adapter_model_type=adapter_model_type,
peft_types=peft_types,
trainable_params=trainable_params,
total_params=total_params,
num_adapter_layers=num_adapter_layers,
enabled=enabled,
active_adapters=active_adapters,
merged_adapters=merged_adapters,
requires_grad=requires_grad,
available_adapters=available_adapters,
devices=devices,
)
return adapter_model_status
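# Illustrative usage sketch for the status helpers above (a hedged example; the model name and
# target modules are placeholders, not a prescription):
#
#     from transformers import AutoModelForCausalLM
#     from peft import LoraConfig, get_peft_model
#
#     base = AutoModelForCausalLM.from_pretrained("gpt2")
#     peft_model = get_peft_model(base, LoraConfig(task_type="CAUSAL_LM", target_modules=["c_attn"]))
#
#     model_status = get_model_status(peft_model)
#     print(model_status.trainable_params, model_status.total_params, model_status.active_adapters)
#     for layer in get_layer_status(peft_model):
#         print(layer.name, layer.module_type, layer.requires_grad, layer.devices)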
|
peft/src/peft/peft_model.py/0
|
{
"file_path": "peft/src/peft/peft_model.py",
"repo_id": "peft",
"token_count": 60834
}
| 217
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The implementation is based on "Parameter-Efficient Orthogonal Finetuning
# via Butterfly Factorization" (https://arxiv.org/abs/2311.06243) in ICLR 2024.
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class BOFTConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`BOFTModel`].
Args:
boft_block_size (`int`): BOFT block size across different layers.
boft_block_num (`int`): Number of BOFT blocks per injected layer.
boft_n_butterfly_factor (`int`): Number of butterfly factors across different layers.
target_modules (`Union[List[str],str]`): The names of the modules to apply the adapter to.
boft_dropout (`float`): The multiplicative dropout probability for BOFT layers.
fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).
For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set
to `True`.
bias (`str`): Bias type for BOFT. Can be 'none', 'all' or 'boft_only'. If 'all' or 'boft_only', the
corresponding biases will be updated during training. Be aware that this means that, even when disabling
the adapters, the model will not produce the same output as the base model would have without adaptation.
        modules_to_save (`List[str]`): List of modules apart from BOFT layers to be set as trainable
and saved in the final checkpoint.
layers_to_transform (`Union[List[int],int]`):
            The layer indexes to transform. If this argument is specified, it will apply the BOFT transformations on
the layer indexes that are specified in this list. If a single integer is passed, it will apply the BOFT
transformations on the layer at this index.
layers_pattern (`str`):
The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer
pattern is not in the common layers pattern.
"""
boft_block_size: int = field(
default=4,
metadata={
"help": "BOFT block size across different layers.",
"note": "You can only specify either boft_block_size or boft_block_num, but not both simultaneously, because boft_block_size x boft_block_num = layer dimension.",
},
)
boft_block_num: int = field(
default=0,
metadata={
"help": "Number of BOFT blocks per injected layer.",
"note": "You can only specify either boft_block_size or boft_block_num, but not both simultaneously, because boft_block_size x boft_block_num = layer dimension.",
},
)
boft_n_butterfly_factor: int = field(
default=1,
metadata={
"help": "Number of butterfly factors.",
"note": (
"for example, boft_n_butterfly_factor=2, the effective block size of OFT becomes twice as big and the number of blocks become half.",
"note: for boft_n_butterfly_factor=1, BOFT is the same as vanilla OFT.",
),
},
)
target_modules: Optional[Union[List[str], str]] = field(
default=None,
metadata={
"help": "List of module names or regex expression of the module names to replace with BOFT.",
"example": "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' ",
},
)
boft_dropout: float = field(default=0.0, metadata={"help": "BOFT multiplicative dropout"})
fan_in_fan_out: bool = field(
default=False,
metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
)
bias: str = field(default="none", metadata={"help": "Bias type for BOFT. Can be 'none', 'all' or 'boft_only'"})
modules_to_save: Optional[List[str]] = field(
default=None,
metadata={
"help": "List of modules apart from BOFT layers to be set as trainable and saved in the final checkpoint. ",
"note": (
"For example, in Sequence Classification or Token Classification tasks, ",
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.",
),
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the BOFT layers with their default initialization. Don't change ",
"this setting, except if you know exactly what you're doing.",
),
},
)
layers_to_transform: Optional[Union[List[int], int]] = field(
default=None,
metadata={
"help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
},
)
layers_pattern: Optional[str] = field(
default=None,
metadata={
"help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
},
)
def __post_init__(self):
self.peft_type = PeftType.BOFT
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
if self.boft_block_size == 0 and self.boft_block_num == 0:
raise ValueError("You must specify either boft_block_size or boft_block_num.")
if not (self.boft_block_size != 0) ^ (self.boft_block_num != 0):
raise ValueError(
f"You can only specify either boft_block_size ({self.boft_block_size}) or boft_block_num ({self.boft_block_num}), "
"but not both simultaneously, because boft_block_size x boft_block_num != in_features."
)
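# Illustrative sketch (a hedged example; the module names are placeholders and must match the base
# model's layer names):
#
#     config = BOFTConfig(
#         boft_block_size=4,
#         boft_n_butterfly_factor=2,
#         target_modules=["query", "value"],
#         boft_dropout=0.05,
#         bias="none",
#     )
#
# Exactly one of `boft_block_size` / `boft_block_num` may be non-zero, since their product must
# equal the dimension of each injected layer (see `__post_init__` above).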
|
peft/src/peft/tuners/boft/config.py/0
|
{
"file_path": "peft/src/peft/tuners/boft/config.py",
"repo_id": "peft",
"token_count": 2537
}
| 218
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class IA3Config(PeftConfig):
"""
This is the configuration class to store the configuration of a [`IA3Model`].
Args:
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
excluding the output layer. If this is not specified, modules will be chosen according to the model
architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
the target modules manually.
feedforward_modules (`Optional[Union[List[str], str]]`):
The names of the modules to be treated as feedforward modules, as in the original paper. These modules will
have (IA)³ vectors multiplied to the input, instead of the output. `feedforward_modules` must be a name or
a subset of names present in `target_modules`.
fan_in_fan_out (`bool`):
Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
modules_to_save (`Optional[List[str]]`):
List of modules apart from (IA)³ layers to be set as trainable and saved in the final checkpoint.
init_ia3_weights (`bool`):
Whether to initialize the vectors in the (IA)³ layers, defaults to `True`. Setting this to `False` is
discouraged.
"""
target_modules: Optional[Union[List[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with (IA)³."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'."
"This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
"If not specified, modules will be chosen according to the model architecture, If the architecture is "
"not known, an error will be raised -- in this case, you should specify the target modules manually."
),
},
)
feedforward_modules: Optional[Union[List[str], str]] = field(
default=None,
metadata={
"help": "List of module names or a regex expression of module names which are feedforward"
"For example, ['output.dense']"
},
)
fan_in_fan_out: bool = field(
default=False,
metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
)
modules_to_save: Optional[List[str]] = field(
default=None,
metadata={
"help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
init_ia3_weights: bool = field(
default=True,
metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."},
)
def __post_init__(self):
self.peft_type = PeftType.IA3
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.feedforward_modules = (
set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules
)
# check if feedforward_modules is a subset of target_modules. run the check only if both are sets
if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set):
if not self.feedforward_modules.issubset(self.target_modules):
raise ValueError("`feedforward_modules` should be a subset of `target_modules`")
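# Illustrative sketch (a hedged example; the module names are placeholders): `feedforward_modules`
# must be a subset of `target_modules`, which `__post_init__` above enforces when both are lists.
#
#     config = IA3Config(
#         target_modules=["k_proj", "v_proj", "down_proj"],
#         feedforward_modules=["down_proj"],
#     )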
|
peft/src/peft/tuners/ia3/config.py/0
|
{
"file_path": "peft/src/peft/tuners/ia3/config.py",
"repo_id": "peft",
"token_count": 1842
}
| 219
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_aqlm_available
from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
if is_aqlm_available():
from aqlm import QuantizedLinear
class AqlmLoraLinear(torch.nn.Module, LoraLayer):
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
use_rslora: bool = False,
**kwargs,
):
super().__init__()
LoraLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora)
def forward(self, x: torch.Tensor):
# note: logic differs from default Linear because merging is not supported
result = self.base_layer(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
x = x.to(lora_A.weight.dtype)
output = lora_B(lora_A(dropout(x)))
if requires_conversion:
output = output.to(expected_dtype)
output = output * scaling
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "lora." + rep
# TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
# def reset_lora_parameters(self, adapter_name):
# if adapter_name in self.lora_A.keys():
# torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
# torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
def dispatch_aqlm(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
new_module = AqlmLoraLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.codes
return new_module
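# Note on `dispatch_aqlm` above: the LoRA tuner calls it when wrapping modules. Only when the base
# layer is an AQLM `QuantizedLinear` does it return an `AqlmLoraLinear`, whose forward applies the
# LoRA delta on top of the quantized layer's output; merging the delta into the quantized weights is
# not supported (see the note in `AqlmLoraLinear.forward`).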
|
peft/src/peft/tuners/lora/aqlm.py/0
|
{
"file_path": "peft/src/peft/tuners/lora/aqlm.py",
"repo_id": "peft",
"token_count": 1399
}
| 220
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from peft.tuners.prompt_tuning import PromptEmbedding
from peft.utils import TaskType
from peft.utils.save_and_load import torch_load
from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
# This code is adapted from the paper: https://arxiv.org/abs/2303.02861 and
# constitutes the work done at MIT-IBM Watson Research Lab.
class MultitaskPromptEmbedding(PromptEmbedding):
def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings):
super().__init__(config, word_embeddings)
self.num_tasks = config.num_tasks
self.num_ranks = config.num_ranks
self.num_virtual_tokens = config.num_virtual_tokens
self.num_transformer_submodules = config.num_transformer_submodules
if self.num_transformer_submodules is None:
self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
self.token_dim = config.token_dim
total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules
self.prefix_task_cols = torch.nn.Parameter(
torch.normal(
mean=0,
std=0.02,
size=(self.num_tasks, total_virtual_tokens, self.num_ranks),
)
)
self.prefix_task_rows = torch.nn.Parameter(
torch.normal(
mean=0,
std=0.02,
size=(self.num_tasks, self.num_ranks, self.token_dim),
)
)
if config.prompt_tuning_init in [
MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
MultitaskPromptTuningInit.ONLY_SOURCE_SHARED,
]:
if config.prompt_tuning_init_state_dict_path is None:
raise ValueError(
f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} "
"init method"
)
if config.prompt_tuning_init_state_dict_path.endswith(".safetensors"):
from safetensors.torch import load_file
state_dict: dict = load_file(config.prompt_tuning_init_state_dict_path)
else:
state_dict: dict = torch_load(
config.prompt_tuning_init_state_dict_path,
map_location=word_embeddings.weight.device,
)
if config.prompt_tuning_init in [
MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
]:
prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"]
prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"]
if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS:
prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True)
prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True)
elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK:
prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0)
prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0)
state_dict = {
"embedding.weight": state_dict["prompt_embeddings"],
"prefix_task_cols": prefix_task_cols_,
"prefix_task_rows": prefix_task_rows_,
}
self.load_state_dict(state_dict, strict=True)
elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED:
state_dict = {
"embedding.weight": state_dict["prompt_embeddings"],
}
self.load_state_dict(state_dict, strict=False)
def forward(self, indices, task_ids):
if task_ids is None:
raise ValueError("task_ids cannot be None")
prompt_embeddings = self.embedding(indices)
task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids)
task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids)
task_prompts = torch.matmul(task_cols, task_rows)
prompt_embeddings *= task_prompts
return prompt_embeddings
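A shape sketch of the forward pass above (dimensions are illustrative): the per-task column and row factors are multiplied into a rank-limited scaling matrix that gates the shared prompt embedding element-wise.
```py
# Illustrative shapes only; real values come from the learned parameters above.
import torch

batch, tokens, rank, dim = 2, 8, 4, 16
prompt_embeddings = torch.randn(batch, tokens, dim)   # shared prompt, (B, T, D)
task_cols = torch.randn(batch, tokens, rank)          # selected prefix_task_cols, (B, T, R)
task_rows = torch.randn(batch, rank, dim)             # selected prefix_task_rows, (B, R, D)
task_prompts = torch.matmul(task_cols, task_rows)     # low-rank task scaling, (B, T, D)
out = prompt_embeddings * task_prompts                # element-wise gating, (B, T, D)
assert out.shape == (batch, tokens, dim)
```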
|
peft/src/peft/tuners/multitask_prompt_tuning/model.py/0
|
{
"file_path": "peft/src/peft/tuners/multitask_prompt_tuning/model.py",
"repo_id": "peft",
"token_count": 2251
}
| 221
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference code: https://github.com/yxli2123/LoftQ/blob/main/utils.py
# Reference paper: https://arxiv.org/abs/2310.08659
from __future__ import annotations
import logging
import os
from typing import Callable, Optional, Union
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.errors import HFValidationError
from huggingface_hub.utils import LocalEntryNotFoundError
from safetensors import SafetensorError, safe_open
from transformers.utils import cached_file
from transformers.utils.hub import get_checkpoint_shard_files
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
class NFQuantizer:
def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_bits = num_bits
self.device = device
self.method = method
self.block_size = block_size
if self.method == "normal":
self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits)
self.norm_lookup_table = self.norm_lookup_table.to(device)
elif self.method == "uniform":
self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits)
self.norm_lookup_table = self.norm_lookup_table.to(device)
else:
raise NotImplementedError("Other quantization methods not supported yet.")
@staticmethod
def create_uniform_map(symmetric=False, num_bits=4):
if symmetric:
# print("symmetric uniform quantization")
negative = torch.linspace(-1, 0, 2 ** (num_bits - 1))
positive = torch.linspace(0, 1, 2 ** (num_bits - 1))
table = torch.cat([negative, positive[1:]])
else:
# print("asymmetric uniform quantization")
table = torch.linspace(-1, 1, 2**num_bits)
return table
@staticmethod
def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2):
try:
from scipy.stats import norm
except ImportError:
raise ImportError("The required package 'scipy' is not installed. Please install it to continue.")
variations = 2**num_bits
if symmetric:
v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist()
values = []
for index in range(len(v) - 1):
values.append(0.5 * v[index] + 0.5 * v[index + 1])
v = values
else:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist()
v2 = [0]
v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist()
v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
return values
def quantize_tensor(self, weight):
max_abs = torch.abs(weight).max()
weight_normed = weight / max_abs
weight_normed_expanded = weight_normed.unsqueeze(-1)
# Reshape L to have the same number of dimensions as X_expanded
L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1)
# Calculate the absolute difference between X_expanded and L_reshaped
abs_diff = torch.abs(weight_normed_expanded - L_reshaped)
# Find the index of the minimum absolute difference for each element
qweight = torch.argmin(abs_diff, dim=-1)
return qweight, max_abs
def dequantize_tensor(self, qweight, max_abs):
qweight_flatten = qweight.flatten()
weight_normed = self.norm_lookup_table[qweight_flatten]
weight = weight_normed * max_abs
weight = weight.reshape(qweight.shape)
return weight
def quantize_block(self, weight):
if len(weight.shape) != 2:
raise ValueError(f"Only support 2D matrix, but your input has {len(weight.shape)} dimensions.")
if weight.shape[0] * weight.shape[1] % self.block_size != 0:
raise ValueError(
f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) "
f"is not dividable by block size {self.block_size}."
)
M, N = weight.shape
device = weight.device
# Quantization
weight_flatten = weight.flatten() # (M*N, )
weight_block = weight_flatten.reshape(-1, self.block_size) # (L, B), L = M * N / B
if self.method == "normal":
weight_max = weight_block.abs().max(dim=-1)[0] # (L, 1)
elif self.method == "uniform":
weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1)
else:
raise NotImplementedError("Method not supported yet.")
weight_max = weight_max.unsqueeze(-1)
weight_divabs = weight_block / weight_max # (L, B)
weight_divabs = weight_divabs.unsqueeze(-1) # (L, B, 1)
L_reshaped = self.norm_lookup_table.reshape(1, -1) # (1, 2**K)
abs_diff = torch.abs(weight_divabs - L_reshaped) # (L, B, 2**K)
qweight = torch.argmin(abs_diff, dim=-1) # (L, B)
# Pack multiple k-bit into uint8
qweight = qweight.reshape(-1, 8 // self.num_bits)
qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device)
# data format example:
# [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO
for i in range(8 // self.num_bits):
qweight[:, i] = qweight[:, i] << i * self.num_bits
qweight_pack[:, 0] |= qweight[:, i]
return qweight_pack, weight_max, weight.shape
def dequantize_block(self, qweight, weight_max, weight_shape):
# unpack weight
device = qweight.device
weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device)
for i in range(8 // self.num_bits):
lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits  # get the rightmost num_bits bits
lookup_table_idx = lookup_table_idx.to(torch.long)
weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze()
qweight = qweight >> self.num_bits  # shift right by num_bits to expose the next packed value
weight_block = weight.reshape(-1, self.block_size)
weight = weight_block * weight_max
weight = weight.reshape(weight_shape)
return weight
def _low_rank_decomposition(weight, reduced_rank=32):
"""
:param weight: the matrix to decompose, of shape (H, W)
:param reduced_rank: the target rank of the decomposition
:return: a dict with the low-rank factors ``L`` and ``R``, the full SVD components and ``reduced_rank``
"""
matrix_dimension = len(weight.size())
if matrix_dimension != 2:
raise ValueError(f"Only support 2D matrix, but your input has {matrix_dimension} dimensions.")
# Use SVD to decompose a matrix, default full_matrices is False to save parameters
U, S, Vh = torch.linalg.svd(weight, full_matrices=False)
L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank]))
R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh
return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank}
@torch.no_grad()
def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1):
if is_bnb_available():
import bitsandbytes as bnb
else:
raise ValueError("bitsandbytes is not available, please install it to use LoftQ.")
if num_bits not in [2, 4, 8]:
raise ValueError("Only support 2, 4, 8 bits quantization")
if num_iter <= 0:
raise ValueError("Number of iterations must be greater than 0")
out_feature, in_feature = weight.size()
device = weight.device
dtype = weight.dtype
logging.info(
f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} "
f"| Num Iter: {num_iter} | Num Bits: {num_bits}"
)
if not is_bnb_4bit_available() or num_bits in [2, 8]:
quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)
compute_device = device
else:
compute_device = "cuda"
weight = weight.to(device=compute_device, dtype=torch.float32)
res = weight.clone()
for i in range(num_iter):
torch.cuda.empty_cache()
# Quantization
if num_bits == 4 and is_bnb_4bit_available():
qweight = bnb.nn.Params4bit(
res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4"
).to(compute_device)
dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
else:
quantized_weight, max_abs, shape = quantizer.quantize_block(res)
dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape)
res = weight - dequantized_weight
# Decompose the residual by SVD
output = _low_rank_decomposition(res, reduced_rank=reduced_rank)
L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
res = weight - torch.mm(L, R)
lora_A, lora_B = R, L
return dequantized_weight.to(device=device, dtype=dtype), lora_A, lora_B
@torch.no_grad()
def _loftq_init_new(qweight, weight, num_bits: int, reduced_rank: int):
import bitsandbytes as bnb
if num_bits != 4:
raise ValueError("Only 4 bit quantization supported at the moment.")
if not is_bnb_4bit_available():
raise ValueError("bitsandbytes 4bit quantization is not available.")
compute_device = "cuda"
dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
weight = weight.to(device=compute_device, dtype=torch.float32)
residual = weight - dequantized_weight
torch.cuda.empty_cache()
# Decompose the residual by SVD
output = _low_rank_decomposition(residual, reduced_rank=reduced_rank)
L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
return R, L
class _SafetensorLoader:
"""
Simple utility class that loads tensors with safetensors from a single file or sharded files.
Takes care of file name normalization etc.
"""
def __init__(self, peft_model, model_path):
if model_path is None:
try:
model_path = snapshot_download(peft_model.base_model.config._name_or_path, local_files_only=True)
except (AttributeError, HFValidationError) as exc:
raise ValueError(
"The provided model does not appear to be a transformers model or is a local model. In this case, "
"you must pass the model_path argument that points to the safetensors file."
) from exc
except LocalEntryNotFoundError as exc:
raise ValueError(
"The model.safetensors file must be present on disk, but it could not be found."
) from exc
suffix = "model.safetensors"
if not model_path.endswith(suffix):
model_path = os.path.join(model_path, suffix)
self.model_path = model_path
self.base_model_prefix = getattr(peft_model.get_base_model(), "base_model_prefix", None)
self.prefix = "base_model.model."
self.is_sharded = False
self.weight_map = None
if not os.path.exists(model_path):
# check if the file is sharded
par_dir = model_path.rpartition(os.path.sep)[0]
try:
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
par_dir, cached_file(par_dir, "model.safetensors.index.json")
)
except OSError as exc:
raise FileNotFoundError(
f"Could not find file for {model_path}, ensure that there is a (sharded) safetensors file of the model."
) from exc
self.is_sharded = True
# maps from 'model-X-of-Y.safetensors' to full file path
file_map = {k.rpartition(os.path.sep)[-1]: k for k in resolved_archive_file}
self.weight_map = {k: file_map[v] for k, v in sharded_metadata["weight_map"].items()}
def get_tensor(self, name):
if not self.is_sharded:
file_path = self.model_path
else:
file_path = self.weight_map[name]
with safe_open(file_path, framework="pt", device="cpu") as f:
try:
tensor = f.get_tensor(name)
except SafetensorError as exc:
# no matching key found, we probably need to remove the base model prefix
if self.base_model_prefix:
# remove 1 extra character for "."
name = name[len(self.base_model_prefix) + 1 :]
tensor = f.get_tensor(name)
else:
raise exc
return tensor
@torch.no_grad()
def replace_lora_weights_loftq(
peft_model,
model_path: Optional[str] = None,
adapter_name: str = "default",
callback: Optional[Callable[[torch.nn.Module, str], bool]] = None,
):
"""
Replace the LoRA weights of a model quantized with bitsandbytes, using the LoftQ technique.
The replacement is done on the fly by loading in the non-quantized weights from a locally stored safetensors model
file and initializing the LoRA weights such that the quantization error between the original and quantized weights
is minimized.
As lazy loading is not possible with pickle, normal PyTorch checkpoint files cannot be supported.
Depending on the model size, calling this function may take some time to finish.
Args:
peft_model (`PeftModel`):
The model to replace the weights of. Must be a quantized PEFT model with LoRA layers.
model_path (`Optional[str]`):
The path to the model safetensors file. If the model is a Hugging Face model, this will be inferred from
the model's config. Otherwise, it must be provided.
adapter_name (`str`):
The name of the adapter to replace the weights of. The default adapter name is "default".
callback (`Optional[Callable[[PeftModel, str], bool]]`):
A callback function that will be called after each module is replaced. The callback function should take
the model and the name of the current module as input and return a boolean indicating whether the
replacement should be kept. If the callback returns False, the replacement will be rolled back. This can be
very useful to confirm that the LoftQ initialization actually decreases the quantization error of the
model. As an example, this callback could generate logits for given input and compare it with the logits
from the original, non-quantized model with the same input, and only return `True` if there is an
improvement. As this is a greedy optimization, it's possible that calling this function multiple times
yields incremental improvements.
"""
if not is_bnb_4bit_available():
raise ValueError("bitsandbytes must be installed and the model must be quantized in 4bits.")
from peft.tuners.lora import Linear4bit
# model_path = _check_model_path_loftq(model_path, peft_model)
prefix = "base_model.model."
any_match = False
safetensor_loader = _SafetensorLoader(peft_model, model_path)
# if too slow, consider adding tqdm as an option
for name, module in peft_model.named_modules():
if not isinstance(module, Linear4bit):
continue
if not name.startswith(prefix):
raise TypeError("The passed model does not appear to be a valid PeftModel")
any_match = True
name = name[len(prefix) :]
tensor = safetensor_loader.get_tensor(name + ".weight")
reduced_rank = module.r[adapter_name]
lora_A, lora_B = _loftq_init_new(module.weight, tensor, num_bits=4, reduced_rank=reduced_rank)
if not callback:
module.lora_A[adapter_name].weight.data = lora_A
module.lora_B[adapter_name].weight.data = lora_B
continue
lora_A_before = module.lora_A[adapter_name].weight.data
lora_B_before = module.lora_B[adapter_name].weight.data
module.lora_A[adapter_name].weight.data = lora_A
module.lora_B[adapter_name].weight.data = lora_B
should_replace = callback(peft_model, name)
if not should_replace:
# roll back
module.lora_A[adapter_name].weight.data = lora_A_before
module.lora_B[adapter_name].weight.data = lora_B_before
del lora_A_before, lora_B_before
if not any_match:
raise ValueError("No bnb LoRA module found on the model")
|
peft/src/peft/utils/loftq_utils.py/0
|
{
"file_path": "peft/src/peft/utils/loftq_utils.py",
"repo_id": "peft",
"token_count": 7269
}
| 222
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from transformers import AutoModel
from peft import PrefixTuningConfig, PromptLearningConfig
from .testing_common import PeftCommonTester, PeftTestConfigManager
PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST = [
"hf-internal-testing/tiny-random-BertModel",
"hf-internal-testing/tiny-random-RobertaModel",
"hf-internal-testing/tiny-random-DebertaModel",
"hf-internal-testing/tiny-random-DebertaV2Model",
]
FULL_GRID = {
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"task_type": "FEATURE_EXTRACTION",
}
def skip_non_prompt_tuning(test_list):
"""Skip tests that are not prompt tuning"""
return [
test for test in test_list if issubclass(test[2], PromptLearningConfig) and (test[2] != PrefixTuningConfig)
]
def skip_deberta_lora_tests(test_list):
r"""
Skip gradient checkpointing tests with lora/ia3/boft/vera/fourierft/hra for Deberta models (couldn't find much
info on the error)
"""
to_skip = ["lora", "ia3", "boft", "vera", "fourierft", "hra"]
return [test for test in test_list if not (any(k in test[0] for k in to_skip) and "Deberta" in test[0])]
def skip_deberta_pt_tests(test_list):
r"""
Skip prefix tuning tests for Deberta models (couldn't find much info on the error)
"""
return [test for test in test_list if not ("prefix_tuning" in test[0] and "Deberta" in test[0])]
class PeftFeatureExtractionModelTester(unittest.TestCase, PeftCommonTester):
r"""
Test if the PeftModel behaves as expected. This includes:
- test if the model has the expected methods
We use parametrized.expand for debugging purposes to test each model individually.
"""
transformers_class = AutoModel
def prepare_inputs_for_testing(self):
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
input_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return input_dict
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_prepare_for_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"task_type": "FEATURE_EXTRACTION",
},
)
)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training(self, test_name, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_pt_tests)
)
def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_layer_indexing(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_lora_tests)
)
def test_training_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"task_type": "FEATURE_EXTRACTION",
},
)
)
def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_unload_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"task_type": "FEATURE_EXTRACTION",
},
)
)
def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_prompt_tuning)
)
def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
|
peft/tests/test_feature_extraction_models.py/0
|
{
"file_path": "peft/tests/test_feature_extraction_models.py",
"repo_id": "peft",
"token_count": 3721
}
| 223
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is not a full on test suite of vision models, since we already run many tests on dummy models with Conv2d layers
# and on stable diffusion models. Instead, this file contains specific tests for bugs that have been found in the past.
import gc
import numpy as np
import pytest
import torch
from datasets import load_dataset
from safetensors.torch import load_file
from transformers import (
AutoImageProcessor,
AutoModelForImageClassification,
AutoProcessor,
LlavaForConditionalGeneration,
)
from peft import (
HRAConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
OFTConfig,
PeftModel,
PrefixTuningConfig,
get_peft_model,
)
CONFIGS = {
"lora": LoraConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"loha": LoHaConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"lokr": LoKrConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"oft": OFTConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
"hra": HRAConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"]),
# TODO: cannot use BOFT because some convolutional kernel dimensions are even (64) and others odd (147). There is no
# common divisor for the boft_block_size other than 1, but using 1 results in an error in the fbd_cuda kernel:
# > Error in forward_fast_block_diag_cuda_kernel: an illegal memory access was encountered
# "boft": BOFTConfig(target_modules=["convolution"], modules_to_save=["classifier", "normalization"], boft_block_size=2),
}
# Ensure that models like Llava that pass past_key_values automatically do not fail, see #1938
class TestPastKV:
def test_past_kv(self):
model_id = "trl-internal-testing/tiny-random-LlavaForConditionalGeneration"
prompt = "USER: <image>\nWhat are these?\nASSISTANT:"
# prepare model and inputs
model = LlavaForConditionalGeneration.from_pretrained(
model_id,
low_cpu_mem_usage=True,
)
processor = AutoProcessor.from_pretrained(model_id)
raw_image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
inputs = processor(prompt, raw_image, return_tensors="pt")
# get peft model
peft_config = PrefixTuningConfig(task_type="CAUSAL_LM", num_virtual_tokens=20)
model.language_model = get_peft_model(model.language_model, peft_config)
# check that this does not raise
model(**inputs, output_hidden_states=True)
class TestResnet:
model_id = "microsoft/resnet-18"
@pytest.fixture(autouse=True)
def teardown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
@pytest.fixture(scope="class")
def image_processor(self):
image_processor = AutoImageProcessor.from_pretrained(self.model_id)
return image_processor
@pytest.fixture(scope="class")
def data(self, image_processor):
dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
image = dataset["test"]["image"][0]
return image_processor(image, return_tensors="pt")
@pytest.mark.parametrize("config", CONFIGS.values(), ids=CONFIGS.keys())
def test_model_with_batchnorm_reproducibility(self, config, tmp_path, data):
# see 1732
torch.manual_seed(0)
model = AutoModelForImageClassification.from_pretrained(self.model_id)
model = get_peft_model(model, config)
# record outputs before training
model.eval()
with torch.inference_mode():
output_before = model(**data)
model.train()
# train the model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
batch_size = 4
max_steps = 5 * batch_size
labels = torch.zeros(1, 1000)
labels[0, 283] = 1
for i in range(0, max_steps, batch_size):
optimizer.zero_grad()
outputs = model(**data, labels=labels)
loss = outputs.loss
loss.backward()
optimizer.step()
# record outputs after training
model.eval()
with torch.inference_mode():
output_after = model(**data)
assert torch.isfinite(output_after.logits).all()
atol, rtol = 1e-4, 1e-4
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
# check saving the model and loading it
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForImageClassification.from_pretrained(self.model_id)
model = PeftModel.from_pretrained(model, tmp_path).eval()
with torch.inference_mode():
output_loaded = model(**data)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
# ensure that the checkpoint file contains the buffers
model_running_mean = len([k for k in model.state_dict().keys() if "running_mean" in k])
state_dict = load_file(tmp_path / "adapter_model.safetensors")
checkpoint_running_mean = len([k for k in state_dict.keys() if "running_mean" in k])
# note that the model has twice as many "running_mean" buffers, as there is one copy per ModulesToSaveWrapper;
# we therefore multiply the checkpoint count by 2 to compare
assert model_running_mean == checkpoint_running_mean * 2
|
peft/tests/test_vision_models.py/0
|
{
"file_path": "peft/tests/test_vision_models.py",
"repo_id": "peft",
"token_count": 2448
}
| 224
|
#!/usr/bin/env python3
""" Bulk Model Script Runner
Run validation or benchmark script in separate process for each model
Benchmark all 'vit*' models:
python bulk_runner.py --model-list 'vit*' --results-file vit_bench.csv benchmark.py --amp -b 512
Validate all models:
python bulk_runner.py --model-list all --results-file val.csv --pretrained validate.py /imagenet/validation/ --amp -b 512 --retry
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
import sys
import csv
import json
import subprocess
import time
from typing import Callable, List, Tuple, Union
from timm.models import is_model, list_models, get_pretrained_cfg
parser = argparse.ArgumentParser(description='Per-model process launcher')
# model and results args
parser.add_argument(
'--model-list', metavar='NAME', default='',
help='txt file based list of model names to benchmark')
parser.add_argument(
'--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument(
'--sort-key', default='', type=str, metavar='COL',
help='Specify sort key for results csv')
parser.add_argument(
"--pretrained", action='store_true',
help="only run models with pretrained weights")
parser.add_argument(
"--delay",
type=float,
default=0,
help="Interval, in seconds, to delay between model invocations.",
)
parser.add_argument(
"--start_method", type=str, default="spawn", choices=["spawn", "fork", "forkserver"],
help="Multiprocessing start method to use when creating workers.",
)
parser.add_argument(
"--no_python",
help="Skip prepending the script with 'python' - just execute it directly. Useful "
"when the script is not a Python script.",
)
parser.add_argument(
"-m",
"--module",
help="Change each process to interpret the launch script as a Python module, executing "
"with the same behavior as 'python -m'.",
)
# positional
parser.add_argument(
"script", type=str,
help="Full path to the program/script to be launched for each model config.",
)
parser.add_argument("script_args", nargs=argparse.REMAINDER)
def cmd_from_args(args) -> Tuple[Union[Callable, str], List[str]]:
# If ``args`` not passed, defaults to ``sys.argv[:1]``
with_python = not args.no_python
cmd: Union[Callable, str]
cmd_args = []
if with_python:
cmd = os.getenv("PYTHON_EXEC", sys.executable)
cmd_args.append("-u")
if args.module:
cmd_args.append("-m")
cmd_args.append(args.script)
else:
if args.module:
raise ValueError(
"Don't use both the '--no_python' flag"
" and the '--module' flag at the same time."
)
cmd = args.script
cmd_args.extend(args.script_args)
return cmd, cmd_args
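# Worked example (added for clarity): for an invocation like
#   python bulk_runner.py --model-list 'vit*' --results-file vit_bench.csv benchmark.py --amp -b 512
# cmd_from_args returns roughly
#   cmd      = sys.executable (or $PYTHON_EXEC if set)
#   cmd_args = ['-u', 'benchmark.py', '--amp', '-b', '512']
# and main() then appends '--model <name>' (plus any per-config extras) for each model run.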
def _get_model_cfgs(
model_names,
num_classes=None,
expand_train_test=False,
include_crop=True,
):
model_cfgs = []
for n in model_names:
pt_cfg = get_pretrained_cfg(n)
if num_classes is not None and getattr(pt_cfg, 'num_classes', 0) != num_classes:
continue
model_cfgs.append((n, pt_cfg.input_size[-1], pt_cfg.crop_pct))
if expand_train_test and pt_cfg.test_input_size is not None:
if pt_cfg.test_crop_pct is not None:
model_cfgs.append((n, pt_cfg.test_input_size[-1], pt_cfg.test_crop_pct))
else:
model_cfgs.append((n, pt_cfg.test_input_size[-1], pt_cfg.crop_pct))
if include_crop:
model_cfgs = [(n, {'img-size': r, 'crop-pct': cp}) for n, r, cp in sorted(model_cfgs)]
else:
model_cfgs = [(n, {'img-size': r}) for n, r, cp in sorted(model_cfgs)]
return model_cfgs
def main():
args = parser.parse_args()
cmd, cmd_args = cmd_from_args(args)
model_cfgs = []
if args.model_list == 'all':
model_names = list_models(
pretrained=args.pretrained, # only include models w/ pretrained checkpoints if set
)
model_cfgs = [(n, None) for n in model_names]
elif args.model_list == 'all_in1k':
model_names = list_models(pretrained=True)
model_cfgs = _get_model_cfgs(model_names, num_classes=1000, expand_train_test=True)
elif args.model_list == 'all_res':
model_names = list_models()
model_cfgs = _get_model_cfgs(model_names, expand_train_test=True, include_crop=False)
elif not is_model(args.model_list):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model_list)
model_cfgs = [(n, None) for n in model_names]
if not model_cfgs and os.path.exists(args.model_list):
with open(args.model_list) as f:
    model_names = [line.rstrip() for line in f]
model_cfgs = _get_model_cfgs(
    model_names,
    #num_classes=1000,
    expand_train_test=True,
    #include_crop=False,
)
if len(model_cfgs):
results_file = args.results_file or './results.csv'
results = []
errors = []
model_strings = '\n'.join([f'{x[0]}, {x[1]}' for x in model_cfgs])
print(f"Running script on these models:\n {model_strings}")
if not args.sort_key:
if 'benchmark' in args.script:
if any(['train' in a for a in args.script_args]):
sort_key = 'train_samples_per_sec'
else:
sort_key = 'infer_samples_per_sec'
else:
sort_key = 'top1'
else:
sort_key = args.sort_key
print(f'Script: {args.script}, Args: {args.script_args}, Sort key: {sort_key}')
try:
for m, ax in model_cfgs:
if not m:
continue
args_str = (cmd, *[str(e) for e in cmd_args], '--model', m)
if ax is not None:
extra_args = [(f'--{k}', str(v)) for k, v in ax.items()]
extra_args = [i for t in extra_args for i in t]
args_str += tuple(extra_args)
try:
o = subprocess.check_output(args=args_str).decode('utf-8').split('--result')[-1]
r = json.loads(o)
results.append(r)
except Exception as e:
# FIXME batch_size retry loop is currently done in either validation.py or benchmark.py
# for further robustness (but more overhead), we may want to manage that by looping here...
errors.append(dict(model=m, error=str(e)))
if args.delay:
time.sleep(args.delay)
except KeyboardInterrupt as e:
pass
errors.extend(list(filter(lambda x: 'error' in x, results)))
if errors:
print(f'{len(errors)} models had errors during run.')
for e in errors:
if 'model' in e:
print(f"\t {e['model']} ({e.get('error', 'Unknown')})")
else:
print(e)
results = list(filter(lambda x: 'error' not in x, results))
no_sortkey = list(filter(lambda x: sort_key not in x, results))
if no_sortkey:
print(f'{len(no_sortkey)} results missing sort key, skipping sort.')
else:
results = sorted(results, key=lambda x: x[sort_key], reverse=True)
if len(results):
print(f'{len(results)} models run successfully. Saving results to {results_file}.')
write_results(results_file, results)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
|
pytorch-image-models/bulk_runner.py/0
|
{
"file_path": "pytorch-image-models/bulk_runner.py",
"repo_id": "pytorch-image-models",
"token_count": 3627
}
| 225
|
# CSP-DarkNet
**CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merge them through a cross-stage hierarchy. The use of a split-and-merge strategy allows more gradient flow through the network.
This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4).
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('cspdarknet53', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `cspdarknet53`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('cspdarknet53', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{bochkovskiy2020yolov4,
title={YOLOv4: Optimal Speed and Accuracy of Object Detection},
author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao},
year={2020},
eprint={2004.10934},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: CSP DarkNet
Paper:
Title: 'YOLOv4: Optimal Speed and Accuracy of Object Detection'
URL: https://paperswithcode.com/paper/yolov4-optimal-speed-and-accuracy-of-object
Models:
- Name: cspdarknet53
In Collection: CSP DarkNet
Metadata:
FLOPs: 8545018880
Parameters: 27640000
File Size: 110775135
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Mish
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- CutMix
- Label Smoothing
- Mosaic
- Polynomial Learning Rate Decay
- SGD with Momentum
- Self-Adversarial Training
- Weight Decay
Training Data:
- ImageNet
Training Resources: 1x NVIDIA RTX 2070 GPU
ID: cspdarknet53
LR: 0.1
Layers: 53
Crop Pct: '0.887'
Momentum: 0.9
Batch Size: 128
Image Size: '256'
Warmup Steps: 1000
Weight Decay: 0.0005
Interpolation: bilinear
Training Steps: 8000000
FPS (GPU RTX 2070): 66
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L441
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.05%
Top 5 Accuracy: 95.09%
-->
|
pytorch-image-models/hfdocs/source/models/csp-darknet.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/csp-darknet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1756
}
| 226
|
# PNASNet
**Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('pnasnet5large', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `pnasnet5large`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('pnasnet5large', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{liu2018progressive,
title={Progressive Neural Architecture Search},
author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy},
year={2018},
eprint={1712.00559},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: PNASNet
Paper:
Title: Progressive Neural Architecture Search
URL: https://paperswithcode.com/paper/progressive-neural-architecture-search
Models:
- Name: pnasnet5large
In Collection: PNASNet
Metadata:
FLOPs: 31458865950
Parameters: 86060000
File Size: 345153926
Architecture:
- Average Pooling
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- ReLU
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 100x NVIDIA P100 GPUs
ID: pnasnet5large
LR: 0.015
Dropout: 0.5
Crop Pct: '0.911'
Momentum: 0.9
Batch Size: 1600
Image Size: '331'
Interpolation: bicubic
Label Smoothing: 0.1
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/pnasnet.py#L343
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 0.98%
Top 5 Accuracy: 18.58%
-->
|
pytorch-image-models/hfdocs/source/models/pnasnet.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/pnasnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1622
}
| 227
|
# SSL ResNet
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks.
The models in this collection utilise semi-supervised learning to improve their performance. The approach brings important gains to standard architectures for image, video and fine-grained classification.
Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('ssl_resnet18', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `ssl_resnet18`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('ssl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-00546,
author = {I. Zeki Yalniz and
Herv{\'{e}} J{\'{e}}gou and
Kan Chen and
Manohar Paluri and
Dhruv Mahajan},
title = {Billion-scale semi-supervised learning for image classification},
journal = {CoRR},
volume = {abs/1905.00546},
year = {2019},
url = {http://arxiv.org/abs/1905.00546},
archivePrefix = {arXiv},
eprint = {1905.00546},
timestamp = {Mon, 28 Sep 2020 08:19:37 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: SSL ResNet
Paper:
Title: Billion-scale semi-supervised learning for image classification
URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for
Models:
- Name: ssl_resnet18
In Collection: SSL ResNet
Metadata:
FLOPs: 2337073152
Parameters: 11690000
File Size: 46811375
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- YFCC-100M
Training Resources: 64x GPUs
ID: ssl_resnet18
LR: 0.0015
Epochs: 30
Layers: 18
Crop Pct: '0.875'
Batch Size: 1536
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L894
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.62%
Top 5 Accuracy: 91.42%
- Name: ssl_resnet50
In Collection: SSL ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102480594
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- YFCC-100M
Training Resources: 64x GPUs
ID: ssl_resnet50
LR: 0.0015
Epochs: 30
Layers: 50
Crop Pct: '0.875'
Batch Size: 1536
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L904
Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.24%
Top 5 Accuracy: 94.83%
-->
|
pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2425
}
| 228
|
# Learning Rate Schedulers
This page contains the API reference documentation for learning rate schedulers included in `timm`.
## Schedulers
### Factory functions
[[autodoc]] timm.scheduler.scheduler_factory.create_scheduler
[[autodoc]] timm.scheduler.scheduler_factory.create_scheduler_v2
### Scheduler Classes
[[autodoc]] timm.scheduler.cosine_lr.CosineLRScheduler
[[autodoc]] timm.scheduler.multistep_lr.MultiStepLRScheduler
[[autodoc]] timm.scheduler.plateau_lr.PlateauLRScheduler
[[autodoc]] timm.scheduler.poly_lr.PolyLRScheduler
[[autodoc]] timm.scheduler.step_lr.StepLRScheduler
[[autodoc]] timm.scheduler.tanh_lr.TanhLRScheduler
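As a quick orientation (a minimal sketch, not generated API documentation), a scheduler wraps an existing optimizer and is stepped once per epoch; per-iteration schedules can additionally call `step_update`:
```py
>>> import torch
>>> from timm.scheduler import CosineLRScheduler
>>> optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
>>> scheduler = CosineLRScheduler(optimizer, t_initial=100, warmup_t=5, warmup_lr_init=1e-5)
>>> for epoch in range(100):
...     # ... train one epoch ...
...     scheduler.step(epoch + 1)
```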
|
pytorch-image-models/hfdocs/source/reference/schedulers.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/reference/schedulers.mdx",
"repo_id": "pytorch-image-models",
"token_count": 242
}
| 229
|
DEFAULT_CROP_PCT = 0.875
DEFAULT_CROP_MODE = 'center'
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255)
IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3)
OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711)
|
pytorch-image-models/timm/data/constants.py/0
|
{
"file_path": "pytorch-image-models/timm/data/constants.py",
"repo_id": "pytorch-image-models",
"token_count": 236
}
| 230
|
""" A dataset reader that extracts images from folders
Folders are scanned recursively to find image files. Labels are based
on the folder hierarchy, just leaf folders by default.
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
from typing import Dict, List, Optional, Set, Tuple, Union
from timm.utils.misc import natural_key
from .class_map import load_class_map
from .img_extensions import get_img_extensions
from .reader import Reader
def find_images_and_targets(
folder: str,
types: Optional[Union[List, Tuple, Set]] = None,
class_to_idx: Optional[Dict] = None,
leaf_name_only: bool = True,
sort: bool = True
):
""" Walk folder recursively to discover images and map them to classes by folder names.
Args:
folder: root of folder to recursively search
types: types (file extensions) to search for in path
class_to_idx: specify mapping for class (folder name) to class index if set
leaf_name_only: use only leaf-name of folder walk for class names
sort: re-sort found images by name (for consistent ordering)
Returns:
A list of image and target tuples, class_to_idx mapping
"""
types = get_img_extensions(as_set=True) if not types else set(types)
labels = []
filenames = []
for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True):
rel_path = os.path.relpath(root, folder) if (root != folder) else ''
label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_')
for f in files:
base, ext = os.path.splitext(f)
if ext.lower() in types:
filenames.append(os.path.join(root, f))
labels.append(label)
if class_to_idx is None:
# building class index
unique_labels = set(labels)
sorted_labels = list(sorted(unique_labels, key=natural_key))
class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx]
if sort:
images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
return images_and_targets, class_to_idx
class ReaderImageFolder(Reader):
def __init__(
self,
root,
class_map='',
input_key=None,
):
super().__init__()
self.root = root
class_to_idx = None
if class_map:
class_to_idx = load_class_map(class_map, root)
find_types = None
if input_key:
find_types = input_key.split(';')
self.samples, self.class_to_idx = find_images_and_targets(
root,
class_to_idx=class_to_idx,
types=find_types,
)
if len(self.samples) == 0:
raise RuntimeError(
f'Found 0 images in subfolders of {root}. '
f'Supported image extensions are {", ".join(get_img_extensions())}')
def __getitem__(self, index):
path, target = self.samples[index]
return open(path, 'rb'), target
def __len__(self):
return len(self.samples)
def _filename(self, index, basename=False, absolute=False):
filename = self.samples[index][0]
if basename:
filename = os.path.basename(filename)
elif not absolute:
filename = os.path.relpath(filename, self.root)
return filename
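# --- Usage sketch (not part of the original file) ---
# find_images_and_targets() maps leaf folder names to class indices. The snippet
# below builds a tiny throwaway tree to show the returned structures; it assumes
# Pillow is available for writing the dummy images.
import tempfile
from PIL import Image

with tempfile.TemporaryDirectory() as _root:
    for _cls in ('cat', 'dog'):
        _d = os.path.join(_root, _cls)
        os.makedirs(_d)
        Image.new('RGB', (8, 8)).save(os.path.join(_d, 'sample.jpg'))
    _samples, _class_to_idx = find_images_and_targets(_root)
    # _class_to_idx == {'cat': 0, 'dog': 1}; _samples is a list of (path, class index) pairs
    # ReaderImageFolder(_root)[0] would return (open file handle, class index) for the first sample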
|
pytorch-image-models/timm/data/readers/reader_image_folder.py/0
|
{
"file_path": "pytorch-image-models/timm/data/readers/reader_image_folder.py",
"repo_id": "pytorch-image-models",
"token_count": 1510
}
| 231
|
""" Attention Pool 2D
Implementations of 2D spatial feature pooling using multi-head attention instead of average pool.
Based on idea in CLIP by OpenAI, licensed Apache 2.0
https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Optional, Union, Tuple
import torch
import torch.nn as nn
from .config import use_fused_attn
from .helpers import to_2tuple
from .pos_embed import resample_abs_pos_embed
from .pos_embed_sincos import apply_rot_embed, RotaryEmbedding
from .weight_init import trunc_normal_
class RotAttentionPool2d(nn.Module):
""" Attention based 2D feature pooling w/ rotary (relative) pos embedding.
This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed.
https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
NOTE: While this impl does not require a fixed feature size, performance at differing resolutions from
train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
in_features: int,
out_features: Optional[int] = None,
ref_feat_size: Union[int, Tuple[int, int]] = 7,
embed_dim: Optional[int] = None,
head_dim: Optional[int] = 64,
num_heads: Optional[int] = None,
qkv_bias: bool = True,
qkv_separate: bool = False,
pool_type: str = 'token',
class_token: bool = False,
drop_rate: float = 0.,
):
super().__init__()
assert pool_type in ('', 'token')
self.embed_dim = embed_dim = embed_dim or in_features
self.in_features = in_features
self.out_features = out_features or in_features
ref_feat_size = to_2tuple(ref_feat_size)
if num_heads is not None:
assert embed_dim % num_heads == 0
head_dim = embed_dim // num_heads
else:
assert embed_dim % head_dim == 0
num_heads = embed_dim // head_dim
self.num_heads = num_heads
self.head_dim = head_dim
self.pool_type = pool_type.lower()
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
if class_token:
self.cls_token = nn.Parameter(torch.zeros(1, embed_dim))
else:
self.cls_token = None
if qkv_separate:
self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias)
self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias)
self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias)
self.qkv = None
else:
self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
self.drop = nn.Dropout(drop_rate)
self.proj = nn.Linear(embed_dim, self.out_features)
self.pos_embed = RotaryEmbedding(self.head_dim, in_pixels=False, ref_feat_shape=ref_feat_size)
def init_weights(self, zero_init_last: bool = False):
if self.qkv is None:
in_features = self.q.in_features
trunc_normal_(self.q.weight, std=in_features ** -0.5)
nn.init.zeros_(self.q.bias)
trunc_normal_(self.k.weight, std=in_features ** -0.5)
nn.init.zeros_(self.k.bias)
trunc_normal_(self.v.weight, std=in_features ** -0.5)
nn.init.zeros_(self.v.bias)
else:
in_features = self.qkv.in_features
trunc_normal_(self.qkv.weight, std=in_features ** -0.5)
nn.init.zeros_(self.qkv.bias)
def reset(self, num_classes: Optional[int] = None, pool_type: Optional[str] = None):
# NOTE: this module is being used as a head, so need compatible reset()
if pool_type is not None:
assert pool_type in ('', 'token')
self.pool_type = pool_type
if num_classes is not None:
self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity()
self.out_features = num_classes if num_classes > 0 else self.embed_dim
def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
if self.pool_type == 'token':
x = x[:, 0]
else:
# if not pooled, return spatial output without token
x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2)
return x
def forward(self, x, pre_logits: bool = False):
B, _, H, W = x.shape
N = H * W
x = x.flatten(2).transpose(1, 2)
if self.cls_token is None:
x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
else:
x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1)
if self.qkv is None:
q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
else:
x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = x.unbind(0)
rse, rce = self.pos_embed.get_embed((H, W))
q = torch.cat([q[:, :, :1, :], apply_rot_embed(q[:, :, 1:, :], rse, rce)], dim=2).type_as(v)
k = torch.cat([k[:, :, :1, :], apply_rot_embed(k[:, :, 1:, :], rse, rce)], dim=2).type_as(v)
if self.fused_attn:
x = nn.functional.scaled_dot_product_attention(q, k, v)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N + 1, -1)
x = self.drop(x)
if pre_logits:
x = self._pool(x, H, W)
return x
x = self.proj(x)
x = self._pool(x, H, W)
return x
class AttentionPool2d(nn.Module):
""" Attention based 2D feature pooling w/ learned (absolute) pos embedding.
This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
It was based on impl in CLIP by OpenAI
https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
NOTE: This requires the feature size upon construction and will prevent adaptive sizing of the network.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
in_features: int,
feat_size: Union[int, Tuple[int, int]] = 7,
out_features: Optional[int] = None,
embed_dim: Optional[int] = None,
head_dim: Optional[int] = 64,
num_heads: Optional[int] = None,
qkv_bias: bool = True,
qkv_separate: bool = False,
pool_type: str = 'token',
class_token: bool = False,
drop_rate: float = 0.,
):
super().__init__()
assert pool_type in ('', 'token')
self.embed_dim = embed_dim = embed_dim or in_features
self.in_features = in_features
self.out_features = out_features or in_features
if num_heads is not None:
assert embed_dim % num_heads == 0
head_dim = embed_dim // num_heads
else:
assert embed_dim % head_dim == 0
num_heads = embed_dim // head_dim
self.feat_size = to_2tuple(feat_size)
self.seq_len = self.feat_size[0] * self.feat_size[1]
self.num_heads = num_heads
self.head_dim = head_dim
self.pool_type = pool_type
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
if class_token:
self.cls_token = nn.Parameter(torch.zeros(1, embed_dim))
else:
self.cls_token = None
if qkv_separate:
self.q = nn.Linear(in_features, embed_dim, bias=qkv_bias)
self.k = nn.Linear(in_features, embed_dim, bias=qkv_bias)
self.v = nn.Linear(in_features, embed_dim, bias=qkv_bias)
self.qkv = None
else:
self.q = self.k = self.v = None
self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
self.drop = nn.Dropout(drop_rate)
self.proj = nn.Linear(embed_dim, self.out_features)
self.pos_embed = nn.Parameter(torch.zeros(self.seq_len + 1, in_features))
self.init_weights()
def init_weights(self, zero_init_last: bool = False):
if self.qkv is None:
in_features = self.q.in_features
trunc_normal_(self.q.weight, std=in_features ** -0.5)
nn.init.zeros_(self.q.bias)
trunc_normal_(self.k.weight, std=in_features ** -0.5)
nn.init.zeros_(self.k.bias)
trunc_normal_(self.v.weight, std=in_features ** -0.5)
nn.init.zeros_(self.v.bias)
else:
in_features = self.qkv.in_features
trunc_normal_(self.qkv.weight, std=in_features ** -0.5)
nn.init.zeros_(self.qkv.bias)
trunc_normal_(self.pos_embed, std=in_features ** -0.5)
def reset(self, num_classes: Optional[int] = None, pool_type: Optional[str] = None):
# NOTE: this module is being used as a head, so need compatible reset()
if pool_type is not None:
assert pool_type in ('', 'token')
self.pool_type = pool_type
if num_classes is not None:
self.proj = nn.Linear(self.in_features, num_classes) if num_classes > 0 else nn.Identity()
self.out_features = num_classes if num_classes > 0 else self.embed_dim
def _pool(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
if self.pool_type == 'token':
x = x[:, 0]
else:
# if not pooled, return spatial output without token
x = x[:, 1:].reshape(x.shape[0], H, W, -1).permute(0, 3, 1, 2)
return x
def forward(self, x, pre_logits: bool = False):
B, _, H, W = x.shape
N = H * W
x = x.flatten(2).transpose(1, 2)
if self.cls_token is None:
x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
else:
x = torch.cat([self.cls_token.expand(x.shape[0], -1, -1), x], dim=1)
pos_embed = resample_abs_pos_embed(self.pos_embed.unsqueeze(0), (H, W), num_prefix_tokens=1)
x = x + pos_embed
if self.qkv is None:
q = self.q(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
k = self.k(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
v = self.v(x).reshape(B, N + 1, self.num_heads, self.head_dim).transpose(1, 2)
else:
x = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = x.unbind(0)
if self.fused_attn:
x = nn.functional.scaled_dot_product_attention(q, k, v)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N + 1, -1)
x = self.drop(x)
if pre_logits:
x = self._pool(x, H, W)
return x
x = self.proj(x)
x = self._pool(x, H, W)
return x
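# --- Usage sketch (not part of the original file) ---
# Both modules replace global average pooling over a CNN feature map with attention
# pooling. AttentionPool2d needs the spatial feature size up front; the rotary
# variant does not, though it performs best near its reference feature size.
_feats = torch.randn(2, 512, 7, 7)                               # (B, C, H, W) feature map
_pool = AttentionPool2d(in_features=512, feat_size=7, out_features=1024)
_out = _pool(_feats)                                             # -> (2, 1024) pooled token
_rot_pool = RotAttentionPool2d(in_features=512, out_features=1024, ref_feat_size=7)
_rot_out = _rot_pool(_feats)                                     # -> (2, 1024)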
|
pytorch-image-models/timm/layers/attention_pool2d.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/attention_pool2d.py",
"repo_id": "pytorch-image-models",
"token_count": 5737
}
| 232
|
""" EvoNorm in PyTorch
Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967
@inproceedings{NEURIPS2020,
author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
pages = {13539--13550},
publisher = {Curran Associates, Inc.},
title = {Evolving Normalization-Activation Layers},
url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf},
volume = {33},
year = {2020}
}
An attempt at getting decent performing EvoNorms running in PyTorch.
While faster than other PyTorch impls, these are still quite a ways off the built-in BatchNorm
in terms of memory usage and throughput on GPUs.
I'm testing these modules on TPU w/ PyTorch XLA. It's a promising start, but I'm
currently working around some issues with the builtin torch/tensor .var/.std. Unlike
on GPU, train speeds for the EvoNormS variants and BatchNorm are similar.
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Sequence, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from .create_act import create_act_layer
from .trace_utils import _assert
def instance_std(x, eps: float = 1e-5):
std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype)
return std.expand(x.shape)
def instance_std_tpu(x, eps: float = 1e-5):
std = manual_var(x, dim=(2, 3)).add(eps).sqrt()
return std.expand(x.shape)
# instance_std = instance_std_tpu
def instance_rms(x, eps: float = 1e-5):
rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype)
return rms.expand(x.shape)
def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False):
xm = x.mean(dim=dim, keepdim=True)
if diff_sqm:
# difference of squared mean and mean squared; faster on TPU but can be less stable
var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0)
else:
var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True)
return var
def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False):
B, C, H, W = x.shape
x_dtype = x.dtype
_assert(C % groups == 0, '')
if flatten:
x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues
std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
else:
x = x.reshape(B, groups, C // groups, H, W)
std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
return std.expand(x.shape).reshape(B, C, H, W)
def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False):
# This is a workaround for some stability / odd behaviour of .var and .std
# running on PyTorch XLA w/ TPUs. These manual var impls produce much better results.
B, C, H, W = x.shape
_assert(C % groups == 0, '')
if flatten:
x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues
var = manual_var(x, dim=-1, diff_sqm=diff_sqm)
else:
x = x.reshape(B, groups, C // groups, H, W)
var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm)
return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W)
#group_std = group_std_tpu # FIXME TPU temporary
def group_rms(x, groups: int = 32, eps: float = 1e-5):
B, C, H, W = x.shape
_assert(C % groups == 0, '')
x_dtype = x.dtype
x = x.reshape(B, groups, C // groups, H, W)
rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype)
return rms.expand(x.shape).reshape(B, C, H, W)
class EvoNorm2dB0(nn.Module):
def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_):
super().__init__()
self.apply_act = apply_act # apply activation (non-linearity)
self.momentum = momentum
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
if self.v is not None:
nn.init.ones_(self.v)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
if self.v is not None:
if self.training:
var = x.float().var(dim=(0, 2, 3), unbiased=False)
# var = manual_var(x, dim=(0, 2, 3)).squeeze()
n = x.numel() / x.shape[1]
self.running_var.copy_(
self.running_var * (1 - self.momentum) +
var.detach() * self.momentum * (n / (n - 1)))
else:
var = self.running_var
left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x)
v = self.v.to(x_dtype).view(v_shape)
right = x * v + instance_std(x, self.eps)
x = x / left.max(right)
return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape)
class EvoNorm2dB1(nn.Module):
def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_):
super().__init__()
self.apply_act = apply_act # apply activation (non-linearity)
self.momentum = momentum
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
if self.apply_act:
if self.training:
var = x.float().var(dim=(0, 2, 3), unbiased=False)
n = x.numel() / x.shape[1]
self.running_var.copy_(
self.running_var * (1 - self.momentum) +
var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1)))
else:
var = self.running_var
var = var.to(x_dtype).view(v_shape)
left = var.add(self.eps).sqrt_()
right = (x + 1) * instance_rms(x, self.eps)
x = x / left.max(right)
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
class EvoNorm2dB2(nn.Module):
def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_):
super().__init__()
self.apply_act = apply_act # apply activation (non-linearity)
self.momentum = momentum
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
if self.apply_act:
if self.training:
var = x.float().var(dim=(0, 2, 3), unbiased=False)
n = x.numel() / x.shape[1]
self.running_var.copy_(
self.running_var * (1 - self.momentum) +
var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1)))
else:
var = self.running_var
var = var.to(x_dtype).view(v_shape)
left = var.add(self.eps).sqrt_()
right = instance_rms(x, self.eps) - x
x = x / left.max(right)
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
class EvoNorm2dS0(nn.Module):
def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_):
super().__init__()
self.apply_act = apply_act # apply activation (non-linearity)
if group_size:
assert num_features % group_size == 0
self.groups = num_features // group_size
else:
self.groups = groups
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
if self.v is not None:
nn.init.ones_(self.v)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
if self.v is not None:
v = self.v.view(v_shape).to(x_dtype)
x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps)
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
class EvoNorm2dS0a(EvoNorm2dS0):
def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_):
super().__init__(
num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
d = group_std(x, self.groups, self.eps)
if self.v is not None:
v = self.v.view(v_shape).to(x_dtype)
x = x * (x * v).sigmoid()
x = x / d
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
class EvoNorm2dS1(nn.Module):
def __init__(
self, num_features, groups=32, group_size=None,
apply_act=True, act_layer=None, eps=1e-5, **_):
super().__init__()
act_layer = act_layer or nn.SiLU
self.apply_act = apply_act # apply activation (non-linearity)
if act_layer is not None and apply_act:
self.act = create_act_layer(act_layer)
else:
self.act = nn.Identity()
if group_size:
assert num_features % group_size == 0
self.groups = num_features // group_size
else:
self.groups = groups
self.eps = eps
self.pre_act_norm = False
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
if self.apply_act:
x = self.act(x) / group_std(x, self.groups, self.eps)
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
class EvoNorm2dS1a(EvoNorm2dS1):
def __init__(
self, num_features, groups=32, group_size=None,
apply_act=True, act_layer=None, eps=1e-3, **_):
super().__init__(
num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
x = self.act(x) / group_std(x, self.groups, self.eps)
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
class EvoNorm2dS2(nn.Module):
def __init__(
self, num_features, groups=32, group_size=None,
apply_act=True, act_layer=None, eps=1e-5, **_):
super().__init__()
act_layer = act_layer or nn.SiLU
self.apply_act = apply_act # apply activation (non-linearity)
if act_layer is not None and apply_act:
self.act = create_act_layer(act_layer)
else:
self.act = nn.Identity()
if group_size:
assert num_features % group_size == 0
self.groups = num_features // group_size
else:
self.groups = groups
self.eps = eps
self.weight = nn.Parameter(torch.ones(num_features))
self.bias = nn.Parameter(torch.zeros(num_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
if self.apply_act:
x = self.act(x) / group_rms(x, self.groups, self.eps)
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
class EvoNorm2dS2a(EvoNorm2dS2):
def __init__(
self, num_features, groups=32, group_size=None,
apply_act=True, act_layer=None, eps=1e-3, **_):
super().__init__(
num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps)
def forward(self, x):
_assert(x.dim() == 4, 'expected 4D input')
x_dtype = x.dtype
v_shape = (1, -1, 1, 1)
x = self.act(x) / group_rms(x, self.groups, self.eps)
return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
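# --- Usage sketch (not part of the original file) ---
# EvoNorm layers replace a norm + activation pair. The S variants use group
# statistics and need no running state; the B variants track a running variance.
_x = torch.randn(4, 64, 32, 32)
_s0 = EvoNorm2dS0(64, groups=8)
_y = _s0(_x)                 # same shape, group-normalized with sigmoid gating
_b0 = EvoNorm2dB0(64)
_z = _b0(_x)                 # updates running_var when the module is in training mode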
|
pytorch-image-models/timm/layers/evo_norm.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/evo_norm.py",
"repo_id": "pytorch-image-models",
"token_count": 6684
}
| 233
|
""" Median Pool
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.nn as nn
import torch.nn.functional as F
from .helpers import to_2tuple, to_4tuple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2d, self).__init__()
self.k = to_2tuple(kernel_size)
self.stride = to_2tuple(stride)
self.padding = to_4tuple(padding) # convert to l, r, t, b
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - (ih % self.stride[0]), 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - (iw % self.stride[1]), 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = (pl, pr, pt, pb)
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
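# --- Usage sketch (not part of the original file) ---
# With stride=1 and same=True this behaves like a classic 3x3 median filter, which
# is useful for suppressing salt-and-pepper noise while preserving edges.
import torch
_noisy = torch.rand(1, 3, 16, 16)
_filtered = MedianPool2d(kernel_size=3, stride=1, same=True)(_noisy)   # same spatial size as input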
|
pytorch-image-models/timm/layers/median_pool.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/median_pool.py",
"repo_id": "pytorch-image-models",
"token_count": 883
}
| 234
|
import torch
import torch.nn as nn
class SpaceToDepth(nn.Module):
bs: torch.jit.Final[int]
def __init__(self, block_size=4):
super().__init__()
assert block_size == 4
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
x = x.view(N, C * self.bs * self.bs, H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs)
return x
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)
x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)
return x
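# --- Usage sketch (not part of the original file) ---
# SpaceToDepth trades spatial resolution for channels and DepthToSpace reverses the
# rearrangement, so chaining the two with the same block size round-trips the tensor.
_x = torch.randn(2, 3, 32, 32)
_y = SpaceToDepth(block_size=4)(_x)      # -> (2, 48, 8, 8)
_z = DepthToSpace(block_size=4)(_y)      # -> (2, 3, 32, 32), equal to _x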
|
pytorch-image-models/timm/layers/space_to_depth.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/space_to_depth.py",
"repo_id": "pytorch-image-models",
"token_count": 568
}
| 235
|
""" EfficientNet, MobileNetV3, etc Blocks
Hacked together by / Copyright 2019, Ross Wightman
"""
from typing import Callable, Dict, Optional, Type
import torch
import torch.nn as nn
from torch.nn import functional as F
from timm.layers import create_conv2d, DropPath, make_divisible, create_act_layer, create_aa, to_2tuple, LayerType,\
ConvNormAct, get_norm_act_layer, MultiQueryAttention2d, Attention2d
__all__ = [
'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual',
'UniversalInvertedResidual', 'MobileAttention'
]
ModuleType = Type[nn.Module]
def num_groups(group_size: Optional[int], channels: int):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
class SqueezeExcite(nn.Module):
""" Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family
Args:
in_chs (int): input channels to layer
rd_ratio (float): ratio of squeeze reduction
act_layer (nn.Module): activation layer of containing block
gate_layer (Callable): attention gate function
force_act_layer (nn.Module): override block's activation fn if this is set/bound
rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs
"""
def __init__(
self,
in_chs: int,
rd_ratio: float = 0.25,
rd_channels: Optional[int] = None,
act_layer: LayerType = nn.ReLU,
gate_layer: LayerType = nn.Sigmoid,
force_act_layer: Optional[LayerType] = None,
rd_round_fn: Optional[Callable] = None,
):
super(SqueezeExcite, self).__init__()
if rd_channels is None:
rd_round_fn = rd_round_fn or round
rd_channels = rd_round_fn(in_chs * rd_ratio)
act_layer = force_act_layer or act_layer
self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)
self.act1 = create_act_layer(act_layer, inplace=True)
self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
return x * self.gate(x_se)
class ConvBnAct(nn.Module):
""" Conv + Norm Layer + Activation w/ optional skip connection
"""
def __init__(
self,
in_chs: int,
out_chs: int,
kernel_size: int,
stride: int = 1,
dilation: int = 1,
group_size: int = 0,
pad_type: str = '',
skip: bool = False,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[LayerType] = None,
drop_path_rate: float = 0.,
):
super(ConvBnAct, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
groups = num_groups(group_size, in_chs)
self.has_skip = skip and stride == 1 and in_chs == out_chs
use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation
self.conv = create_conv2d(
in_chs, out_chs, kernel_size,
stride=1 if use_aa else stride,
dilation=dilation, groups=groups, padding=pad_type)
self.bn1 = norm_act_layer(out_chs, inplace=True)
self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # output of conv after act, same as block output
return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv.out_channels)
def forward(self, x):
shortcut = x
x = self.conv(x)
x = self.bn1(x)
x = self.aa(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class DepthwiseSeparableConv(nn.Module):
""" Depthwise-separable block
Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
(factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
"""
def __init__(
self,
in_chs: int,
out_chs: int,
dw_kernel_size: int = 3,
stride: int = 1,
dilation: int = 1,
group_size: int = 1,
pad_type: str = '',
noskip: bool = False,
pw_kernel_size: int = 1,
pw_act: bool = False,
s2d: int = 0,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[ModuleType] = None,
drop_path_rate: float = 0.,
):
super(DepthwiseSeparableConv, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act # activation after point-wise conv
use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation
# Space to depth
if s2d == 1:
sd_chs = int(in_chs * 4)
self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same')
self.bn_s2d = norm_act_layer(sd_chs, sd_chs)
dw_kernel_size = (dw_kernel_size + 1) // 2
dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type
in_chs = sd_chs
use_aa = False # disable AA
else:
self.conv_s2d = None
self.bn_s2d = None
dw_pad_type = pad_type
groups = num_groups(group_size, in_chs)
self.conv_dw = create_conv2d(
in_chs, in_chs, dw_kernel_size,
stride=1 if use_aa else stride,
dilation=dilation, padding=dw_pad_type, groups=groups)
self.bn1 = norm_act_layer(in_chs, inplace=True)
self.aa = create_aa(aa_layer, channels=out_chs, stride=stride, enable=use_aa)
# Squeeze-and-excitation
self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()
self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_act_layer(out_chs, inplace=True, apply_act=self.has_pw_act)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PW
return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pw.out_channels)
def forward(self, x):
shortcut = x
if self.conv_s2d is not None:
x = self.conv_s2d(x)
x = self.bn_s2d(x)
x = self.conv_dw(x)
x = self.bn1(x)
x = self.aa(x)
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class InvertedResidual(nn.Module):
""" Inverted residual block w/ optional SE
Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
referred to as 'MBConv' (Mobile inverted bottleneck conv) and is also used in
* MNasNet - https://arxiv.org/abs/1807.11626
* EfficientNet - https://arxiv.org/abs/1905.11946
* MobileNet-V3 - https://arxiv.org/abs/1905.02244
"""
def __init__(
self,
in_chs: int,
out_chs: int,
dw_kernel_size: int = 3,
stride: int = 1,
dilation: int = 1,
group_size: int = 1,
pad_type: str = '',
noskip: bool = False,
exp_ratio: float = 1.0,
exp_kernel_size: int = 1,
pw_kernel_size: int = 1,
s2d: int = 0,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[ModuleType] = None,
conv_kwargs: Optional[Dict] = None,
drop_path_rate: float = 0.,
):
super(InvertedResidual, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
conv_kwargs = conv_kwargs or {}
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation
# Space to depth
if s2d == 1:
sd_chs = int(in_chs * 4)
self.conv_s2d = create_conv2d(in_chs, sd_chs, kernel_size=2, stride=2, padding='same')
self.bn_s2d = norm_act_layer(sd_chs, sd_chs)
dw_kernel_size = (dw_kernel_size + 1) // 2
dw_pad_type = 'same' if dw_kernel_size == 2 else pad_type
in_chs = sd_chs
use_aa = False # disable AA
else:
self.conv_s2d = None
self.bn_s2d = None
dw_pad_type = pad_type
mid_chs = make_divisible(in_chs * exp_ratio)
groups = num_groups(group_size, mid_chs)
# Point-wise expansion
self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
self.bn1 = norm_act_layer(mid_chs, inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs, mid_chs, dw_kernel_size,
stride=1 if use_aa else stride,
dilation=dilation, groups=groups, padding=dw_pad_type, **conv_kwargs)
self.bn2 = norm_act_layer(mid_chs, inplace=True)
self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
self.bn3 = norm_act_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PWL
return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pwl.out_channels)
def forward(self, x):
shortcut = x
if self.conv_s2d is not None:
x = self.conv_s2d(x)
x = self.bn_s2d(x)
x = self.conv_pw(x)
x = self.bn1(x)
x = self.conv_dw(x)
x = self.bn2(x)
x = self.aa(x)
x = self.se(x)
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class LayerScale2d(nn.Module):
def __init__(self, dim: int, init_values: float = 1e-5, inplace: bool = False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class UniversalInvertedResidual(nn.Module):
""" Universal Inverted Residual Block (aka Universal Inverted Bottleneck, UIB)
For MobileNetV4 - https://arxiv.org/abs/, referenced from
https://github.com/tensorflow/models/blob/d93c7e932de27522b2fa3b115f58d06d6f640537/official/vision/modeling/layers/nn_blocks.py#L778
"""
def __init__(
self,
in_chs: int,
out_chs: int,
dw_kernel_size_start: int = 0,
dw_kernel_size_mid: int = 3,
dw_kernel_size_end: int = 0,
stride: int = 1,
dilation: int = 1,
group_size: int = 1,
pad_type: str = '',
noskip: bool = False,
exp_ratio: float = 1.0,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[ModuleType] = None,
conv_kwargs: Optional[Dict] = None,
drop_path_rate: float = 0.,
layer_scale_init_value: Optional[float] = 1e-5,
):
super(UniversalInvertedResidual, self).__init__()
conv_kwargs = conv_kwargs or {}
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
if stride > 1:
assert dw_kernel_size_start or dw_kernel_size_mid or dw_kernel_size_end
# FIXME dilation isn't right w/ extra ks > 1 convs
if dw_kernel_size_start:
dw_start_stride = stride if not dw_kernel_size_mid else 1
dw_start_groups = num_groups(group_size, in_chs)
self.dw_start = ConvNormAct(
in_chs, in_chs, dw_kernel_size_start,
stride=dw_start_stride,
dilation=dilation, # FIXME
groups=dw_start_groups,
padding=pad_type,
apply_act=False,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
**conv_kwargs,
)
else:
self.dw_start = nn.Identity()
# Point-wise expansion
mid_chs = make_divisible(in_chs * exp_ratio)
self.pw_exp = ConvNormAct(
in_chs, mid_chs, 1,
padding=pad_type,
act_layer=act_layer,
norm_layer=norm_layer,
**conv_kwargs,
)
# Middle depth-wise convolution
if dw_kernel_size_mid:
groups = num_groups(group_size, mid_chs)
self.dw_mid = ConvNormAct(
mid_chs, mid_chs, dw_kernel_size_mid,
stride=stride,
dilation=dilation, # FIXME
groups=groups,
padding=pad_type,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
**conv_kwargs,
)
else:
# keeping mid as identity so it can be hooked more easily for features
self.dw_mid = nn.Identity()
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.pw_proj = ConvNormAct(
mid_chs, out_chs, 1,
padding=pad_type,
apply_act=False,
act_layer=act_layer,
norm_layer=norm_layer,
**conv_kwargs,
)
if dw_kernel_size_end:
dw_end_stride = stride if not dw_kernel_size_start and not dw_kernel_size_mid else 1
dw_end_groups = num_groups(group_size, out_chs)
if dw_end_stride > 1:
assert not aa_layer
self.dw_end = ConvNormAct(
out_chs, out_chs, dw_kernel_size_end,
stride=dw_end_stride,
dilation=dilation,
groups=dw_end_groups,
padding=pad_type,
apply_act=False,
act_layer=act_layer,
norm_layer=norm_layer,
**conv_kwargs,
)
else:
self.dw_end = nn.Identity()
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PWL
return dict(module='pw_proj.conv', hook_type='forward_pre', num_chs=self.pw_proj.conv.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.pw_proj.conv.out_channels)
def forward(self, x):
shortcut = x
x = self.dw_start(x)
x = self.pw_exp(x)
x = self.dw_mid(x)
x = self.se(x)
x = self.pw_proj(x)
x = self.dw_end(x)
x = self.layer_scale(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class MobileAttention(nn.Module):
""" Mobile Attention Block
For MobileNetV4 - https://arxiv.org/abs/, referenced from
https://github.com/tensorflow/models/blob/d93c7e932de27522b2fa3b115f58d06d6f640537/official/vision/modeling/layers/nn_blocks.py#L1504
"""
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dw_kernel_size: int = 3,
dilation: int = 1,
group_size: int = 1,
pad_type: str = '',
num_heads: int = 8,
key_dim: int = 64,
value_dim: int = 64,
use_multi_query: bool = False,
query_strides: int = (1, 1),
kv_stride: int = 1,
cpe_dw_kernel_size: int = 3,
noskip: bool = False,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[LayerType] = None,
drop_path_rate: float = 0.,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
layer_scale_init_value: Optional[float] = 1e-5,
use_bias: bool = False,
use_cpe: bool = False,
):
super(MobileAttention, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
self.query_strides = to_2tuple(query_strides)
self.kv_stride = kv_stride
self.has_query_stride = any([s > 1 for s in self.query_strides])
# This CPE is different from the one suggested in the original paper.
# https://arxiv.org/abs/2102.10882
# 1. Rather than adding one CPE before the attention blocks, we add a CPE
# into every attention block.
# 2. We replace the expensive Conv2D with a separable DW Conv.
if use_cpe:
self.conv_cpe_dw = create_conv2d(
in_chs, in_chs,
kernel_size=cpe_dw_kernel_size,
dilation=dilation,
depthwise=True,
bias=True,
)
else:
self.conv_cpe_dw = None
self.norm = norm_act_layer(in_chs, apply_act=False)
if num_heads is None:
assert in_chs % key_dim == 0
num_heads = in_chs // key_dim
if use_multi_query:
self.attn = MultiQueryAttention2d(
in_chs,
dim_out=out_chs,
num_heads=num_heads,
key_dim=key_dim,
value_dim=value_dim,
query_strides=query_strides,
kv_stride=kv_stride,
dilation=dilation,
padding=pad_type,
dw_kernel_size=dw_kernel_size,
attn_drop=attn_drop,
proj_drop=proj_drop,
#bias=use_bias, # why not here if used w/ mhsa?
)
else:
self.attn = Attention2d(
in_chs,
dim_out=out_chs,
num_heads=num_heads,
attn_drop=attn_drop,
proj_drop=proj_drop,
bias=use_bias,
)
if layer_scale_init_value is not None:
self.layer_scale = LayerScale2d(out_chs, layer_scale_init_value)
else:
self.layer_scale = nn.Identity()
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PW
return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pw.out_channels)
def forward(self, x):
if self.conv_cpe_dw is not None:
x_cpe = self.conv_cpe_dw(x)
x = x + x_cpe
shortcut = x
x = self.norm(x)
x = self.attn(x)
x = self.layer_scale(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class CondConvResidual(InvertedResidual):
""" Inverted residual block w/ CondConv routing"""
def __init__(
self,
in_chs: int,
out_chs: int,
dw_kernel_size: int = 3,
stride: int = 1,
dilation: int = 1,
group_size: int = 1,
pad_type: str = '',
noskip: bool = False,
exp_ratio: float = 1.0,
exp_kernel_size: int = 1,
pw_kernel_size: int = 1,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[ModuleType] = None,
num_experts: int = 0,
drop_path_rate: float = 0.,
):
self.num_experts = num_experts
conv_kwargs = dict(num_experts=self.num_experts)
super(CondConvResidual, self).__init__(
in_chs,
out_chs,
dw_kernel_size=dw_kernel_size,
stride=stride,
dilation=dilation,
group_size=group_size,
pad_type=pad_type,
noskip=noskip,
exp_ratio=exp_ratio,
exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
conv_kwargs=conv_kwargs,
drop_path_rate=drop_path_rate,
)
self.routing_fn = nn.Linear(in_chs, self.num_experts)
def forward(self, x):
shortcut = x
pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) # CondConv routing
routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
x = self.conv_pw(x, routing_weights)
x = self.bn1(x)
x = self.conv_dw(x, routing_weights)
x = self.bn2(x)
x = self.se(x)
x = self.conv_pwl(x, routing_weights)
x = self.bn3(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class EdgeResidual(nn.Module):
""" Residual block with expansion convolution followed by pointwise-linear w/ stride
Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`
- https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers
* MobileDet - https://arxiv.org/abs/2004.14525
* EfficientNet-X - https://arxiv.org/abs/2102.05610
* EfficientNet-V2 - https://arxiv.org/abs/2104.00298
"""
def __init__(
self,
in_chs: int,
out_chs: int,
exp_kernel_size: int = 3,
stride: int = 1,
dilation: int = 1,
group_size: int = 0,
pad_type: str = '',
force_in_chs: int = 0,
noskip: bool = False,
exp_ratio: float = 1.0,
pw_kernel_size: int = 1,
act_layer: LayerType = nn.ReLU,
norm_layer: LayerType = nn.BatchNorm2d,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[ModuleType] = None,
drop_path_rate: float = 0.,
):
super(EdgeResidual, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
if force_in_chs > 0:
mid_chs = make_divisible(force_in_chs * exp_ratio)
else:
mid_chs = make_divisible(in_chs * exp_ratio)
groups = num_groups(group_size, mid_chs) # NOTE: Using out_chs of conv_exp for groups calc
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
use_aa = aa_layer is not None and stride > 1 # FIXME handle dilation
# Expansion convolution
self.conv_exp = create_conv2d(
in_chs, mid_chs, exp_kernel_size,
stride=1 if use_aa else stride,
dilation=dilation, groups=groups, padding=pad_type)
self.bn1 = norm_act_layer(mid_chs, inplace=True)
self.aa = create_aa(aa_layer, channels=mid_chs, stride=stride, enable=use_aa)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_act_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, before PWL
return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pwl.out_channels)
def forward(self, x):
shortcut = x
x = self.conv_exp(x)
x = self.bn1(x)
x = self.aa(x)
x = self.se(x)
x = self.conv_pwl(x)
x = self.bn2(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
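# --- Usage sketch (not part of the original file) ---
# These blocks are the building units assembled by the EfficientNet/MobileNet model
# builders. A stride-1 block with matching in/out channels keeps its residual skip.
_x = torch.randn(1, 32, 56, 56)
_ir = InvertedResidual(32, 32, dw_kernel_size=3, stride=1, exp_ratio=4.0)
_uir = UniversalInvertedResidual(32, 32, dw_kernel_size_start=3, dw_kernel_size_mid=3, exp_ratio=4.0)
_y = _ir(_x)     # (1, 32, 56, 56); shortcut added since has_skip is True
_z = _uir(_x)    # (1, 32, 56, 56)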
|
pytorch-image-models/timm/models/_efficientnet_blocks.py/0
|
{
"file_path": "pytorch-image-models/timm/models/_efficientnet_blocks.py",
"repo_id": "pytorch-image-models",
"token_count": 13538
}
| 236
|
""" BEiT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
Model from official source: https://github.com/microsoft/unilm/tree/master/beit
@inproceedings{beit,
title={{BEiT}: {BERT} Pre-Training of Image Transformers},
author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei},
booktitle={International Conference on Learning Representations},
year={2022},
url={https://openreview.net/forum?id=p-BhZSz59o4}
}
BEiT-v2 from https://github.com/microsoft/unilm/tree/master/beit2
@article{beitv2,
title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers},
author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei},
year={2022},
eprint={2208.06366},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
At this point only the 1k fine-tuned classification weights and model configs have been added,
see original source above for pre-training models and procedure.
Modifications by / Copyright 2021 Ross Wightman, original copyrights below
"""
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, SwiGLU, LayerNorm, DropPath, trunc_normal_, use_fused_attn
from timm.layers import resample_patch_embed, resample_abs_pos_embed, resize_rel_pos_bias_table, ndgrid
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._registry import generate_default_cfgs, register_model
__all__ = ['Beit']
def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor:
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
# cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
window_area = window_size[0] * window_size[1]
coords = torch.stack(ndgrid(torch.arange(window_size[0]), torch.arange(window_size[1]))) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = num_relative_distance - 3
relative_position_index[0:, 0] = num_relative_distance - 2
relative_position_index[0, 0] = num_relative_distance - 1
return relative_position_index
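# --- Illustration (not part of the original file) ---
# For a 2x2 window the table is (2*2 + 1) x (2*2 + 1) = 5 x 5: entry [i, j] indexes
# the learned relative-position bias between tokens i and j, with the three largest
# index values reserved for the cls-to-token, token-to-cls and cls-to-cls cases.
_example_index = gen_relative_position_index((2, 2))   # shape (5, 5), max value 11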
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
qkv_bias_separate: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.,
window_size: Optional[Tuple[int, int]] = None,
attn_head_dim: Optional[int] = None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv_bias_separate = qkv_bias_separate
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False)
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.k_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
self.register_buffer("relative_position_index", gen_relative_position_index(window_size), persistent=False)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def _get_rel_pos_bias(self):
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
return relative_position_bias.unsqueeze(0)
def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
B, N, C = x.shape
if self.q_bias is None:
qkv = self.qkv(x)
else:
qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias))
if self.qkv_bias_separate:
qkv = self.qkv(x)
qkv += qkv_bias
else:
qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim
if self.fused_attn:
rel_pos_bias = None
if self.relative_position_bias_table is not None:
rel_pos_bias = self._get_rel_pos_bias()
if shared_rel_pos_bias is not None:
rel_pos_bias = rel_pos_bias + shared_rel_pos_bias
elif shared_rel_pos_bias is not None:
rel_pos_bias = shared_rel_pos_bias
x = F.scaled_dot_product_attention(
q, k, v,
attn_mask=rel_pos_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
attn = attn + self._get_rel_pos_bias()
if shared_rel_pos_bias is not None:
attn = attn + shared_rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
qkv_bias: bool = False,
mlp_ratio: float = 4.,
scale_mlp: bool = False,
swiglu_mlp: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
init_values: Optional[float] = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
window_size: Optional[Tuple[int, int]] = None,
attn_head_dim: Optional[int] = None,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
window_size=window_size,
attn_head_dim=attn_head_dim,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
if swiglu_mlp:
self.mlp = SwiGLU(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
else:
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if init_values:
self.gamma_1 = nn.Parameter(init_values * torch.ones(dim))
self.gamma_2 = nn.Parameter(init_values * torch.ones(dim))
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
if self.gamma_1 is None:
x = x + self.drop_path1(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.window_area = window_size[0] * window_size[1]
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))
# trunc_normal_(self.relative_position_bias_table, std=.02)
self.register_buffer("relative_position_index", gen_relative_position_index(window_size))
def forward(self):
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_area + 1, self.window_area + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
class Beit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
qkv_bias: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
norm_layer: Callable = LayerNorm,
init_values: Optional[float] = None,
use_abs_pos_emb: bool = True,
use_rel_pos_bias: bool = False,
use_shared_rel_pos_bias: bool = False,
head_init_scale: float = 0.001,
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models
self.num_prefix_tokens = 1
self.grad_checkpointing = False
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(
window_size=self.patch_embed.grid_size,
num_heads=num_heads,
)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
mlp_ratio=mlp_ratio,
scale_mlp=scale_mlp,
swiglu_mlp=swiglu_mlp,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values,
window_size=self.patch_embed.grid_size if use_rel_pos_bias else None,
)
for i in range(depth)])
self.feature_info = [
dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]
use_fc_norm = self.global_pool == 'avg'
self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.fix_init_weight()
if isinstance(self.head, nn.Linear):
trunc_normal_(self.head.weight, std=.02)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
nwd = {'pos_embed', 'cls_token'}
for n, _ in self.named_parameters():
if 'relative_position_bias_table' in n:
nwd.add(n)
return nwd
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|pos_embed|patch_embed|rel_pos_bias', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
)
return matcher
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
return_prefix_tokens: bool = False,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
            indices: Take last n blocks if an int, select by matching indices if a sequence
return_prefix_tokens: Return both prefix and spatial intermediate tokens
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
"""
assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
reshape = output_fmt == 'NCHW'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# forward pass
B, _, height, width = x.shape
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
for i, blk in enumerate(blocks):
x = blk(x, shared_rel_pos_bias=rel_pos_bias)
if i in take_indices:
# normalize intermediates with final norm layer if enabled
intermediates.append(self.norm(x) if norm else x)
# process intermediates
if self.num_prefix_tokens:
# split prefix (e.g. class, distill) and spatial feature tokens
prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates]
intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates]
if reshape:
# reshape to BCHW output format
H, W = self.patch_embed.dynamic_feat_size((height, width))
intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]
if not torch.jit.is_scripting() and return_prefix_tokens:
            # return_prefix not supported in torchscript due to poor type handling
intermediates = list(zip(intermediates, prefix_tokens))
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
self.blocks = self.blocks[:max_index + 1] # truncate blocks
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.fc_norm = nn.Identity()
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
else:
x = blk(x, shared_rel_pos_bias=rel_pos_bias)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'beit_base_patch16_224.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth',
hf_hub_id='timm/'),
'beit_base_patch16_384.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0,
),
'beit_base_patch16_224.in22k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth',
hf_hub_id='timm/',
num_classes=21841,
),
'beit_large_patch16_224.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth',
hf_hub_id='timm/'),
'beit_large_patch16_384.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0,
),
'beit_large_patch16_512.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 512, 512), crop_pct=1.0,
),
'beit_large_patch16_224.in22k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth',
hf_hub_id='timm/',
num_classes=21841,
),
'beitv2_base_patch16_224.in1k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_base_patch16_224.in1k_ft_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft1k.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_base_patch16_224.in1k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21k.pth',
hf_hub_id='timm/',
num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21kto1k.pth',
hf_hub_id='timm/',
crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft1k.pth',
hf_hub_id='timm/',
crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth',
hf_hub_id='timm/',
num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
})
def checkpoint_filter_fn(state_dict, model, interpolation='bicubic', antialias=True):
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('module', state_dict)
# beit v2 didn't strip module
out_dict = {}
for k, v in state_dict.items():
if 'relative_position_index' in k:
continue
if 'patch_embed.proj.weight' in k:
O, I, H, W = model.patch_embed.proj.weight.shape
if v.shape[-1] != W or v.shape[-2] != H:
v = resample_patch_embed(
v,
(H, W),
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
num_prefix_tokens = 1
v = resample_abs_pos_embed(
v,
new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k.endswith('relative_position_bias_table'):
m = model.get_submodule(k[:-29])
if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
v = resize_rel_pos_bias_table(
v,
new_window_size=m.window_size,
new_bias_shape=m.relative_position_bias_table.shape,
)
out_dict[k] = v
return out_dict
def _create_beit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
model = build_model_with_cfg(
Beit, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beitv2_base_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beitv2_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beitv2_large_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beitv2_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
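# Usage sketch for the registrations above (illustrative; assumes timm is installed and that
# pretrained weights are only fetched when pretrained=True):
#
#   import timm, torch
#   model = timm.create_model('beit_base_patch16_224', pretrained=False).eval()
#   x = torch.randn(1, 3, 224, 224)
#   logits = model(x)                                              # (1, 1000)
#   feats = model.forward_intermediates(x, indices=2, intermediates_only=True)
#   [f.shape for f in feats]                                       # two (1, 768, 14, 14) NCHW maps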
# ==== end of file: pytorch-image-models/timm/models/beit.py ====
""" EfficientFormer
@article{li2022efficientformer,
title={EfficientFormer: Vision Transformers at MobileNet Speed},
author={Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov,
Sergey and Wang, Yanzhi and Ren, Jian},
journal={arXiv preprint arXiv:2206.01191},
year={2022}
}
Based on Apache 2.0 licensed code at https://github.com/snap-research/EfficientFormer, Copyright (c) 2022 Snap Inc.
Modifications and timm support by / Copyright 2022, Ross Wightman
"""
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, to_2tuple, Mlp, ndgrid
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['EfficientFormer'] # model_registry will add each entrypoint fn to this
EfficientFormer_width = {
'l1': (48, 96, 224, 448),
'l3': (64, 128, 320, 512),
'l7': (96, 192, 384, 768),
}
EfficientFormer_depth = {
'l1': (3, 2, 6, 4),
'l3': (4, 4, 12, 6),
'l7': (6, 6, 18, 8),
}
class Attention(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim=384,
key_dim=32,
num_heads=8,
attn_ratio=4,
resolution=7
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.key_attn_dim = key_dim * num_heads
self.val_dim = int(attn_ratio * key_dim)
self.val_attn_dim = self.val_dim * num_heads
self.attn_ratio = attn_ratio
self.qkv = nn.Linear(dim, self.key_attn_dim * 2 + self.val_attn_dim)
self.proj = nn.Linear(self.val_attn_dim, dim)
resolution = to_2tuple(resolution)
pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1)
rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
self.register_buffer('attention_bias_idxs', rel_pos)
self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat)
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x): # x (B,N,C)
B, N, C = x.shape
qkv = self.qkv(x)
qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
q, k, v = qkv.split([self.key_dim, self.key_dim, self.val_dim], dim=3)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim)
x = self.proj(x)
return x
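# Shape sketch for the attention module above (illustrative only, using the default
# constructor arguments): the learned (num_heads, 49) bias table is indexed by
# attention_bias_idxs into an (8, 49, 49) relative position bias before the softmax.
#
#   attn = Attention()                    # dim=384, key_dim=32, num_heads=8, resolution=7
#   y = attn(torch.randn(2, 49, 384))     # (2, 49, 384); tokens must match resolution**2 == 49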
class Stem4(nn.Sequential):
def __init__(self, in_chs, out_chs, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super().__init__()
self.stride = 4
self.add_module('conv1', nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1))
self.add_module('norm1', norm_layer(out_chs // 2))
self.add_module('act1', act_layer())
self.add_module('conv2', nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1))
self.add_module('norm2', norm_layer(out_chs))
self.add_module('act2', act_layer())
class Downsample(nn.Module):
"""
Downsampling via strided conv w/ norm
Input: tensor in shape [B, C, H, W]
Output: tensor in shape [B, C, H/stride, W/stride]
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=None, norm_layer=nn.BatchNorm2d):
super().__init__()
if padding is None:
padding = kernel_size // 2
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm = norm_layer(out_chs)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class Flat(nn.Module):
def __init__(self, ):
super().__init__()
def forward(self, x):
x = x.flatten(2).transpose(1, 2)
return x
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
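# The residual form above means that once the block's skip connection adds x back
# (see MetaBlock2d further down), the token mixer reduces to plain average pooling.
# Illustrative sketch:
#
#   pool_mix = Pooling(pool_size=3)
#   x = torch.randn(2, 64, 14, 14)
#   mixed = x + pool_mix(x)   # equal (up to floating point) to
#                             # nn.AvgPool2d(3, 1, 1, count_include_pad=False)(x)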
class ConvMlpWithNorm(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
drop=0.
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.norm1 = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.norm2 = norm_layer(out_features) if norm_layer is not None else nn.Identity()
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.norm1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.norm2(x)
x = self.drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class MetaBlock1d(nn.Module):
def __init__(
self,
dim,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
proj_drop=0.,
drop_path=0.,
layer_scale_init_value=1e-5
):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Attention(dim)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.ls1 = LayerScale(dim, layer_scale_init_value)
self.ls2 = LayerScale(dim, layer_scale_init_value)
def forward(self, x):
x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x))))
x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x))))
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class MetaBlock2d(nn.Module):
def __init__(
self,
dim,
pool_size=3,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
proj_drop=0.,
drop_path=0.,
layer_scale_init_value=1e-5
):
super().__init__()
self.token_mixer = Pooling(pool_size=pool_size)
self.ls1 = LayerScale2d(dim, layer_scale_init_value)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = ConvMlpWithNorm(
dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
norm_layer=norm_layer,
drop=proj_drop,
)
self.ls2 = LayerScale2d(dim, layer_scale_init_value)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.ls1(self.token_mixer(x)))
x = x + self.drop_path2(self.ls2(self.mlp(x)))
return x
class EfficientFormerStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
downsample=True,
num_vit=1,
pool_size=3,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
norm_layer_cl=nn.LayerNorm,
proj_drop=.0,
drop_path=0.,
layer_scale_init_value=1e-5,
):
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = Downsample(in_chs=dim, out_chs=dim_out, norm_layer=norm_layer)
dim = dim_out
else:
assert dim == dim_out
self.downsample = nn.Identity()
blocks = []
if num_vit and num_vit >= depth:
blocks.append(Flat())
for block_idx in range(depth):
remain_idx = depth - block_idx - 1
if num_vit and num_vit > remain_idx:
blocks.append(
MetaBlock1d(
dim,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer_cl,
proj_drop=proj_drop,
drop_path=drop_path[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
else:
blocks.append(
MetaBlock2d(
dim,
pool_size=pool_size,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop=proj_drop,
drop_path=drop_path[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
if num_vit and num_vit == remain_idx:
blocks.append(Flat())
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class EfficientFormer(nn.Module):
def __init__(
self,
depths,
embed_dims=None,
in_chans=3,
num_classes=1000,
global_pool='avg',
downsamples=None,
num_vit=0,
mlp_ratios=4,
pool_size=3,
layer_scale_init_value=1e-5,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
norm_layer_cl=nn.LayerNorm,
drop_rate=0.,
proj_drop_rate=0.,
drop_path_rate=0.,
**kwargs
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.stem = Stem4(in_chans, embed_dims[0], norm_layer=norm_layer)
prev_dim = embed_dims[0]
# stochastic depth decay rule
self.num_stages = len(depths)
last_stage = self.num_stages - 1
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
downsamples = downsamples or (False,) + (True,) * (self.num_stages - 1)
stages = []
self.feature_info = []
for i in range(self.num_stages):
stage = EfficientFormerStage(
prev_dim,
embed_dims[i],
depths[i],
downsample=downsamples[i],
num_vit=num_vit if i == last_stage else 0,
pool_size=pool_size,
mlp_ratio=mlp_ratios,
act_layer=act_layer,
norm_layer_cl=norm_layer_cl,
norm_layer=norm_layer,
proj_drop=proj_drop_rate,
drop_path=dpr[i],
layer_scale_init_value=layer_scale_init_value,
)
prev_dim = embed_dims[i]
stages.append(stage)
self.feature_info += [dict(num_chs=embed_dims[i], reduction=2**(1+i), module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# Classifier head
self.num_features = self.head_hidden_size = embed_dims[-1]
self.norm = norm_layer_cl(self.num_features)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# assuming model is always distilled (valid for current checkpoints, will split def if that changes)
self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
self.apply(self._init_weights)
# init for classification
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {k for k, _ in self.named_parameters() if 'attention_biases' in k}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head, self.head_dist
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.stem(x)
B, C, H, W = x.shape
last_idx = self.num_stages - 1
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
feat_idx = 0
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx < last_idx:
B, C, H, W = x.shape
if feat_idx in take_indices:
if feat_idx == last_idx:
x_inter = self.norm(x) if norm else x
intermediates.append(x_inter.reshape(B, H // 2, W // 2, -1).permute(0, 3, 1, 2))
else:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == last_idx:
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=1)
x = self.head_drop(x)
if pre_logits:
return x
x, x_dist = self.head(x), self.head_dist(x)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
            # during standard train / finetune and at inference, average the classifier predictions
return (x + x_dist) / 2
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stem.0.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
out_dict = {}
import re
stage_idx = 0
for k, v in state_dict.items():
if k.startswith('patch_embed'):
k = k.replace('patch_embed.0', 'stem.conv1')
k = k.replace('patch_embed.1', 'stem.norm1')
k = k.replace('patch_embed.3', 'stem.conv2')
k = k.replace('patch_embed.4', 'stem.norm2')
if re.match(r'network\.(\d+)\.proj\.weight', k):
stage_idx += 1
k = re.sub(r'network.(\d+).(\d+)', f'stages.{stage_idx}.blocks.\\2', k)
k = re.sub(r'network.(\d+).proj', f'stages.{stage_idx}.downsample.conv', k)
k = re.sub(r'network.(\d+).norm', f'stages.{stage_idx}.downsample.norm', k)
k = re.sub(r'layer_scale_([0-9])', r'ls\1.gamma', k)
k = k.replace('dist_head', 'head_dist')
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True,
'crop_pct': .95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': ('head', 'head_dist'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'efficientformer_l1.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformer_l3.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformer_l7.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
})
def _create_efficientformer(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 4)
model = build_model_with_cfg(
EfficientFormer, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
@register_model
def efficientformer_l1(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l1'],
embed_dims=EfficientFormer_width['l1'],
num_vit=1,
)
return _create_efficientformer('efficientformer_l1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformer_l3(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l3'],
embed_dims=EfficientFormer_width['l3'],
num_vit=4,
)
return _create_efficientformer('efficientformer_l3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformer_l7(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l7'],
embed_dims=EfficientFormer_width['l7'],
num_vit=8,
)
return _create_efficientformer('efficientformer_l7', pretrained=pretrained, **dict(model_args, **kwargs))
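# Usage sketch (illustrative; assumes timm is installed):
#
#   import timm, torch
#   model = timm.create_model('efficientformer_l1', pretrained=False).eval()
#   logits = model(torch.randn(1, 3, 224, 224))   # (1, 1000); in eval mode the two
#                                                 # classifier heads (head / head_dist) are averaged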
# ==== end of file: pytorch-image-models/timm/models/efficientformer.py ====
|
""" An PyTorch implementation of Hiera
Adapted for timm from originals at https://github.com/facebookresearch/hiera
"""
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
#
# Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles
#
# Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan,
# Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed,
# Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer.
#
# Paper: https://arxiv.org/abs/2306.00989/
#
# References:
# slowfast: https://github.com/facebookresearch/SlowFast
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------
import math
from functools import partial
from typing import Callable, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, Mlp, LayerScale, ClNormMlpClassifierHead, use_fused_attn, \
_assert, get_norm_layer, to_2tuple, init_weight_vit, init_weight_jax
from ._registry import generate_default_cfgs, register_model
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._manipulate import named_apply
__all__ = ['Hiera']
def conv_nd(n: int) -> Type[nn.Module]:
"""
    Returns an nd conv module (e.g., Conv2d for n=2). Works up to n=3.
If you wanted a 4d Hiera, you could probably just implement this for n=4. (no promises)
"""
return [nn.Identity, nn.Conv1d, nn.Conv2d, nn.Conv3d][n]
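# Example (illustrative): conv_nd(2) is nn.Conv2d, so with the default Hiera settings the
# patch embed below is equivalent to:
#
#   proj = conv_nd(2)(3, 96, kernel_size=(7, 7), stride=(4, 4), padding=(3, 3))
#   proj(torch.randn(1, 3, 224, 224)).shape       # (1, 96, 56, 56)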
@register_notrace_function
def get_resized_mask(target_size: List[int], mask: torch.Tensor) -> torch.Tensor:
# target_size: [(T), (H), W]
# (spatial) mask: [B, C, (t), (h), w]
if mask is None:
return mask
_assert(len(mask.shape[2:]) == len(target_size), "mask spatial shape and target_size must match.")
if mask.shape[2:] != target_size:
return F.interpolate(mask.float(), size=target_size)
return mask
def undo_windowing(
x: torch.Tensor,
shape: List[int],
mu_shape: List[int],
) -> torch.Tensor:
"""
Restore spatial organization by undoing windowed organization of mask units.
Args:
        x: organized by mask unit windows, e.g. in 2d [B, #MUy*#MUx, MUy, MUx, C]
shape: current spatial shape, if it were not organized into mask unit
windows, e.g. in 2d [B, #MUy*MUy, #MUx*MUx, C].
mu_shape: current mask unit shape, e.g. in 2d [MUy, MUx]
Returns:
x: e.g. in 2d, [B, #MUy*MUy, #MUx*MUx, C]
"""
D = len(shape)
B, C = x.shape[0], x.shape[-1]
# [B, #MUy*#MUx, MUy, MUx, C] -> [B, #MUy, #MUx, MUy, MUx, C]
num_MUs = [s // mu for s, mu in zip(shape, mu_shape)]
x = x.view(B, *num_MUs, *mu_shape, C)
# [B, #MUy, #MUx, MUy, MUx, C] -> [B, #MUy*MUy, #MUx*MUx, C]
permute = (
[0]
+ sum([list(p) for p in zip(range(1, 1 + D), range(1 + D, 1 + 2 * D))], [])
+ [len(x.shape) - 1]
)
x = x.permute(permute).reshape(B, *shape, C)
return x
class Unroll(nn.Module):
"""
Reorders the tokens such that patches are contiguous in memory.
E.g., given [B, (H, W), C] and stride of (Sy, Sx), this will re-order the tokens as
[B, (Sy, Sx, H // Sy, W // Sx), C]
This allows operations like Max2d to be computed as x.view(B, Sx*Sy, -1, C).max(dim=1).
Not only is this faster, but it also makes it easy to support inputs of arbitrary
dimensions in addition to patch-wise sparsity.
    Performing this operation multiple times in sequence makes entire windows contiguous
    in memory. For instance, if you applied the stride (2, 2) 3 times, entire windows of
    size 8x8 would be contiguous in memory, allowing operations like mask unit attention
    to be computed easily and efficiently, while also allowing max to be applied sequentially.
Note: This means that intermediate values of the model are not in HxW order, so they
need to be re-rolled if you want to use the intermediate values as a HxW feature map.
The last block of the network is fine though, since by then the strides are all consumed.
"""
def __init__(
self,
input_size: Tuple[int, ...],
patch_stride: Tuple[int, ...],
unroll_schedule: List[Tuple[int, ...]],
):
super().__init__()
self.size = [i // s for i, s in zip(input_size, patch_stride)]
self.schedule = unroll_schedule
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Input: Flattened patch embeddings [B, N, C]
Output: Patch embeddings [B, N, C] permuted such that [B, 4, N//4, C].max(1) etc. performs MaxPoolNd
"""
B, _, C = x.shape
cur_size = self.size
x = x.view(*([B] + cur_size + [C]))
for strides in self.schedule:
# Move patches with the given strides to the batch dimension
# Create a view of the tensor with the patch stride as separate dims
# For example in 2d: [B, H // Sy, Sy, W // Sx, Sx, C]
cur_size = [i // s for i, s in zip(cur_size, strides)]
new_shape = [B] + sum([[i, s] for i, s in zip(cur_size, strides)], []) + [C]
x = x.view(new_shape)
# Move the patch stride into the batch dimension
# For example in 2d: [B, Sy, Sx, H // Sy, W // Sx, C]
L = len(new_shape)
permute = [0] + list(range(2, L - 1, 2)) + list(range(1, L - 1, 2)) + [L - 1]
x = x.permute(permute)
# Now finally flatten the relevant dims into the batch dimension
x = x.flatten(0, len(strides))
B *= math.prod(strides)
x = x.reshape(-1, math.prod(self.size), C)
return x
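# Minimal sketch of the re-ordering (illustrative): with an 8x8 token grid and a single
# (2, 2) unroll, tokens come out grouped so that a view + max over the first 4 "stride"
# positions is a 2x2, stride-2 max pool over the grid, as described in the docstring.
#
#   unroll = Unroll(input_size=(8, 8), patch_stride=(1, 1), unroll_schedule=[(2, 2)])
#   x = torch.randn(2, 64, 32)                             # [B, H*W, C]
#   y = unroll(x)                                          # still (2, 64, 32), re-ordered
#   pooled = y.view(2, 4, -1, 32).max(dim=1).values        # (2, 16, 32) == 2x2 max pool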
class Reroll(nn.Module):
"""
    Undoes the "unroll" operation so that you can use intermediate features.
"""
def __init__(
self,
input_size: Tuple[int, ...],
patch_stride: Tuple[int, ...],
unroll_schedule: List[Tuple[int, ...]],
stage_ends: List[int],
q_pool: int,
):
super().__init__()
self.size = [i // s for i, s in zip(input_size, patch_stride)]
# The first stage has to reverse everything
# The next stage has to reverse all but the first unroll, etc.
self.schedule = {}
size = self.size
for i in range(stage_ends[-1] + 1):
self.schedule[i] = unroll_schedule, size
# schedule unchanged if no pooling at a stage end
if i in stage_ends[:q_pool]:
if len(unroll_schedule) > 0:
size = [n // s for n, s in zip(size, unroll_schedule[0])]
unroll_schedule = unroll_schedule[1:]
def forward(
self,
x: torch.Tensor,
block_idx: int,
mask: torch.Tensor = None
) -> torch.Tensor:
"""
Roll the given tensor back up to spatial order assuming it's from the given block.
If no mask is provided:
- Returns [B, H, W, C] for 2d, [B, T, H, W, C] for 3d, etc.
If a mask is provided:
- Returns [B, #MUs, MUy, MUx, C] for 2d, etc.
"""
schedule, size = self.schedule[block_idx]
B, N, C = x.shape
D = len(size)
cur_mu_shape = [1] * D
for strides in schedule:
# Extract the current patch from N
x = x.view(B, *strides, N // math.prod(strides), *cur_mu_shape, C)
# Move that patch into the current MU
# Example in 2d: [B, Sy, Sx, N//(Sy*Sx), MUy, MUx, C] -> [B, N//(Sy*Sx), Sy, MUy, Sx, MUx, C]
L = len(x.shape)
permute = (
[0, 1 + D]
+ sum([list(p) for p in zip(range(1, 1 + D), range(1 + D + 1, L - 1))], [])
+ [L - 1]
)
x = x.permute(permute)
# Reshape to [B, N//(Sy*Sx), *MU, C]
for i in range(D):
cur_mu_shape[i] *= strides[i]
x = x.reshape(B, -1, *cur_mu_shape, C)
N = x.shape[1]
# Current shape (e.g., 2d: [B, #MUy*#MUx, MUy, MUx, C])
x = x.view(B, N, *cur_mu_shape, C)
# If masked, return [B, #MUs, MUy, MUx, C]
if mask is not None:
return x
# If not masked, we can return [B, H, W, C]
x = undo_windowing(x, size, cur_mu_shape)
return x
class MaskUnitAttention(nn.Module):
"""
    Computes either Mask Unit or Global Attention. It can also perform q pooling.
Note: this assumes the tokens have already been flattened and unrolled into mask units.
See `Unroll` for more details.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
dim_out: int,
heads: int,
q_stride: int = 1,
window_size: int = 0,
use_mask_unit_attn: bool = False,
):
"""
Args:
- dim, dim_out: The input and output feature dimensions.
- heads: The number of attention heads.
- q_stride: If greater than 1, pool q with this stride. The stride should be flattened (e.g., 2x2 = 4).
- window_size: The current (flattened) size of a mask unit *after* pooling (if any).
- use_mask_unit_attn: Use Mask Unit or Global Attention.
"""
super().__init__()
self.dim = dim
self.dim_out = dim_out
self.heads = heads
self.q_stride = q_stride
self.head_dim = dim_out // heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, 3 * dim_out)
self.proj = nn.Linear(dim_out, dim_out)
self.window_size = window_size
self.use_mask_unit_attn = use_mask_unit_attn
def forward(self, x: torch.Tensor) -> torch.Tensor:
""" Input should be of shape [batch, tokens, channels]. """
B, N, _ = x.shape
num_windows = (N // (self.q_stride * self.window_size)) if self.use_mask_unit_attn else 1
qkv = self.qkv(x).reshape(B, -1, num_windows, 3, self.heads, self.head_dim).permute(3, 0, 4, 2, 1, 5)
q, k, v = qkv.unbind(0)
if self.q_stride > 1:
# Refer to Unroll to see how this performs a maxpool-Nd
q = q.view(B, self.heads, num_windows, self.q_stride, -1, self.head_dim).amax(dim=3)
if self.fused_attn:
# Note: the original paper did *not* use SDPA, it's a free boost!
x = F.scaled_dot_product_attention(q, k, v)
else:
attn = (q * self.scale) @ k.transpose(-1, -2)
attn = attn.softmax(dim=-1)
x = attn @ v
x = x.transpose(1, 3).reshape(B, -1, self.dim_out)
x = self.proj(x)
return x
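# Shape sketch (illustrative; values match the first Hiera stage of a 224 input with
# patch_stride (4, 4) and mask_unit_size (8, 8), i.e. 56*56 tokens in 64-token mask units):
#
#   attn = MaskUnitAttention(dim=96, dim_out=96, heads=1, q_stride=1, window_size=64,
#                            use_mask_unit_attn=True)
#   y = attn(torch.randn(2, 3136, 96))   # (2, 3136, 96); attention is computed within
#                                        # 3136 // 64 == 49 independent mask-unit windows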
class HieraBlock(nn.Module):
def __init__(
self,
dim: int,
dim_out: int,
heads: int,
mlp_ratio: float = 4.0,
drop_path: float = 0.0,
init_values: Optional[float] = None,
norm_layer: nn.Module = nn.LayerNorm,
act_layer: nn.Module = nn.GELU,
q_stride: int = 1,
window_size: int = 0,
use_expand_proj: bool = True,
use_mask_unit_attn: bool = False,
):
super().__init__()
self.dim = dim
self.dim_out = dim_out
self.norm1 = norm_layer(dim)
if dim != dim_out:
self.do_expand = True
if use_expand_proj:
self.proj = nn.Linear(dim, dim_out)
else:
assert dim_out == dim * 2
self.proj = None
else:
self.do_expand = False
self.proj = None
self.attn = MaskUnitAttention(
dim,
dim_out,
heads,
q_stride,
window_size,
use_mask_unit_attn
)
self.ls1 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0 else nn.Identity()
self.norm2 = norm_layer(dim_out)
self.mlp = Mlp(dim_out, int(dim_out * mlp_ratio), act_layer=act_layer)
self.ls2 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0 else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Attention + Q Pooling
x_norm = self.norm1(x)
if self.do_expand:
if self.proj is not None:
x = self.proj(x_norm)
x = x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1) # max-pool
else:
x = torch.cat([
x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1), # max-pool
x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).mean(dim=1), # avg-pool
],
dim=-1,
)
x = x + self.drop_path1(self.ls1(self.attn(x_norm)))
# MLP
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class PatchEmbed(nn.Module):
"""Patch embed that supports any number of spatial dimensions (1d, 2d, 3d)."""
def __init__(
self,
dim_in: int,
dim_out: int,
kernel: Tuple[int, ...],
stride: Tuple[int, ...],
padding: Tuple[int, ...],
reshape: bool = True,
):
super().__init__()
# Support any number of spatial dimensions
self.spatial_dims = len(kernel)
self.reshape = reshape
self.proj = conv_nd(self.spatial_dims)(
dim_in,
dim_out,
kernel_size=kernel,
stride=stride,
padding=padding,
)
def forward(
self,
x: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if mask is not None:
mask = get_resized_mask(target_size=x.shape[2:], mask=mask)
x = self.proj(x * mask.to(torch.bool))
else:
x = self.proj(x)
if self.reshape:
x = x.reshape(x.shape[0], x.shape[1], -1).transpose(2, 1)
return x
class Hiera(nn.Module):
def __init__(
self,
img_size: Tuple[int, ...] = (224, 224),
in_chans: int = 3,
embed_dim: int = 96, # initial embed dim
num_heads: int = 1, # initial number of heads
num_classes: int = 1000,
global_pool: str = 'avg',
stages: Tuple[int, ...] = (2, 3, 16, 3),
q_pool: int = 3, # number of q_pool stages
q_stride: Tuple[int, ...] = (2, 2),
mask_unit_size: Tuple[int, ...] = (8, 8), # must divide q_stride ** (#stages-1)
# mask_unit_attn: which stages use mask unit attention?
mask_unit_attn: Tuple[bool, ...] = (True, True, False, False),
use_expand_proj: bool = True,
dim_mul: float = 2.0,
head_mul: float = 2.0,
patch_kernel: Tuple[int, ...] = (7, 7),
patch_stride: Tuple[int, ...] = (4, 4),
patch_padding: Tuple[int, ...] = (3, 3),
mlp_ratio: float = 4.0,
drop_path_rate: float = 0.0,
init_values: Optional[float] = None,
fix_init: bool = True,
weight_init: str = '',
norm_layer: Union[str, nn.Module] = "LayerNorm",
drop_rate: float = 0.0,
patch_drop_rate: float = 0.0,
head_init_scale: float = 0.001,
sep_pos_embed: bool = False,
abs_win_pos_embed: bool = False,
global_pos_size: Tuple[int, int] = (14, 14),
):
super().__init__()
self.num_classes = num_classes
self.grad_checkpointing = False
norm_layer = get_norm_layer(norm_layer)
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
self.patch_stride = patch_stride
self.tokens_spatial_shape = [i // s for i, s in zip(img_size, patch_stride)]
num_tokens = math.prod(self.tokens_spatial_shape)
flat_mu_size = math.prod(mask_unit_size)
flat_q_stride = math.prod(q_stride)
assert q_pool < len(stages)
self.q_pool, self.q_stride = q_pool, q_stride
self.mu_size, self.mask_unit_size = flat_mu_size, mask_unit_size
self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, self.mask_unit_size)]
self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
self.patch_drop_rate = patch_drop_rate
self.patch_embed = PatchEmbed(
in_chans,
embed_dim,
patch_kernel,
patch_stride,
patch_padding,
)
self.pos_embed: Optional[nn.Parameter] = None
self.pos_embed_win: Optional[nn.Parameter] = None
self.pos_embed_spatial: Optional[nn.Parameter] = None
self.pos_embed_temporal: Optional[nn.Parameter] = None
if sep_pos_embed:
self.pos_embed_spatial = nn.Parameter(
torch.zeros(1, self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2], embed_dim)
)
self.pos_embed_temporal = nn.Parameter(
torch.zeros(1, self.tokens_spatial_shape[0], embed_dim)
)
else:
if abs_win_pos_embed:
# absolute win, params NCHW to make tile & interpolate more natural before add & reshape
self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *global_pos_size))
self.pos_embed_win = nn.Parameter(torch.zeros(1, embed_dim, *mask_unit_size))
else:
self.pos_embed = nn.Parameter(torch.zeros(1, num_tokens, embed_dim))
# Setup roll and reroll modules
self.unroll = Unroll(
img_size,
patch_stride,
[q_stride] * len(self.stage_ends[:-1])
)
self.reroll = Reroll(
img_size,
patch_stride,
[q_stride] * len(self.stage_ends[:-1]),
self.stage_ends,
q_pool,
)
# q_pool locations
q_pool_blocks = [x + 1 for x in self.stage_ends[:q_pool]]
# Transformer blocks
cur_stage = 0
depth = sum(stages)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList()
self.feature_info = []
for i in range(depth):
dim_out = embed_dim
# Mask unit or global attention.
            # Lag by 1 block so that global attention is
            # applied post-pooling, on the lower resolution.
use_mask_unit_attn = mask_unit_attn[cur_stage]
if i - 1 in self.stage_ends:
dim_out = int(embed_dim * dim_mul)
num_heads = int(num_heads * head_mul)
cur_stage += 1
if i in q_pool_blocks:
flat_mu_size //= flat_q_stride
block = HieraBlock(
dim=embed_dim,
dim_out=dim_out,
heads=num_heads,
mlp_ratio=mlp_ratio,
drop_path=dpr[i],
init_values=init_values,
norm_layer=norm_layer,
q_stride=(flat_q_stride if i in q_pool_blocks else 1),
window_size=flat_mu_size,
use_expand_proj=use_expand_proj,
use_mask_unit_attn=use_mask_unit_attn,
)
embed_dim = dim_out
if i in self.stage_ends:
self.feature_info += [
dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')]
self.blocks.append(block)
self.num_features = self.head_hidden_size = embed_dim
self.head = ClNormMlpClassifierHead(
embed_dim,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
norm_layer=norm_layer,
input_fmt='NLC',
)
# Initialize everything
if sep_pos_embed:
nn.init.trunc_normal_(self.pos_embed_spatial, std=0.02)
nn.init.trunc_normal_(self.pos_embed_temporal, std=0.02)
else:
if self.pos_embed is not None:
nn.init.trunc_normal_(self.pos_embed, std=0.02)
if self.pos_embed_win is not None:
nn.init.trunc_normal_(self.pos_embed_win, std=0.02)
if weight_init != 'skip':
init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit
init_fn = partial(init_fn, classifier_name='head.fc')
named_apply(init_fn, self)
if fix_init:
self.fix_init_weight()
if isinstance(self.head.fc, nn.Linear):
self.head.fc.weight.data.mul_(head_init_scale)
self.head.fc.bias.data.mul_(head_init_scale)
def fix_init_weight(self):
def rescale(param, _layer_id):
param.div_(math.sqrt(2.0 * _layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
@torch.jit.ignore
def no_weight_decay(self):
        if self.pos_embed_win is not None:
            return ['pos_embed', 'pos_embed_win']
        elif self.pos_embed is not None:
            return ['pos_embed']
        else:
            return ['pos_embed_spatial', 'pos_embed_temporal']
@torch.jit.ignore
def group_matcher(self, coarse: bool = False) -> Dict:
return dict(
stem=r'^pos_embed|pos_embed_spatial|pos_embed_temporal|pos_embed_abs|pos_embed_win|patch_embed',
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True) -> None:
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool, reset_other=reset_other)
def get_random_mask(self, x: torch.Tensor, mask_ratio: float) -> torch.Tensor:
"""
        Generates a random mask in which a mask_ratio fraction of the mask units is dropped.
1 is *keep*, 0 is *remove*. Useful for MAE, FLIP, etc.
"""
B = x.shape[0]
# Tokens selected for masking at mask unit level
num_windows = math.prod(self.mask_spatial_shape) # num_mask_units
len_keep = int(num_windows * (1 - mask_ratio))
noise = torch.rand(B, num_windows, device=x.device)
# Sort noise for each sample
ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
ids_restore = torch.argsort(ids_shuffle, dim=1)
# Generate the binary mask: 1 is *keep*, 0 is *remove*
# Note this is opposite to original MAE
mask = torch.zeros([B, num_windows], device=x.device)
mask[:, :len_keep] = 1
# Unshuffle to get the binary mask
mask = torch.gather(mask, dim=1, index=ids_restore)
return mask.bool()
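    # Illustrative sketch (hiera_model is any Hiera instance built with the defaults): with a
    # 224 input, patch_stride (4, 4) and mask_unit_size (8, 8), mask_spatial_shape is [7, 7],
    # i.e. 49 mask units per image, so mask_ratio=0.6 keeps int(49 * 0.4) == 19 units.
    #
    #   m = hiera_model.get_random_mask(torch.randn(2, 3, 224, 224), mask_ratio=0.6)
    #   m.shape                # (2, 49), dtype torch.bool; 1 = keep, 0 = remove
    #   int(m[0].sum())        # 19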
def _pos_embed(self, x) -> torch.Tensor:
if self.pos_embed_win is not None:
# absolute win position embedding, from
# Window Attention is Bugged: How not to Interpolate Position Embeddings (https://arxiv.org/abs/2311.05613)
pos_embed_win = self.pos_embed_win.tile(self.mask_spatial_shape)
pos_embed = F.interpolate(
self.pos_embed,
size=pos_embed_win.shape[-2:],
mode='bicubic',
antialias=True,
)
pos_embed = pos_embed + pos_embed_win
pos_embed = pos_embed.flatten(2).transpose(1, 2)
elif self.pos_embed is not None:
pos_embed = self.pos_embed
else:
pos_embed = (
self.pos_embed_spatial.repeat(1, self.tokens_spatial_shape[0], 1)
+
torch.repeat_interleave(
self.pos_embed_temporal,
self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2],
dim=1,
)
)
x = x + pos_embed
return x
def forward_intermediates(
self,
x: torch.Tensor,
mask: Optional[torch.Tensor] = None,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = True,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
coarse: bool = True,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
"""
assert not norm, 'normalization of features not supported'
assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.'
if coarse:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
else:
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
if mask is not None:
patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape
else:
patch_mask = None
x = self.patch_embed(x, mask=patch_mask)
x = self._pos_embed(x)
x = self.unroll(x)
# Discard masked tokens
if mask is not None:
x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1])
intermediates = []
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
for i, blk in enumerate(blocks):
x = blk(x)
if i in take_indices:
x_int = self.reroll(x, i, mask=mask)
intermediates.append(x_int.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x_int)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
coarse: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
if coarse:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
max_index = self.stage_ends[max_index]
else:
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
self.blocks = self.blocks[:max_index + 1] # truncate blocks
if prune_head:
self.head.reset(0, reset_other=True)
return take_indices
def forward_features(
self,
x: torch.Tensor,
mask: Optional[torch.Tensor] = None,
return_intermediates: bool = False,
) -> torch.Tensor:
"""
mask should be a boolean tensor of shape [B, #MUt*#MUy*#MUx] where #MU are the number of mask units in that dim.
Note: 1 in mask is *keep*, 0 is *remove*; mask.sum(dim=-1) should be the same across the batch.
"""
if self.training and self.patch_drop_rate > 0:
# using mask for something like 'patch dropout' via mask-units in supervised train / fine-tune
assert mask is None
mask = self.get_random_mask(x, mask_ratio=self.patch_drop_rate)
if mask is not None:
patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape
else:
patch_mask = None
x = self.patch_embed(x, mask=patch_mask)
x = self._pos_embed(x)
x = self.unroll(x)
# Discard masked tokens
if mask is not None:
x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1])
intermediates = []
for i, blk in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x)
else:
x = blk(x)
if return_intermediates and i in self.stage_ends:
intermediates.append(self.reroll(x, i, mask=mask))
# x may not always be in spatial order here.
# e.g. if q_pool = 2, mask_unit_size = (8, 8), and
        # q_stride = (2, 2), not all unrolls were consumed;
# intermediates[-1] is x in spatial order
if return_intermediates:
return x, intermediates
return x
def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
return x
def forward(
self,
x: torch.Tensor,
mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = self.forward_features(x, mask=mask)
if mask is None:
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
"hiera_tiny_224.mae_in1k_ft_in1k": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
),
"hiera_tiny_224.mae": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
num_classes=0,
),
"hiera_small_224.mae_in1k_ft_in1k": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
),
"hiera_small_224.mae": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
num_classes=0,
),
"hiera_base_224.mae_in1k_ft_in1k": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
),
"hiera_base_224.mae": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
num_classes=0,
),
"hiera_base_plus_224.mae_in1k_ft_in1k": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
),
"hiera_base_plus_224.mae": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
num_classes=0,
),
"hiera_large_224.mae_in1k_ft_in1k": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
),
"hiera_large_224.mae": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
num_classes=0,
),
"hiera_huge_224.mae_in1k_ft_in1k": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
),
"hiera_huge_224.mae": _cfg(
hf_hub_id='timm/',
license='cc-by-nc-4.0',
num_classes=0,
),
"hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k": _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), crop_pct=0.95,
),
"hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k": _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), crop_pct=0.95,
),
"hiera_small_abswin_256.sbb2_e200_in12k": _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 256, 256), crop_pct=0.95,
),
"hiera_small_abswin_256.sbb2_pd_e200_in12k": _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 256, 256), crop_pct=0.95,
),
"hiera_base_abswin_256.untrained": _cfg(
# hf_hub_id='timm/',
input_size=(3, 256, 256), crop_pct=0.95,
),
})
def checkpoint_filter_fn(state_dict, model=None):
state_dict = state_dict.get('model_state', state_dict)
output = {}
for k, v in state_dict.items():
# if k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# # To resize pos embedding when using model at different size from pretrained weights
# from timm.layers import resample_abs_pos_embed
# v = resample_abs_pos_embed(
# v,
# new_size=(64, 64),
# num_prefix_tokens=0,
# verbose=True,
# )
if 'head.projection.' in k:
k = k.replace('head.projection.', 'head.fc.')
if k.startswith('encoder_norm.'):
k = k.replace('encoder_norm.', 'head.norm.')
elif k.startswith('norm.'):
k = k.replace('norm.', 'head.norm.')
if k == 'pos_embed_abs':
k = 'pos_embed'
output[k] = v
return output
def _create_hiera(variant: str, pretrained: bool = False, **kwargs) -> Hiera:
out_indices = kwargs.pop('out_indices', 4)
return build_model_with_cfg(
Hiera,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
@register_model
def hiera_tiny_224(pretrained=False, **kwargs):
model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 7, 2))
return _create_hiera('hiera_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def hiera_small_224(pretrained=False, **kwargs):
model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 11, 2))
return _create_hiera('hiera_small_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def hiera_base_224(pretrained=False, **kwargs):
model_args = dict(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
return _create_hiera('hiera_base_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def hiera_base_plus_224(pretrained=False, **kwargs):
model_args = dict(embed_dim=112, num_heads=2, stages=(2, 3, 16, 3))
return _create_hiera('hiera_base_plus_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def hiera_large_224(pretrained=False, **kwargs):
model_args = dict(embed_dim=144, num_heads=2, stages=(2, 6, 36, 4))
return _create_hiera('hiera_large_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def hiera_huge_224(pretrained=False, **kwargs):
model_args = dict(embed_dim=256, num_heads=4, stages=(2, 6, 36, 4))
return _create_hiera('hiera_huge_224', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def hiera_small_abswin_256(pretrained=False, **kwargs):
model_args = dict(
embed_dim=96, num_heads=1, stages=(1, 2, 11, 2), abs_win_pos_embed=True, global_pos_size=(16, 16),
init_values=1e-5, weight_init='jax', use_expand_proj=False,
)
return _create_hiera('hiera_small_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def hiera_base_abswin_256(pretrained=False, **kwargs):
model_args = dict(
embed_dim=96, num_heads=1, stages=(2, 3, 16, 3), abs_win_pos_embed=True, init_values=1e-5, weight_init='jax')
return _create_hiera('hiera_base_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs))
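

# ---------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It assumes the `timm` package is
# installed and that the entrypoints registered above are available via `timm.create_model`.
# The `mask_ratio=0.6` value is an arbitrary example choice.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('hiera_tiny_224', pretrained=False)
    imgs = torch.randn(2, 3, 224, 224)
    # 1 = keep, 0 = remove, one entry per mask unit (see Hiera.forward_features docstring)
    mask = model.get_random_mask(imgs, mask_ratio=0.6)
    feats = model(imgs, mask=mask)  # masked forward returns unpooled token features
    print(feats.shape)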

# ==== source: pytorch-image-models/timm/models/hiera.py ====

""" NasNet-A (Large)
nasnetalarge implementation grabbed from Cadene's pretrained models
https://github.com/Cadene/pretrained-models.pytorch
"""
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['NASNetALarge']
class ActConvBn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''):
super(ActConvBn, self).__init__()
self.act = nn.ReLU()
self.conv = create_conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1)
def forward(self, x):
x = self.act(x)
x = self.conv(x)
x = self.bn(x)
return x
class SeparableConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''):
super(SeparableConv2d, self).__init__()
self.depthwise_conv2d = create_conv2d(
in_channels, in_channels, kernel_size=kernel_size,
stride=stride, padding=padding, groups=in_channels)
self.pointwise_conv2d = create_conv2d(
in_channels, out_channels, kernel_size=1, padding=0)
def forward(self, x):
x = self.depthwise_conv2d(x)
x = self.pointwise_conv2d(x)
return x
class BranchSeparables(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False):
super(BranchSeparables, self).__init__()
middle_channels = out_channels if stem_cell else in_channels
self.act_1 = nn.ReLU()
self.separable_1 = SeparableConv2d(
in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type)
self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1)
self.act_2 = nn.ReLU(inplace=True)
self.separable_2 = SeparableConv2d(
middle_channels, out_channels, kernel_size, stride=1, padding=pad_type)
self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1)
def forward(self, x):
x = self.act_1(x)
x = self.separable_1(x)
x = self.bn_sep_1(x)
x = self.act_2(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
class CellStem0(nn.Module):
def __init__(self, stem_size, num_channels=42, pad_type=''):
super(CellStem0, self).__init__()
self.num_channels = num_channels
self.stem_size = stem_size
self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1)
self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x):
x1 = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x1)
x_comb_iter_0_right = self.comb_iter_0_right(x)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x1)
x_comb_iter_1_right = self.comb_iter_1_right(x)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x1)
x_comb_iter_2_right = self.comb_iter_2_right(x)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x1)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class CellStem1(nn.Module):
def __init__(self, stem_size, num_channels, pad_type=''):
super(CellStem1, self).__init__()
self.num_channels = num_channels
self.stem_size = stem_size
self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1)
self.act = nn.ReLU()
self.path_1 = nn.Sequential()
self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False))
self.path_2 = nn.Sequential()
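        # negative left/top padding crops one pixel, shifting the map before the stride-2 pool,
        # so this path samples positions offset from path_1 (NASNet-style factorized reduction)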
self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1)))
self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False))
self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1)
self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x_conv0, x_stem_0):
x_left = self.conv_1x1(x_stem_0)
x_relu = self.act(x_conv0)
# path 1
x_path1 = self.path_1(x_relu)
# path 2
x_path2 = self.path_2(x_relu)
# final path
x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
x_comb_iter_0_left = self.comb_iter_0_left(x_left)
x_comb_iter_0_right = self.comb_iter_0_right(x_right)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_right)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_left)
x_comb_iter_2_right = self.comb_iter_2_right(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_left)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class FirstCell(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(FirstCell, self).__init__()
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1)
self.act = nn.ReLU()
self.path_1 = nn.Sequential()
self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False))
self.path_2 = nn.Sequential()
self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1)))
self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False))
self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
def forward(self, x, x_prev):
x_relu = self.act(x_prev)
x_path1 = self.path_1(x_relu)
x_path2 = self.path_2(x_relu)
x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_left
x_comb_iter_3_left = self.comb_iter_3_left(x_left)
x_comb_iter_3_right = self.comb_iter_3_right(x_left)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_right
x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class NormalCell(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(NormalCell, self).__init__()
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type)
self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_left)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2 = x_comb_iter_2_left + x_left
x_comb_iter_3_left = self.comb_iter_3_left(x_left)
x_comb_iter_3_right = self.comb_iter_3_right(x_left)
x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
x_comb_iter_4_left = self.comb_iter_4_left(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_right
x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class ReductionCell0(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(ReductionCell0, self).__init__()
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_left)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class ReductionCell1(nn.Module):
def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
super(ReductionCell1, self).__init__()
self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type)
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type)
self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type)
self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type)
self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type)
self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type)
self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type)
def forward(self, x, x_prev):
x_left = self.conv_prev_1x1(x_prev)
x_right = self.conv_1x1(x)
x_comb_iter_0_left = self.comb_iter_0_left(x_right)
x_comb_iter_0_right = self.comb_iter_0_right(x_left)
x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
x_comb_iter_1_left = self.comb_iter_1_left(x_right)
x_comb_iter_1_right = self.comb_iter_1_right(x_left)
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
x_comb_iter_2_left = self.comb_iter_2_left(x_right)
x_comb_iter_2_right = self.comb_iter_2_right(x_left)
x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
x_comb_iter_4_right = self.comb_iter_4_right(x_right)
x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
return x_out
class NASNetALarge(nn.Module):
"""NASNetALarge (6 @ 4032) """
def __init__(
self,
num_classes=1000,
in_chans=3,
stem_size=96,
channel_multiplier=2,
num_features=4032,
output_stride=32,
drop_rate=0.,
global_pool='avg',
pad_type='same',
):
super(NASNetALarge, self).__init__()
self.num_classes = num_classes
self.stem_size = stem_size
self.num_features = self.head_hidden_size = num_features
self.channel_multiplier = channel_multiplier
assert output_stride == 32
channels = self.num_features // 24
        # 24 is the default divisor for this architecture; num_features // 24 gives the base cell channel count
self.conv0 = ConvNormAct(
in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2,
norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False)
self.cell_stem_0 = CellStem0(
self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type)
self.cell_stem_1 = CellStem1(
self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type)
self.cell_0 = FirstCell(
in_chs_left=channels, out_chs_left=channels // 2,
in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_1 = NormalCell(
in_chs_left=2 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_2 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_3 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_4 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.cell_5 = NormalCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type)
self.reduction_cell_0 = ReductionCell0(
in_chs_left=6 * channels, out_chs_left=2 * channels,
in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_6 = FirstCell(
in_chs_left=6 * channels, out_chs_left=channels,
in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_7 = NormalCell(
in_chs_left=8 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_8 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_9 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_10 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.cell_11 = NormalCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type)
self.reduction_cell_1 = ReductionCell1(
in_chs_left=12 * channels, out_chs_left=4 * channels,
in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_12 = FirstCell(
in_chs_left=12 * channels, out_chs_left=2 * channels,
in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_13 = NormalCell(
in_chs_left=16 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_14 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_15 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_16 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.cell_17 = NormalCell(
in_chs_left=24 * channels, out_chs_left=4 * channels,
in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type)
self.act = nn.ReLU(inplace=True)
self.feature_info = [
dict(num_chs=96, reduction=2, module='conv0'),
dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'),
dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'),
dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'),
dict(num_chs=4032, reduction=32, module='act'),
]
self.global_pool, self.head_drop, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv0|cell_stem_[01]',
blocks=[
(r'^cell_(\d+)', None),
(r'^reduction_cell_0', (6,)),
(r'^reduction_cell_1', (12,)),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.last_linear
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x_conv0 = self.conv0(x)
x_stem_0 = self.cell_stem_0(x_conv0)
x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0)
x_cell_0 = self.cell_0(x_stem_1, x_stem_0)
x_cell_1 = self.cell_1(x_cell_0, x_stem_1)
x_cell_2 = self.cell_2(x_cell_1, x_cell_0)
x_cell_3 = self.cell_3(x_cell_2, x_cell_1)
x_cell_4 = self.cell_4(x_cell_3, x_cell_2)
x_cell_5 = self.cell_5(x_cell_4, x_cell_3)
x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4)
x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4)
x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0)
x_cell_8 = self.cell_8(x_cell_7, x_cell_6)
x_cell_9 = self.cell_9(x_cell_8, x_cell_7)
x_cell_10 = self.cell_10(x_cell_9, x_cell_8)
x_cell_11 = self.cell_11(x_cell_10, x_cell_9)
x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10)
x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10)
x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1)
x_cell_14 = self.cell_14(x_cell_13, x_cell_12)
x_cell_15 = self.cell_15(x_cell_14, x_cell_13)
x_cell_16 = self.cell_16(x_cell_15, x_cell_14)
x_cell_17 = self.cell_17(x_cell_16, x_cell_15)
x = self.act(x_cell_17)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.last_linear(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_nasnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
NASNetALarge,
variant,
pretrained,
feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model
**kwargs,
)
default_cfgs = generate_default_cfgs({
'nasnetalarge.tf_in1k': {
'hf_hub_id': 'timm/',
'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth',
'input_size': (3, 331, 331),
'pool_size': (11, 11),
'crop_pct': 0.911,
'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5),
'std': (0.5, 0.5, 0.5),
'num_classes': 1000,
'first_conv': 'conv0.conv',
'classifier': 'last_linear',
},
})
@register_model
def nasnetalarge(pretrained=False, **kwargs) -> NASNetALarge:
"""NASNet-A large model architecture.
"""
model_kwargs = dict(pad_type='same', **kwargs)
return _create_nasnet('nasnetalarge', pretrained, **model_kwargs)
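

# ---------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Assumes `timm` is installed; the
# hook-based feature extraction configured in `_create_nasnet` above exposes the stages listed
# in `feature_info`.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('nasnetalarge', pretrained=False, features_only=True)
    outputs = model(torch.randn(1, 3, 331, 331))
    for idx, feat in enumerate(outputs):
        print(idx, feat.shape)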

# ==== source: pytorch-image-models/timm/models/nasnet.py ====

|
""" ReXNet
A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` -
https://arxiv.org/abs/2007.00992
Adapted from original impl at https://github.com/clovaai/rexnet
Copyright (c) 2020-present NAVER Corp. MIT license
Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman
Copyright 2020 Ross Wightman
"""
from functools import partial
from math import ceil
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule
from ._builder import build_model_with_cfg
from ._efficientnet_builder import efficientnet_init_weights
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['RexNet'] # model_registry will add each entrypoint fn to this
SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d)
class LinearBottleneck(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride,
dilation=(1, 1),
exp_ratio=1.0,
se_ratio=0.,
ch_div=1,
act_layer='swish',
dw_act_layer='relu6',
drop_path=None,
):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and in_chs <= out_chs
self.in_channels = in_chs
self.out_channels = out_chs
if exp_ratio != 1.:
dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div)
self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer)
else:
dw_chs = in_chs
self.conv_exp = None
self.conv_dw = ConvNormAct(
dw_chs,
dw_chs,
kernel_size=3,
stride=stride,
dilation=dilation[0],
groups=dw_chs,
apply_act=False,
)
if se_ratio > 0:
self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div))
else:
self.se = None
self.act_dw = create_act_layer(dw_act_layer)
self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False)
self.drop_path = drop_path
def feat_channels(self, exp=False):
return self.conv_dw.out_channels if exp else self.out_channels
def forward(self, x):
shortcut = x
if self.conv_exp is not None:
x = self.conv_exp(x)
x = self.conv_dw(x)
if self.se is not None:
x = self.se(x)
x = self.act_dw(x)
x = self.conv_pwl(x)
if self.use_shortcut:
if self.drop_path is not None:
x = self.drop_path(x)
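            # partial residual: only the first `in_channels` channels receive the identity
            # shortcut, the remaining (expanded) channels pass through unchanged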
x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1)
return x
def _block_cfg(
width_mult=1.0,
depth_mult=1.0,
initial_chs=16,
final_chs=180,
se_ratio=0.,
ch_div=1,
):
layers = [1, 2, 2, 3, 3, 5]
strides = [1, 2, 2, 2, 1, 2]
layers = [ceil(element * depth_mult) for element in layers]
strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], [])
exp_ratios = [1] * layers[0] + [6] * sum(layers[1:])
depth = sum(layers[:]) * 3
base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs
    # The following channel configuration is a simple scheme that makes every layer an expand layer:
    # output channel targets grow roughly linearly across blocks (see the sketch after this function).
out_chs_list = []
for i in range(depth // 3):
out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div))
base_chs += final_chs / (depth // 3 * 1.0)
se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:])
return list(zip(out_chs_list, exp_ratios, strides, se_ratios))
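

# Illustrative sketch (not part of the original file): inspect what `_block_cfg` produces for the
# default ReXNet 1.0x settings. Output channels grow roughly linearly from `initial_chs` toward
# `initial_chs + final_chs`, which is what makes every block an expand layer. The helper name is
# hypothetical and only for demonstration.
def _print_default_block_cfg():
    cfg = _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=1 / 12., ch_div=1)
    for idx, (chs, exp_ratio, stride, se_ratio) in enumerate(cfg):
        print(f'block {idx:2d}: out_chs={chs:4d} exp_ratio={exp_ratio} stride={stride} se={se_ratio:.3f}')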
def _build_blocks(
block_cfg,
prev_chs,
width_mult,
ch_div=1,
output_stride=32,
act_layer='swish',
dw_act_layer='relu6',
drop_path_rate=0.,
):
feat_chs = [prev_chs]
feature_info = []
curr_stride = 2
dilation = 1
features = []
num_blocks = len(block_cfg)
for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg):
next_dilation = dilation
if stride > 1:
fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}'
feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)]
if curr_stride >= output_stride:
next_dilation = dilation * stride
stride = 1
block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule
drop_path = DropPath(block_dpr) if block_dpr > 0. else None
features.append(LinearBottleneck(
in_chs=prev_chs,
out_chs=chs,
exp_ratio=exp_ratio,
stride=stride,
dilation=(dilation, next_dilation),
se_ratio=se_ratio,
ch_div=ch_div,
act_layer=act_layer,
dw_act_layer=dw_act_layer,
drop_path=drop_path,
))
curr_stride *= stride
dilation = next_dilation
prev_chs = chs
feat_chs += [features[-1].feat_channels()]
pen_chs = make_divisible(1280 * width_mult, divisor=ch_div)
feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')]
features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer))
return features, feature_info
class RexNet(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
initial_chs=16,
final_chs=180,
width_mult=1.0,
depth_mult=1.0,
se_ratio=1/12.,
ch_div=1,
act_layer='swish',
dw_act_layer='relu6',
drop_rate=0.2,
drop_path_rate=0.,
):
super(RexNet, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
assert output_stride in (32, 16, 8)
stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32
stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div)
self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer)
block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div)
features, self.feature_info = _build_blocks(
block_cfg,
stem_chs,
width_mult,
ch_div,
output_stride,
act_layer,
dw_act_layer,
drop_path_rate,
)
self.num_features = self.head_hidden_size = features[-1].out_channels
self.features = nn.Sequential(*features)
self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate)
efficientnet_init_weights(self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^features\.(\d+)',
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.features, x, flatten=True)
else:
x = self.features(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_rexnet(variant, pretrained, **kwargs):
feature_cfg = dict(flatten_sequential=True)
return build_model_with_cfg(
RexNet,
variant,
pretrained,
feature_cfg=feature_cfg,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnetr_100.untrained': _cfg(),
'rexnetr_130.untrained': _cfg(),
'rexnetr_150.untrained': _cfg(),
'rexnetr_200.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_300.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_200.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_300.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
})
@register_model
def rexnet_100(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.0x"""
return _create_rexnet('rexnet_100', pretrained, **kwargs)
@register_model
def rexnet_130(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.3x"""
return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs)
@register_model
def rexnet_150(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.5x"""
return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs)
@register_model
def rexnet_200(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 2.0x"""
return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs)
@register_model
def rexnet_300(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 3.0x"""
return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs)
@register_model
def rexnetr_100(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.0x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs)
@register_model
def rexnetr_130(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.3x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs)
@register_model
def rexnetr_150(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.5x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs)
@register_model
def rexnetr_200(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 2.0x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs)
@register_model
def rexnetr_300(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 3.0x w/ rounded (mod 16) channels"""
return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs)
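

# ---------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Assumes `timm` is installed; shows
# classification and `features_only` extraction for the ReXNet 1.0x entrypoint defined above.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('rexnet_100', pretrained=False)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # (1, 1000)

    feat_model = timm.create_model('rexnet_100', pretrained=False, features_only=True)
    for idx, feat in enumerate(feat_model(torch.randn(1, 3, 224, 224))):
        print(idx, feat.shape)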

# ==== source: pytorch-image-models/timm/models/rexnet.py ====

|
""" Relative Position Vision Transformer (ViT) in PyTorch
NOTE: these models are experimental / WIP, expect changes
Hacked together by / Copyright 2022, Ross Wightman
"""
import logging
import math
from functools import partial
from typing import List, Optional, Tuple, Type, Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import torch
import torch.nn as nn
from torch.jit import Final
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn, LayerType
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import named_apply
from ._registry import generate_default_cfgs, register_model
from .vision_transformer import get_init_weights_vit
__all__ = ['VisionTransformerRelPos'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
class RelPosAttention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
attn_drop=0.,
proj_drop=0.,
norm_layer=nn.LayerNorm,
):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q = self.q_norm(q)
k = self.k_norm(k)
if self.fused_attn:
if self.rel_pos is not None:
attn_bias = self.rel_pos.get_bias()
elif shared_rel_pos is not None:
attn_bias = shared_rel_pos
else:
attn_bias = None
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if self.rel_pos is not None:
attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class RelPosBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
init_values=None,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = RelPosAttention(
dim,
num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ResPostRelPosBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
init_values=None,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.init_values = init_values
self.attn = RelPosAttention(
dim,
num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.init_weights()
def init_weights(self):
# NOTE this init overrides that base model init with specific changes for the block type
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class VisionTransformerRelPos(nn.Module):
""" Vision Transformer w/ Relative Position Bias
Differing from classic vit, this impl
* uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed
* defaults to no class token (can be enabled)
* defaults to global avg pool for head (can be changed)
* layer-scale (residual branch gain) enabled
"""
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: Literal['', 'avg', 'token', 'map'] = 'avg',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_norm: bool = False,
init_values: Optional[float] = 1e-6,
class_token: bool = False,
fc_norm: bool = False,
rel_pos_type: str = 'mlp',
rel_pos_dim: Optional[int] = None,
shared_rel_pos: bool = False,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init: Literal['skip', 'jax', 'moco', ''] = 'skip',
fix_init: bool = False,
embed_layer: Type[nn.Module] = PatchEmbed,
norm_layer: Optional[LayerType] = None,
act_layer: Optional[LayerType] = None,
block_fn: Type[nn.Module] = RelPosBlock
):
"""
Args:
img_size: input image size
patch_size: patch size
in_chans: number of input channels
num_classes: number of classes for classification head
global_pool: type of global pooling for final sequence (default: 'avg')
embed_dim: embedding dimension
depth: depth of transformer
num_heads: number of attention heads
mlp_ratio: ratio of mlp hidden dim to embedding dim
qkv_bias: enable bias for qkv if True
qk_norm: Enable normalization of query and key in attention
init_values: layer-scale init values
class_token: use class token (default: False)
fc_norm: use pre classifier norm instead of pre-pool
            rel_pos_type: type of relative position embedding; 'mlp' variants use RelPosMlp, otherwise RelPosBias
            rel_pos_dim: optional hidden dim override for the mlp relative position embedding
            shared_rel_pos: share relative pos across all blocks
drop_rate: dropout rate
proj_drop_rate: projection dropout rate
attn_drop_rate: attention dropout rate
drop_path_rate: stochastic depth rate
weight_init: weight init scheme
fix_init: apply weight initialization fix (scaling w/ layer index)
embed_layer: patch embedding layer
norm_layer: normalization layer
act_layer: MLP activation layer
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
assert class_token or global_pool != 'token'
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
feat_size = self.patch_embed.grid_size
r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size
rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens)
if rel_pos_type.startswith('mlp'):
if rel_pos_dim:
rel_pos_args['hidden_dim'] = rel_pos_dim
if 'swin' in rel_pos_type:
rel_pos_args['mode'] = 'swin'
rel_pos_cls = partial(RelPosMlp, **rel_pos_args)
else:
rel_pos_cls = partial(RelPosBias, **rel_pos_args)
self.shared_rel_pos = None
if shared_rel_pos:
self.shared_rel_pos = rel_pos_cls(num_heads=num_heads)
# NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both...
rel_pos_cls = None
self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
)
for i in range(depth)])
self.feature_info = [
dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]
self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity()
# Classifier Head
self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
if fix_init:
self.fix_init_weight()
def init_weights(self, mode=''):
assert mode in ('jax', 'moco', '')
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(get_init_weights_vit(mode), self)
def fix_init_weight(self):
def rescale(param, _layer_id):
param.div_(math.sqrt(2.0 * _layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
@torch.jit.ignore
def no_weight_decay(self):
return {'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
return_prefix_tokens: bool = False,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
return_prefix_tokens: Return both prefix and spatial intermediate tokens
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            A list of intermediate features if `intermediates_only` is True, otherwise a tuple of
            (final features, list of intermediate features).
        """
assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
reshape = output_fmt == 'NCHW'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# forward pass
B, _, height, width = x.shape
x = self.patch_embed(x)
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
for i, blk in enumerate(blocks):
x = blk(x, shared_rel_pos=shared_rel_pos)
if i in take_indices:
# normalize intermediates with final norm layer if enabled
intermediates.append(self.norm(x) if norm else x)
# process intermediates
if self.num_prefix_tokens:
# split prefix (e.g. class, distill) and spatial feature tokens
prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates]
intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates]
if reshape:
# reshape to BCHW output format
H, W = self.patch_embed.dynamic_feat_size((height, width))
intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]
if not torch.jit.is_scripting() and return_prefix_tokens:
            # return_prefix not supported in torchscript due to poor type handling
intermediates = list(zip(intermediates, prefix_tokens))
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
self.blocks = self.blocks[:max_index + 1] # truncate blocks
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.fc_norm = nn.Identity()
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos)
else:
x = blk(x, shared_rel_pos=shared_rel_pos)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
model = build_model_with_cfg(
VisionTransformerRelPos, variant, pretrained,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256)),
'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)),
'vit_relpos_small_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth',
hf_hub_id='timm/'),
'vit_relpos_medium_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth',
hf_hub_id='timm/'),
'vit_srelpos_small_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth',
hf_hub_id='timm/'),
'vit_srelpos_medium_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth',
hf_hub_id='timm/'),
'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_cls_224.untrained': _cfg(),
'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth',
hf_hub_id='timm/'),
'vit_relpos_small_patch16_rpn_224.untrained': _cfg(),
'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_rpn_224.untrained': _cfg(),
})
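

# Illustrative sketch (not part of the original file): extracting spatial intermediates from a
# relative-position ViT via `forward_intermediates`. Assumes `timm` is installed; `indices=3`
# takes the last three blocks, and the default 'NCHW' output format reshapes tokens to feature
# maps. The helper name is hypothetical and only for demonstration.
def _example_forward_intermediates():
    import timm
    model = timm.create_model('vit_relpos_small_patch16_224', pretrained=False)
    feats = model.forward_intermediates(
        torch.randn(1, 3, 224, 224), indices=3, intermediates_only=True)
    for f in feats:
        print(f.shape)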
@register_model
def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token
"""
model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False,
rel_pos_dim=384, shared_rel_pos=True)
model = _create_vision_transformer_relpos(
'vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
rel_pos_dim=512, shared_rel_pos=True)
model = _create_vision_transformer_relpos(
'vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-M/16) w/ relative log-coord position, class token present
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
rel_pos_dim=256, class_token=True, global_pool='token')
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, class token present
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, class_token=True, global_pool='token')
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, class token present
NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled
Leaving here for comparisons w/ a future re-train as it performs quite well.
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
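# Usage sketch (a minimal, illustrative example; assumes `timm` and `torch` are installed and
# that the model registrations above are importable through the timm factory):
if __name__ == '__main__':
    import torch
    import timm
    m = timm.create_model('vit_relpos_base_patch16_224', pretrained=False)
    m.eval()
    with torch.no_grad():
        out = m(torch.randn(1, 3, 224, 224))
    print(out.shape)  # (1, num_classes); 1000 classes by default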
|
pytorch-image-models/timm/models/vision_transformer_relpos.py/0
|
{
"file_path": "pytorch-image-models/timm/models/vision_transformer_relpos.py",
"repo_id": "pytorch-image-models",
"token_count": 13354
}
| 242
|
""" PyTorch LARS / LARC Optimizer
An implementation of LARS (SGD) + LARC in PyTorch
Based on:
* PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
* NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
Additional cleanup and modifications to properly support PyTorch XLA.
Copyright 2021 Ross Wightman
"""
import torch
from torch.optim.optimizer import Optimizer
class Lars(Optimizer):
""" LARS for PyTorch
Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf
Args:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): learning rate (default: 1.0).
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001)
eps (float): eps for division denominator (default: 1e-8)
trust_clip (bool): enable LARC trust ratio clipping (default: False)
always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False)
"""
def __init__(
self,
params,
lr=1.0,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
trust_coeff=0.001,
eps=1e-8,
trust_clip=False,
always_adapt=False,
):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
trust_coeff=trust_coeff,
eps=eps,
trust_clip=trust_clip,
always_adapt=always_adapt,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
device = self.param_groups[0]['params'][0].device
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
trust_coeff = group['trust_coeff']
eps = group['eps']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
# apply LARS LR adaptation, LARC clipping, weight decay
# ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
if weight_decay != 0 or group['always_adapt']:
w_norm = p.norm(2.0)
g_norm = grad.norm(2.0)
trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps)
# FIXME nested where required since logical and/or not working in PT XLA
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, trust_ratio, one_tensor),
one_tensor,
)
if group['trust_clip']:
trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor)
grad.add_(p, alpha=weight_decay)
grad.mul_(trust_ratio)
# apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(grad).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(grad, alpha=1. - dampening)
if nesterov:
grad = grad.add(buf, alpha=momentum)
else:
grad = buf
p.add_(grad, alpha=-group['lr'])
return loss
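# Usage sketch (illustrative hyper-parameters, assuming a trivial torch.nn model;
# `torch` is already imported at the top of this module):
if __name__ == '__main__':
    net = torch.nn.Linear(10, 2)
    optimizer = Lars(net.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4, trust_coeff=0.001)
    for _ in range(3):
        loss = net(torch.randn(8, 10)).pow(2).mean()  # dummy objective
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()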
|
pytorch-image-models/timm/optim/lars.py/0
|
{
"file_path": "pytorch-image-models/timm/optim/lars.py",
"repo_id": "pytorch-image-models",
"token_count": 2571
}
| 243
|
""" Polynomial Scheduler
Polynomial LR schedule with warmup, noise.
Hacked together by / Copyright 2021 Ross Wightman
"""
import math
import logging
from typing import List
import torch
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class PolyLRScheduler(Scheduler):
""" Polynomial LR Scheduler w/ warmup, noise, and k-decay
k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
power: float = 0.5,
lr_min: float = 0.,
cycle_mul: float = 1.,
cycle_decay: float = 1.,
cycle_limit: int = 1,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
k_decay=1.0,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize
)
assert t_initial > 0
assert lr_min >= 0
if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1:
_logger.warning("Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1.")
self.t_initial = t_initial
self.power = power
self.lr_min = lr_min
self.cycle_mul = cycle_mul
self.cycle_decay = cycle_decay
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
self.k_decay = k_decay
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t: int) -> List[float]:
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.cycle_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
t_i = self.cycle_mul ** i * self.t_initial
t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
gamma = self.cycle_decay ** i
lr_max_values = [v * gamma for v in self.base_values]
k = self.k_decay
if i < self.cycle_limit:
lrs = [
self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power
for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_cycle_length(self, cycles=0):
cycles = max(1, cycles or self.cycle_limit)
if self.cycle_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
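# Usage sketch (illustrative hyper-parameters, assuming a plain torch optimizer;
# `torch` is already imported at the top of this module):
if __name__ == '__main__':
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = PolyLRScheduler(
        optimizer, t_initial=100, power=0.5, lr_min=1e-5, warmup_t=5, warmup_lr_init=1e-4)
    for epoch in range(10):
        # ... one epoch of training would run here ...
        scheduler.step(epoch + 1)
        print(epoch, optimizer.param_groups[0]['lr'])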
|
pytorch-image-models/timm/scheduler/poly_lr.py/0
|
{
"file_path": "pytorch-image-models/timm/scheduler/poly_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 1979
}
| 244
|
""" Misc utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import argparse
import ast
import re
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def add_bool_arg(parser, name, default=False, help=''):
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
kw = {}
for value in values:
key, value = value.split('=')
try:
kw[key] = ast.literal_eval(value)
except ValueError:
kw[key] = str(value) # fallback to string (avoid need to escape on command line)
setattr(namespace, self.dest, kw)
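# Usage sketch (illustrative values only):
if __name__ == '__main__':
    print(sorted(['img10', 'img2', 'img1'], key=natural_key))  # ['img1', 'img2', 'img10']
    parser = argparse.ArgumentParser()
    add_bool_arg(parser, 'amp', default=False, help='use mixed precision')
    parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
    args = parser.parse_args(['--amp', '--model-kwargs', 'depth=12', 'act_layer=gelu'])
    print(args.amp, args.model_kwargs)  # True {'depth': 12, 'act_layer': 'gelu'}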
|
pytorch-image-models/timm/utils/misc.py/0
|
{
"file_path": "pytorch-image-models/timm/utils/misc.py",
"repo_id": "pytorch-image-models",
"token_count": 451
}
| 245
|
# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.80 AS chef
WORKDIR /usr/src
ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
FROM chef AS planner
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
rm -f $PROTOC_ZIP
COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --profile release-opt --recipe-path recipe.json
ARG GIT_SHA
ARG DOCKER_LABEL
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo build --profile release-opt
# Text Generation Inference base image for RoCm
FROM rocm/dev-ubuntu-22.04:6.1.1_hip_update AS base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
curl \
git \
make \
libssl-dev \
g++ \
# Needed to build VLLM & flash.
rocthrust-dev \
hipsparse-dev \
hipblas-dev \
hipblaslt-dev \
rocblas-dev \
hiprand-dev \
rocrand-dev \
miopen-hip-dev \
hipfft-dev \
hipcub-dev \
hipsolver-dev \
rccl-dev \
cmake \
python3-dev && \
rm -rf /var/lib/apt/lists/*
# Keep in sync with `server/pyproject.toml`
ARG MAMBA_VERSION=23.1.0-1
ARG PYTORCH_VERSION='2.3.0'
ARG ROCM_VERSION='6.0.2'
ARG PYTHON_VERSION='3.10.10'
# Automatically set by buildx
ARG TARGETPLATFORM
ENV PATH /opt/conda/bin:$PATH
# TGI seems to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda.
# Install mamba
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
"linux/arm64") MAMBA_ARCH=aarch64 ;; \
*) MAMBA_ARCH=x86_64 ;; \
esac && \
curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
bash ~/mambaforge.sh -b -p /opt/conda && \
mamba init && \
rm ~/mambaforge.sh
# Install flash-attention, torch dependencies
RUN pip install numpy einops ninja --no-cache-dir
RUN conda install intel::mkl-static intel::mkl-include
RUN pip uninstall -y triton && \
git clone --depth 1 --single-branch https://github.com/ROCm/triton.git && \
cd triton/python && \
pip install .
RUN git clone --depth 1 --recursive --single-branch --branch 2.3-patched https://github.com/fxmarty/pytorch.git pytorch && cd pytorch && pip install -r requirements.txt --no-cache-dir
ARG _GLIBCXX_USE_CXX11_ABI="1"
ARG CMAKE_PREFIX_PATH="/opt/conda"
ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942"
ARG BUILD_CAFFE2="0" \
BUILD_CAFFE2_OPS="0" \
USE_CUDA="0" \
USE_ROCM="1" \
BUILD_TEST="0" \
USE_FBGEMM="0" \
USE_NNPACK="0" \
USE_QNNPACK="0" \
USE_XNNPACK="0" \
USE_FLASH_ATTENTION="1" \
USE_MEM_EFF_ATTENTION="0"
RUN cd pytorch && python tools/amd_build/build_amd.py && python setup.py install
# Set as recommended: https://github.com/ROCm/triton/wiki/A-script-to-set-program-execution-environment-in-ROCm
ENV HIP_FORCE_DEV_KERNARG=1
# On MI250 and MI300, performances for flash with Triton FA are slightly better than CK.
# However, Triton requires a tunning for each prompt length, which is prohibitive.
ENV ROCM_USE_FLASH_ATTN_V2_TRITON=0
FROM base AS kernel-builder
# # Build vllm kernels
FROM kernel-builder AS vllm-builder
WORKDIR /usr/src
COPY server/Makefile-vllm Makefile
# Build specific version of vllm
RUN make build-vllm-rocm
# Build Flash Attention v2 kernels
FROM kernel-builder AS flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2-rocm
# Build Transformers CUDA kernels (gpt-neox and bloom)
FROM kernel-builder AS custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
RUN python setup.py build
# Build exllama kernels
FROM kernel-builder AS exllama-kernels-builder
WORKDIR /usr/src
COPY server/exllama_kernels/ .
RUN python setup.py build
# Build exllama v2 kernels
FROM kernel-builder AS exllamav2-kernels-builder
WORKDIR /usr/src
COPY server/exllamav2_kernels/ .
RUN python setup.py build
FROM base AS base-copy
# Text Generation Inference base env
ENV HF_HOME=/data \
HF_HUB_ENABLE_HF_TRANSFER=1 \
PORT=80
# Copy builds artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllamav2 kernels builder
COPY --from=exllamav2-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
make gen-server && \
pip install -r requirements_rocm.txt && \
pip install ".[accelerate, peft, outlines]" --no-cache-dir
# Install benchmarker
COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher
# AWS Sagemaker compatible image
FROM base AS sagemaker
COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
# Final image
FROM base-copy
COPY ./tgi-entrypoint.sh /tgi-entrypoint.sh
RUN chmod +x /tgi-entrypoint.sh
ENTRYPOINT ["/tgi-entrypoint.sh"]
CMD ["--json-output"]
|
text-generation-inference/Dockerfile_amd/0
|
{
"file_path": "text-generation-inference/Dockerfile_amd",
"repo_id": "text-generation-inference",
"token_count": 2713
}
| 246
|
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod client;
mod sharded_client;
pub use client::Client;
pub use pb::generate::v3::{
input_chunk::Chunk, Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType,
HealthResponse, Image, InfoResponse, Input, InputChunk, NextTokenChooserParameters, Request,
StoppingCriteriaParameters, Tokens,
};
pub use sharded_client::ShardedClient;
|
text-generation-inference/backends/client/src/v3/mod.rs/0
|
{
"file_path": "text-generation-inference/backends/client/src/v3/mod.rs",
"repo_id": "text-generation-inference",
"token_count": 142
}
| 247
|
//
// Created by mfuntowicz on 7/23/24.
//
#ifndef TGI_TRTLLM_BACKEND_HARDWARE_H
#define TGI_TRTLLM_BACKEND_HARDWARE_H
#include <cstdint>
#include <limits>
#include <optional>
#include <fmt/base.h>
#include <spdlog/spdlog.h>
#include <nvml.h>
namespace huggingface::hardware::cuda {
#define AMPERE_SM_MAJOR 8
#define HOPPER_SM_MAJOR 9
/**
* Store information about the version of the CUDA Compute Capabilities detected on the device
*/
struct CudaComputeCapabilities {
int32_t major;
int32_t minor;
[[nodiscard]] constexpr bool isPostAmpere() const { return major >= AMPERE_SM_MAJOR; }
[[nodiscard]] constexpr bool isPostHopper() const { return major >= HOPPER_SM_MAJOR; }
};
CudaComputeCapabilities GetCudaComputeCapabilities() {
// Get the compute capabilities of the current hardware
nvmlDevice_t device;
CudaComputeCapabilities capabilities{0, 0};
if (nvmlDeviceGetHandleByIndex_v2(0, &device) == NVML_SUCCESS) {
SPDLOG_DEBUG("Successfully acquired nvmlDevice_t = 0");
if (nvmlDeviceGetCudaComputeCapability(device, &capabilities.major, &capabilities.minor) == NVML_SUCCESS) {
SPDLOG_INFO("Detected sm_{:d}{:d} compute capabilities", capabilities.major, capabilities.minor);
}
}
return capabilities;
}
/**
     * Return the number of GPUs detected. If the NVML query fails, return std::nullopt
* @return
*/
std::optional<size_t> GetNumDevices() {
uint32_t numGpus = 0;
if (nvmlDeviceGetCount_v2(&numGpus) == NVML_SUCCESS) {
return std::optional(numGpus);
} else {
return std::nullopt;
}
}
}
#endif //TGI_TRTLLM_BACKEND_HARDWARE_H
|
text-generation-inference/backends/trtllm/include/hardware.h/0
|
{
"file_path": "text-generation-inference/backends/trtllm/include/hardware.h",
"repo_id": "text-generation-inference",
"token_count": 774
}
| 248
|
use crate::client::{ClientError, Result};
/// Multi shard Client
use crate::client::{Health, ShardInfo};
use crate::client::grpc_client::{DecodeTimings, PrefillTimings};
use crate::client::{
Batch, CachedBatch, Client, Generation, GrammarType, HealthResponse,
NextTokenChooserParameters, Request, StoppingCriteriaParameters,
};
use crate::client::{Chunk, InfoResponse, Input};
use async_trait::async_trait;
use futures::future::join_all;
use tonic::transport::Uri;
use tracing::instrument;
#[derive(Debug, Clone)]
/// Text Generation Inference gRPC multi client
pub struct ShardedClient {
clients: Vec<Client>,
}
impl ShardedClient {
fn new(clients: Vec<Client>) -> Self {
Self { clients }
}
/// Create a new ShardedClient from a master client. The master client will communicate with
    /// the other shards and return all uris/unix sockets with the `service_discovery` gRPC method.
async fn from_master_client(mut master_client: Client) -> Result<Self> {
// Get all uris/unix sockets from the master client
let uris = master_client.service_discovery().await?;
let futures = uris.into_iter().map(Client::connect_uds);
let clients: Result<Vec<Client>> = join_all(futures).await.into_iter().collect();
Ok(Self::new(clients?))
}
/// Returns a client connected to the given uri
#[allow(dead_code)]
pub async fn connect(uri: Uri) -> Result<Self> {
let master_client = Client::connect(uri).await?;
Self::from_master_client(master_client).await
}
/// Returns a client connected to the given unix socket
pub async fn connect_uds(path: String) -> Result<Self> {
let master_client = Client::connect_uds(path).await?;
Self::from_master_client(master_client).await
}
/// Get the model info
#[instrument(skip(self))]
pub async fn info(&mut self) -> Result<ShardInfo> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.info())
.collect();
join_all(futures).await.pop().unwrap().map(ShardInfo::from)
}
/// GRPC health check
#[instrument(skip(self))]
pub async fn health(&mut self) -> Result<HealthResponse> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.health())
.collect();
join_all(futures).await.pop().unwrap()
}
/// Clear the past generations cache
#[instrument(skip(self))]
pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.clear_cache(batch_id))
.collect();
join_all(futures).await.into_iter().collect()
}
/// Filter a cached batch
#[instrument(skip(self))]
pub async fn filter_batch(
&mut self,
batch_id: u64,
request_ids: Vec<u64>,
) -> Result<Option<CachedBatch>> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.filter_batch(batch_id, request_ids.clone())))
.collect();
// all shards return the same message
join_all(futures).await.pop().unwrap()
}
/// Warmup on a max size batch
///
/// Returns the maximum amount of tokens supported by the hardware
#[instrument(skip(self))]
pub async fn warmup(
&mut self,
max_input_length: u32,
max_prefill_tokens: u32,
max_total_tokens: u32,
max_batch_size: Option<usize>,
) -> Result<Option<u32>> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| {
Box::pin(client.warmup(
max_input_length,
max_prefill_tokens,
max_total_tokens,
max_batch_size,
))
})
.collect();
// Take the minimum value
let results = join_all(futures)
.await
.into_iter()
.collect::<Result<Vec<Option<u32>>>>()?;
Ok(results.into_iter().flatten().min())
}
/// Generate one token for each request in the given batch
///
/// Returns Generation for each request in batch
/// and the next cached batch
#[instrument(skip_all, fields(id = & batch.id, size = & batch.size))]
pub async fn prefill(
&mut self,
batch: Batch,
) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.prefill(batch.clone())))
.collect();
#[allow(clippy::type_complexity)]
let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)>> =
join_all(futures).await.into_iter().collect();
let mut results = results?;
let (mut generations, next_batch, mut timings) =
results.pop().ok_or(ClientError::EmptyResults)?;
// Merge generations from different model shards
for (mut shard_generations, _, shard_timings) in results.into_iter() {
generations.append(&mut shard_generations);
// Return the timings of the slowest shard
if shard_timings.total > timings.total {
timings = shard_timings;
}
}
Ok((generations, next_batch, timings))
}
/// Generate one token for each request in the given cached batches
///
/// Returns Generation for each request in batches
/// and the next cached batch
#[instrument(skip_all, fields(size = batches.iter().map(| batch | {batch.size}).sum::< u32 > ()))]
pub async fn decode(
&mut self,
batches: Vec<CachedBatch>,
) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.decode(batches.clone())))
.collect();
#[allow(clippy::type_complexity)]
let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)>> =
join_all(futures).await.into_iter().collect();
let mut results = results?;
let (mut generations, next_batch, mut timings) =
results.pop().ok_or(ClientError::EmptyResults)?;
// Merge generations from different model shards
for (mut shard_generations, _, shard_timings) in results.into_iter() {
generations.append(&mut shard_generations);
// Return the timings of the slowest shard
if shard_timings.total > timings.total {
timings = shard_timings;
}
}
Ok((generations, next_batch, timings))
}
}
impl From<InfoResponse> for ShardInfo {
fn from(value: InfoResponse) -> Self {
Self {
requires_padding: value.requires_padding,
dtype: value.dtype,
device_type: value.device_type,
window_size: value.window_size,
speculate: value.speculate,
}
}
}
#[async_trait]
impl Health for ShardedClient {
async fn device_health(&self) -> Result<()> {
self.clone().health().await?;
Ok(())
}
async fn model_health(&self) -> Result<()> {
// Dummy batch of 1 token and 1 generated token
let liveness_request = Request {
id: u64::MAX,
inputs: "liveness".to_string(),
input_chunks: Some(Input {
chunks: vec![Chunk::Text("liveness".into()).into()],
}),
truncate: 10,
add_special_tokens: true,
prefill_logprobs: false,
parameters: Some(NextTokenChooserParameters {
temperature: 1.0,
top_k: 0,
top_p: 1.0,
typical_p: 1.0,
do_sample: false,
seed: 0,
repetition_penalty: 1.0,
frequency_penalty: 0.0,
watermark: false,
grammar: String::new(),
grammar_type: GrammarType::None as i32,
}),
stopping_parameters: Some(StoppingCriteriaParameters {
max_new_tokens: 1,
stop_sequences: vec![],
ignore_eos_token: false,
}),
top_n_tokens: 0,
// Block 0 is reserved for health checks
blocks: vec![0],
slots: (0..16).collect(),
prefix_len: 0,
adapter_id: None,
};
let batch = Batch {
id: u64::MAX,
requests: vec![liveness_request],
size: 1,
max_tokens: 2,
max_blocks: 1,
};
self.clone().prefill(batch).await?;
Ok(())
}
}
|
text-generation-inference/backends/v3/src/client/sharded_client.rs/0
|
{
"file_path": "text-generation-inference/backends/v3/src/client/sharded_client.rs",
"repo_id": "text-generation-inference",
"token_count": 4198
}
| 249
|
# Legacy warning ⚠️
The inference clients from [huggingface_hub](https://huggingface.co/docs/huggingface_hub/guides/inference) are recommended over `text_generation`.
# Text Generation
The Hugging Face Text Generation Python library provides a convenient way of interfacing with a
`text-generation-inference` instance running on
[Hugging Face Inference Endpoints](https://huggingface.co/inference-endpoints) or on the Hugging Face Hub.
## Get Started
### Install
```shell
pip install text-generation
```
### Inference API Usage
```python
from text_generation import InferenceAPIClient
client = InferenceAPIClient("bigscience/bloomz")
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
or with the asynchronous client:
```python
from text_generation import InferenceAPIAsyncClient
client = InferenceAPIAsyncClient("bigscience/bloomz")
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
Check all currently deployed models on the Huggingface Inference API with `Text Generation` support:
```python
from text_generation.inference_api import deployed_models
print(deployed_models())
```
### Hugging Face Inference Endpoint usage
```python
from text_generation import Client
endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"
client = Client(endpoint_url)
text = client.generate("Why is the sky blue?").generated_text
print(text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
or with the asynchronous client:
```python
from text_generation import AsyncClient
endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud"
client = AsyncClient(endpoint_url)
response = await client.generate("Why is the sky blue?")
print(response.generated_text)
# ' Rayleigh scattering'
# Token Streaming
text = ""
async for response in client.generate_stream("Why is the sky blue?"):
if not response.token.special:
text += response.token.text
print(text)
# ' Rayleigh scattering'
```
### Types
```python
# enum for grammar type
class GrammarType(Enum):
Json = "json"
Regex = "regex"
# Grammar type and value
class Grammar:
# Grammar type
type: GrammarType
# Grammar value
value: Union[str, dict]
class Parameters:
# Activate logits sampling
do_sample: bool
# Maximum number of generated tokens
max_new_tokens: int
# The parameter for repetition penalty. 1.0 means no penalty.
# See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
repetition_penalty: Optional[float]
    # The parameter for frequency penalty. 0.0 means no penalty
# Penalize new tokens based on their existing frequency in the text so far,
# decreasing the model's likelihood to repeat the same line verbatim.
frequency_penalty: Optional[float]
# Whether to prepend the prompt to the generated text
return_full_text: bool
# Stop generating tokens if a member of `stop_sequences` is generated
stop: List[str]
# Random sampling seed
seed: Optional[int]
# The value used to module the logits distribution.
temperature: Optional[float]
# The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_k: Optional[int]
# If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
# higher are kept for generation.
top_p: Optional[float]
# truncate inputs tokens to the given size
truncate: Optional[int]
# Typical Decoding mass
# See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
typical_p: Optional[float]
    # Generate best_of sequences and return the one with the highest token logprobs
best_of: Optional[int]
# Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
watermark: bool
# Get generation details
details: bool
# Get decoder input token logprobs and ids
decoder_input_details: bool
# Return the N most likely tokens at each step
top_n_tokens: Optional[int]
# grammar to use for generation
grammar: Optional[Grammar]
class Request:
# Prompt
inputs: str
# Generation parameters
parameters: Optional[Parameters]
# Whether to stream output tokens
stream: bool
# Decoder input tokens
class InputToken:
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
# Optional since the logprob of the first token cannot be computed
logprob: Optional[float]
# Generated tokens
class Token:
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
logprob: Optional[float]
# Is the token a special token
# Can be used to ignore tokens when concatenating
special: bool
# Generation finish reason
class FinishReason(Enum):
# number of generated tokens == `max_new_tokens`
Length = "length"
# the model generated its end of sequence token
EndOfSequenceToken = "eos_token"
# the model generated a text included in `stop_sequences`
StopSequence = "stop_sequence"
# Additional sequences when using the `best_of` parameter
class BestOfSequence:
# Generated text
generated_text: str
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int]
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]]
# `generate` details
class Details:
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int]
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]]
# Additional sequences when using the `best_of` parameter
best_of_sequences: Optional[List[BestOfSequence]]
# `generate` return value
class Response:
# Generated text
generated_text: str
# Generation details
details: Details
# `generate_stream` details
class StreamDetails:
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int]
# `generate_stream` return value
class StreamResponse:
# Generated token
token: Token
# Most likely tokens
top_tokens: Optional[List[Token]]
# Complete generated text
# Only available when the generation is finished
generated_text: Optional[str]
# Generation details
# Only available when the generation is finished
details: Optional[StreamDetails]
# Inference API currently deployed model
class DeployedModel:
model_id: str
sha: str
```
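For reference, here is a short illustrative sketch of how these parameters map onto a `generate` call and how the returned `Response` and `Details` objects can be inspected (the endpoint URL and sampling values are placeholders):
```python
from text_generation import Client

client = Client("https://YOUR_ENDPOINT.endpoints.huggingface.cloud")
response = client.generate(
    "Why is the sky blue?",
    max_new_tokens=20,
    do_sample=True,
    temperature=0.5,
    top_p=0.9,
    seed=42,
)
print(response.generated_text)
print(response.details.finish_reason, response.details.generated_tokens)
```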
|
text-generation-inference/clients/python/README.md/0
|
{
"file_path": "text-generation-inference/clients/python/README.md",
"repo_id": "text-generation-inference",
"token_count": 2491
}
| 250
|
- sections:
- local: index
title: Text Generation Inference
- local: quicktour
title: Quick Tour
- local: installation_nvidia
title: Using TGI with Nvidia GPUs
- local: installation_amd
title: Using TGI with AMD GPUs
- local: installation_gaudi
title: Using TGI with Intel Gaudi
- local: installation_inferentia
title: Using TGI with AWS Inferentia
- local: installation_intel
title: Using TGI with Intel GPUs
- local: installation
title: Installation from source
- local: supported_models
title: Supported Models and Hardware
- local: architecture
title: Internal Architecture
- local: usage_statistics
title: Usage Statistics
title: Getting started
- sections:
- local: basic_tutorials/consuming_tgi
title: Consuming TGI
- local: basic_tutorials/preparing_model
title: Preparing Model for Serving
- local: basic_tutorials/gated_model_access
title: Serving Private & Gated Models
- local: basic_tutorials/using_cli
title: Using TGI CLI
- local: basic_tutorials/non_core_models
title: Non-core Model Serving
- local: basic_tutorials/safety
title: Safety
- local: basic_tutorials/using_guidance
title: Using Guidance, JSON, tools
- local: basic_tutorials/visual_language_models
title: Visual Language Models
- local: basic_tutorials/monitoring
title: Monitoring TGI with Prometheus and Grafana
- local: basic_tutorials/train_medusa
title: Train Medusa
title: Tutorials
- sections:
- local: reference/launcher
title: All TGI CLI options
- local: reference/metrics
title: Exported Metrics
- local: reference/api_reference
title: API Reference
title: Reference
- sections:
- local: conceptual/streaming
title: Streaming
- local: conceptual/quantization
title: Quantization
- local: conceptual/tensor_parallelism
title: Tensor Parallelism
- local: conceptual/paged_attention
title: PagedAttention
- local: conceptual/safetensors
title: Safetensors
- local: conceptual/flash_attention
title: Flash Attention
- local: conceptual/speculation
title: Speculation (Medusa, ngram)
- local: conceptual/guidance
title: How Guidance Works (via outlines)
- local: conceptual/lora
title: LoRA (Low-Rank Adaptation)
title: Conceptual Guides
|
text-generation-inference/docs/source/_toctree.yml/0
|
{
"file_path": "text-generation-inference/docs/source/_toctree.yml",
"repo_id": "text-generation-inference",
"token_count": 747
}
| 251
|
# Quantization
TGI offers many quantization schemes to run LLMs effectively and fast based on your use-case. TGI supports GPTQ, AWQ, bits-and-bytes, EETQ, Marlin, EXL2 and fp8 quantization.
To leverage GPTQ, AWQ, Marlin and EXL2 quants, you must provide pre-quantized weights, whereas for bits-and-bytes, EETQ and fp8, weights are quantized by TGI on the fly.
We recommend using the official quantization scripts for creating your quants:
1. [AWQ](https://github.com/casper-hansen/AutoAWQ/blob/main/examples/quantize.py)
2. [GPTQ/ Marlin](https://github.com/AutoGPTQ/AutoGPTQ/blob/main/examples/quantization/basic_usage.py)
3. [EXL2](https://github.com/turboderp/exllamav2/blob/master/doc/convert.md)
For on-the-fly quantization you simply need to pass one of the supported quantization types and TGI takes care of the rest.
## Quantization with bitsandbytes, EETQ & fp8
bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models. Unlike GPTQ quantization, bitsandbytes doesn't require a calibration dataset or any post-processing – weights are automatically quantized on load. However, inference with bitsandbytes is slower than GPTQ or FP16 precision.
8-bit quantization enables multi-billion parameter scale models to fit in smaller hardware without degrading performance too much.
In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇
```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize bitsandbytes
```
4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.
In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇
```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize bitsandbytes-nf4
```
You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
Similarly, you can pass `--quantize eetq` or `--quantize fp8` for the respective quantization schemes.
In addition to this, TGI allows creating GPTQ quants directly by passing the model weights and a calibration dataset.
## Quantization with GPTQ
GPTQ is a post-training quantization method to make the model smaller. It quantizes each layer by finding a compressed version of its weight matrix that yields the minimum mean squared error, as shown below 👇
Given a layer \\(l\\) with weight matrix \\(W_{l}\\) and layer input \\(X_{l}\\), find quantized weight \\(\\hat{W}_{l}\\):
$$\hat{W}_{l}^{*} = \mathrm{argmin}_{\hat{W}_{l}} \, ||W_{l}X_{l}-\hat{W}_{l}X_{l}||^{2}_{2}$$
TGI allows you to both run an already GPTQ-quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using the quantization script. You can run a quantized model by simply passing `--quantize gptq` like below 👇
```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize gptq
```
Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.
To quantize a given model using GPTQ with a calibration dataset, simply run
```bash
text-generation-server quantize tiiuae/falcon-40b /data/falcon-40b-gptq
# Add --upload-to-model-id MYUSERNAME/falcon-40b to push the created model to the hub directly
```
This will create a new directory with the quantized files which you can use with,
```bash
text-generation-launcher --model-id /data/falcon-40b-gptq/ --sharded true --num-shard 2 --quantize gptq
```
You can learn more about the quantization options by running `text-generation-server quantize --help`.
If you wish to do more with GPTQ models (e.g. train an adapter on top), you can read about transformers GPTQ integration [here](https://huggingface.co/blog/gptq-integration).
You can learn more about GPTQ from the [paper](https://arxiv.org/pdf/2210.17323.pdf).
|
text-generation-inference/docs/source/conceptual/quantization.md/0
|
{
"file_path": "text-generation-inference/docs/source/conceptual/quantization.md",
"repo_id": "text-generation-inference",
"token_count": 1430
}
| 252
|
# Supported Models and Hardware
Text Generation Inference enables serving optimized models on specific hardware for the highest performance. The following sections list which models (VLMs & LLMs) are supported.
## Supported Models
- [Deepseek V2](https://huggingface.co/deepseek-ai/DeepSeek-V2)
- [Idefics 2](https://huggingface.co/HuggingFaceM4/idefics2-8b) (Multimodal)
- [Llava Next (1.6)](https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf) (Multimodal)
- [Llama](https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f)
- [Phi 3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)
- [Gemma](https://huggingface.co/google/gemma-7b)
- [PaliGemma](https://huggingface.co/google/paligemma-3b-pt-224)
- [Gemma2](https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315)
- [Cohere](https://huggingface.co/CohereForAI/c4ai-command-r-plus)
- [Dbrx](https://huggingface.co/databricks/dbrx-instruct)
- [Mamba](https://huggingface.co/state-spaces/mamba-2.8b-slimpj)
- [Mistral](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)
- [Mixtral](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)
- [Gpt Bigcode](https://huggingface.co/bigcode/gpt_bigcode-santacoder)
- [Phi](https://huggingface.co/microsoft/phi-1_5)
- [Baichuan](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat)
- [Falcon](https://huggingface.co/tiiuae/falcon-7b-instruct)
- [StarCoder 2](https://huggingface.co/bigcode/starcoder2-15b-instruct-v0.1)
- [Qwen 2](https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f)
- [Opt](https://huggingface.co/facebook/opt-6.7b)
- [T5](https://huggingface.co/google/flan-t5-xxl)
- [Galactica](https://huggingface.co/facebook/galactica-120b)
- [SantaCoder](https://huggingface.co/bigcode/santacoder)
- [Bloom](https://huggingface.co/bigscience/bloom-560m)
- [Mpt](https://huggingface.co/mosaicml/mpt-7b-instruct)
- [Gpt2](https://huggingface.co/openai-community/gpt2)
- [Gpt Neox](https://huggingface.co/EleutherAI/gpt-neox-20b)
- [Gptj](https://huggingface.co/EleutherAI/gpt-j-6b)
- [Idefics](https://huggingface.co/HuggingFaceM4/idefics-9b) (Multimodal)
If the above list lacks the model you would like to serve, then depending on the model's pipeline type you can try to initialize and serve the model anyway to see how well it performs, but performance isn't guaranteed for non-optimized models:
```python
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM
# for causal LMs/text-generation models
AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")
# or, for text-to-text generation models
AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")
```
If you wish to serve a supported model that already exists on a local folder, just point to the local folder.
```bash
text-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM>
```
|
text-generation-inference/docs/source/supported_models.md/0
|
{
"file_path": "text-generation-inference/docs/source/supported_models.md",
"repo_id": "text-generation-inference",
"token_count": 1141
}
| 253
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.703125,
"text": "What"
},
{
"id": 338,
"logprob": -1.4765625,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8583984,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7548828,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9306641,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4550781,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.5732422,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5761719,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5888672,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026504517,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4287109,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15856934,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62646484,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json",
"repo_id": "text-generation-inference",
"token_count": 1236
}
| 254
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2,
"logprob": null,
"text": "<bos>"
},
{
"id": 2015,
"logprob": -9.640625,
"text": "Test"
},
{
"id": 3853,
"logprob": -10.34375,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 604,
"logprob": -2.4296875,
"special": false,
"text": " for"
},
{
"id": 573,
"logprob": -2.4453125,
"special": false,
"text": " the"
},
{
"id": 2412,
"logprob": -2.8632812,
"special": false,
"text": " following"
},
{
"id": 235292,
"logprob": -2.1328125,
"special": false,
"text": ":"
},
{
"id": 109,
"logprob": -0.76660156,
"special": false,
"text": "\n\n"
},
{
"id": 235287,
"logprob": -1.3837891,
"special": false,
"text": "*"
},
{
"id": 235248,
"logprob": -1.9746094,
"special": false,
"text": " "
},
{
"id": 199,
"logprob": -1.4189453,
"special": false,
"text": "<strong>"
},
{
"id": 1232,
"logprob": -4.34375,
"special": false,
"text": "Name"
},
{
"id": 208,
"logprob": -0.8852539,
"special": false,
"text": "</strong>"
}
],
"top_tokens": null
},
"generated_text": " for the following:\n\n* <strong>Name</strong>"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma_gptq/test_flash_gemma_gptq.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma_gptq/test_flash_gemma_gptq.json",
"repo_id": "text-generation-inference",
"token_count": 1053
}
| 255
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 128000,
"logprob": null,
"text": "<|begin_of_text|>"
},
{
"id": 2323,
"logprob": -9.421875,
"text": "Test"
},
{
"id": 1715,
"logprob": -10.546875,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 369,
"logprob": -2.1816406,
"special": false,
"text": " for"
},
{
"id": 279,
"logprob": -2.6992188,
"special": false,
"text": " the"
},
{
"id": 220,
"logprob": -3.6308594,
"special": false,
"text": " "
},
{
"id": 679,
"logprob": -1.7900391,
"special": false,
"text": "201"
},
{
"id": 24,
"logprob": -1.3554688,
"special": false,
"text": "9"
},
{
"id": 12,
"logprob": -2.0039062,
"special": false,
"text": "-"
},
{
"id": 2366,
"logprob": -0.4489746,
"special": false,
"text": "202"
},
{
"id": 15,
"logprob": -0.037109375,
"special": false,
"text": "0"
},
{
"id": 2978,
"logprob": -0.8100586,
"special": false,
"text": " school"
},
{
"id": 1060,
"logprob": -0.013015747,
"special": false,
"text": " year"
}
],
"top_tokens": null
},
"generated_text": " for the 2019-2020 school year"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8/test_flash_llama_fp8.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8/test_flash_llama_fp8.json",
"repo_id": "text-generation-inference",
"token_count": 1053
}
| 256
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": 0,
"tokens": [
{
"id": 28747,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -0.1307373,
"special": false,
"text": " Let"
},
{
"id": 332,
"logprob": -2.3359375,
"special": false,
"text": " u"
},
{
"id": 347,
"logprob": 0.0,
"special": false,
"text": " be"
},
{
"id": 325,
"logprob": -1.0234375,
"special": false,
"text": " ("
},
{
"id": 28734,
"logprob": -2.0292969,
"special": false,
"text": "0"
},
{
"id": 648,
"logprob": -1.0439453,
"special": false,
"text": " +"
},
{
"id": 28705,
"logprob": -0.24499512,
"special": false,
"text": " "
},
{
"id": 28770,
"logprob": -0.5073242,
"special": false,
"text": "3"
},
{
"id": 387,
"logprob": -1.5507812,
"special": false,
"text": " -"
}
],
"top_tokens": null
},
"generated_text": "Test request: Let u be (0 + 3 -"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 1041
}
| 257
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6289062,
"text": " print"
},
{
"id": 81,
"logprob": -1.6005859,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9921875,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2262,
"logprob": -0.7705078,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": -0.2590332,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": -0.39379883,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.61376953,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": -0.47338867,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -1.5068359,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.80810547,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.7397461,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": -0.35229492,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -1.0371094,
"special": false,
"text": "def"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef"
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder.json",
"repo_id": "text-generation-inference",
"token_count": 1111
}
| 258
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.08660889,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.7089844,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.32885742,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.05126953,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.35229492,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.12561035,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.038085938,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018656254,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07293701,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.004852295,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_simple.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_simple.json",
"repo_id": "text-generation-inference",
"token_count": 868
}
| 259
|
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3798828,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36328125,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0947266,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8286133,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6826172,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.7290039,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
}
]
|
text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json",
"repo_id": "text-generation-inference",
"token_count": 2874
}
| 260
|
import pytest
@pytest.fixture(scope="module")
def bloom_560m_sharded_handle(launcher):
with launcher("bigscience/bloom-560m", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def bloom_560m_sharded(bloom_560m_sharded_handle):
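    # Wait (up to 240 seconds) for the sharded server to report healthy before exposing the client.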
await bloom_560m_sharded_handle.health(240)
return bloom_560m_sharded_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot):
response = await bloom_560m_sharded.generate(
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
top_p=0.9,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_sharded_load(
bloom_560m_sharded, generate_load, response_snapshot
):
responses = await generate_load(
bloom_560m_sharded,
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py",
"repo_id": "text-generation-inference",
"token_count": 527
}
| 261
|
import pytest
@pytest.fixture(scope="module")
def flash_llama_marlin_handle(launcher):
with launcher(
"neuralmagic/llama-2-7b-chat-marlin", num_shard=2, quantize="marlin"
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_marlin(flash_llama_marlin_handle):
await flash_llama_marlin_handle.health(300)
return flash_llama_marlin_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin(flash_llama_marlin, response_snapshot):
response = await flash_llama_marlin.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin_all_params(flash_llama_marlin, response_snapshot):
response = await flash_llama_marlin.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_marlin_load(
flash_llama_marlin, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_marlin, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_flash_llama_marlin.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_marlin.py",
"repo_id": "text-generation-inference",
"token_count": 748
}
| 262
|
import pytest
import base64
# TODO fix the server parser to count inline image tokens correctly
def get_chicken():
with open("integration-tests/images/chicken_on_money.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
return f"data:image/png;base64,{encoded_string.decode('utf-8')}"
def get_cow_beach():
with open("integration-tests/images/cow_beach.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
return f"data:image/png;base64,{encoded_string.decode('utf-8')}"
@pytest.fixture(scope="module")
def flash_idefics2_next_handle(launcher):
with launcher(
"HuggingFaceM4/idefics2-8b",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_idefics2_next(flash_idefics2_next_handle):
await flash_idefics2_next_handle.health(300)
return flash_idefics2_next_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_next_simple(flash_idefics2_next, response_snapshot):
chicken = get_chicken()
response = await flash_idefics2_next.generate(
f"User:Write me a short story<end_of_utterance> \nAssistant:",
max_new_tokens=10,
)
assert (
response.generated_text == " A chicken is sitting on a pile of money."
), f"{repr(response.generated_text)}"
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_two_images(flash_idefics2_next, response_snapshot):
chicken = get_chicken()
cow_beach = get_cow_beach()
response = await flash_idefics2_next.generate(
f"User:Where are the cow and chicken?<end_of_utterance> \nAssistant:",
max_new_tokens=20,
)
assert (
response.generated_text
== " The cow is standing on the beach and the chicken is sitting on a pile of money."
), f"{repr(response.generated_text)}"
assert response.details.generated_tokens == 19
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_next_all_params(flash_idefics2_next, response_snapshot):
response = await flash_idefics2_next.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_idefics2_next_load(
flash_idefics2_next, generate_load, response_snapshot
):
chicken = get_chicken()
responses = await generate_load(
flash_idefics2_next,
f"User:Write me a short story<end_of_utterance> \nAssistant:",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert generated_texts[0] == " A chicken is sitting on a pile of money."
assert len(generated_texts) == 4
assert all([r.generated_text == generated_texts[0] for r in responses])
assert responses == response_snapshot
|
text-generation-inference/integration-tests/models/test_idefics2.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_idefics2.py",
"repo_id": "text-generation-inference",
"token_count": 1397
}
| 263
|
use std::error::Error;
use vergen::EmitBuilder;
fn main() -> Result<(), Box<dyn Error>> {
// Emit cargo and rustc compile time values
EmitBuilder::builder().all_cargo().all_rustc().emit()?;
// Try to get the git sha from the local git repository
if EmitBuilder::builder()
.fail_on_error()
.git_sha(false)
.emit()
.is_err()
{
// Unable to get the git sha
if let Ok(sha) = std::env::var("GIT_SHA") {
// Set it from an env var
println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}");
}
}
// Set docker label if present
if let Ok(label) = std::env::var("DOCKER_LABEL") {
// Set it from an env var
println!("cargo:rustc-env=DOCKER_LABEL={label}");
}
Ok(())
}
|
text-generation-inference/launcher/build.rs/0
|
{
"file_path": "text-generation-inference/launcher/build.rs",
"repo_id": "text-generation-inference",
"token_count": 363
}
| 264
|
// pub(crate) mod v2;
mod chat_template;
pub mod tool_grammar;
use crate::validation::{ValidGenerateRequest, Validation, ValidationError};
use crate::Tool;
use crate::{
ChatTemplateVersions, FinishReason, GenerateRequest, HubProcessorConfig, HubTokenizerConfig,
Message, PrefillToken, Token,
};
use async_trait::async_trait;
use chat_template::ChatTemplate;
use futures::future::try_join_all;
use minijinja::ErrorKind;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::{OwnedSemaphorePermit, Semaphore, TryAcquireError};
use tokio::time::Instant;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tokio_stream::StreamExt;
use tracing::instrument;
#[async_trait]
pub trait Backend {
fn schedule(
&self,
request: ValidGenerateRequest,
) -> Result<UnboundedReceiverStream<Result<InferStreamResponse, InferError>>, InferError>;
async fn health(&self, current_health: bool) -> bool;
}
/// Inference struct
#[derive(Clone)]
pub struct Infer {
/// Validation
validation: Validation,
/// Request backend
backend: Arc<dyn Backend + Send + Sync>,
/// Chat template
chat_template: Option<ChatTemplate>,
/// Inference limit
limit_concurrent_requests: Arc<Semaphore>,
/// Backend health
backend_health: Arc<AtomicBool>,
}
impl Infer {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
backend: impl Backend + Send + Sync + 'static,
validation: Validation,
max_concurrent_requests: usize,
tokenizer_config: HubTokenizerConfig,
processor_config: HubProcessorConfig,
) -> Self {
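        // Resolve the chat template: prefer the tokenizer config, fall back to the processor
        // config, and for multi-template configs pick the entry named "default".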
let chat_template = tokenizer_config
.chat_template
.or(processor_config.chat_template)
.and_then(|t| match t {
ChatTemplateVersions::Single(template) => Some(template),
ChatTemplateVersions::Multiple(templates) => templates
.into_iter()
.find(|t| t.name == "default")
.map(|t| t.template),
})
.map(|t| ChatTemplate::new(t, tokenizer_config.bos_token, tokenizer_config.eos_token));
// Inference limit with a semaphore
let semaphore = Arc::new(Semaphore::new(max_concurrent_requests));
// Backend health
let backend_health = Arc::new(AtomicBool::new(false));
Self {
validation,
backend: Arc::new(backend),
chat_template,
limit_concurrent_requests: semaphore,
backend_health,
}
}
/// Add a new request to the queue and return a stream of InferStreamResponse
#[instrument(skip_all)]
pub(crate) async fn generate_stream<'a>(
&'a self,
request: GenerateRequest,
) -> Result<GenerateStreamResponse, InferError> {
// Limit concurrent requests by acquiring a permit from the semaphore
let permit = self
.clone()
.limit_concurrent_requests
.try_acquire_owned()
.map_err(|err| {
metrics::counter!("tgi_request_failure", "err" => "overloaded").increment(1);
tracing::error!("{err}");
err
})?;
// Validate request
let valid_request = self.validation.validate(request).await.map_err(|err| {
metrics::counter!("tgi_request_failure", "err" => "validation").increment(1);
tracing::error!("{err}");
err
})?;
let input_length = valid_request.input_length;
let generation_stream = self.backend.schedule(valid_request)?;
Ok((permit, input_length, generation_stream))
}
    /// Tokenize the input
#[instrument(skip_all)]
pub(crate) async fn tokenize(
&self,
request: GenerateRequest,
) -> Result<Option<tokenizers::Encoding>, InferError> {
// Tokenize request
let inputs = request.inputs;
let add_special_tokens = request.add_special_tokens;
let truncate = request.parameters.truncate;
let encoding = self
.validation
.tokenize(inputs, add_special_tokens, truncate)
.await
.map_err(|err| {
tracing::error!("Tokenization {err}");
err
})?;
// Return Encoding
Ok(encoding.map(|(encoding, _)| encoding))
}
/// Apply the chat template to the chat request
#[instrument(skip_all)]
pub(crate) fn apply_chat_template(
&self,
guideline: Option<String>,
messages: Vec<Message>,
tools_and_prompt: Option<(Vec<Tool>, String)>,
) -> Result<String, InferError> {
self.chat_template
.as_ref()
.ok_or_else(|| InferError::TemplateError(ErrorKind::TemplateNotFound.into()))?
.apply(guideline.as_deref(), messages, tools_and_prompt)
.map_err(|e| {
metrics::counter!("tgi_request_failure", "err" => "template").increment(1);
tracing::error!("{e}");
e
})
}
    /// Add a new request to the queue and return an InferResponse
#[instrument(skip_all)]
pub(crate) async fn generate(
&self,
request: GenerateRequest,
) -> Result<InferResponse, InferError> {
let use_top_tokens = request.parameters.top_n_tokens.is_some_and(|x| x > 0);
// Create stream and keep semaphore permit as long as generate lives
let (_permit, _input_length, stream) = self.generate_stream(request).await?;
// Return values
let mut result_prefill = Vec::new();
let mut result_tokens = Vec::new();
let mut result_top_tokens = Vec::new();
let mut result_generated_text = None;
let mut result_start = None;
let mut result_queued = None;
let mut stream = Box::pin(stream);
// Iterate on stream
while let Some(response) = stream.next().await {
match response? {
// Add prefill tokens
InferStreamResponse::Prefill(prefill_tokens) => {
result_prefill = prefill_tokens;
}
// Push last token
InferStreamResponse::Intermediate { token, top_tokens } => {
result_tokens.push(token);
result_top_tokens.push(top_tokens);
}
// Final message
// Set return values
InferStreamResponse::End {
token,
generated_text,
start,
queued,
top_tokens,
} => {
result_tokens.push(token);
result_top_tokens.push(top_tokens);
result_generated_text = Some(generated_text);
result_start = Some(start);
result_queued = Some(queued)
}
}
}
// Check that we received a `InferStreamResponse::End` message
if let (Some(generated_text), Some(queued), Some(start)) =
(result_generated_text, result_queued, result_start)
{
Ok(InferResponse {
prefill: result_prefill,
_input_length,
tokens: result_tokens,
generated_text,
queued,
start,
top_tokens: if use_top_tokens {
result_top_tokens
} else {
Vec::new()
},
})
} else {
let err = InferError::IncompleteGeneration;
metrics::counter!("tgi_request_failure", "err" => "incomplete").increment(1);
tracing::error!("{err}");
Err(err)
}
}
    /// Add best_of new requests to the queue and return an InferResponse for the sequence with
    /// the highest log probability per token
#[instrument(skip(self, request))]
pub(crate) async fn generate_best_of(
&self,
request: GenerateRequest,
best_of: usize,
) -> Result<(InferResponse, Vec<InferResponse>), InferError> {
// validate best_of parameter separately
let best_of = self.validation.validate_best_of(best_of)?;
// create multiple generate requests
let mut infer_responses: Vec<InferResponse> =
try_join_all((0..best_of).map(|_| self.generate(request.clone()))).await?;
// get the sequence with the highest log probability per token
let mut max_index = 0;
let mut max_logprob: f32 = f32::MIN;
for (i, response) in infer_responses.iter().enumerate() {
// mean logprobs of the generated tokens
let sequence_logprob = response
.tokens
.iter()
.map(|token| token.logprob)
.sum::<f32>()
/ response.tokens.len() as f32;
// set best sequence
if sequence_logprob > max_logprob {
max_index = i;
max_logprob = sequence_logprob;
}
}
let best_response = infer_responses.remove(max_index);
Ok((best_response, infer_responses))
}
#[instrument(skip(self))]
pub(crate) async fn health(&self) -> bool {
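        // Ask the backend for its health, passing the previously cached state, then cache the new result.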
let health = self
.backend
.health(self.backend_health.load(Ordering::SeqCst))
.await;
self.backend_health.store(health, Ordering::SeqCst);
health
}
}
/// Type alias for generation responses
pub(crate) type GenerateStreamResponse = (
OwnedSemaphorePermit,
u32, // input_length
UnboundedReceiverStream<Result<InferStreamResponse, InferError>>,
);
#[derive(Debug)]
pub struct GeneratedText {
pub text: String,
pub generated_tokens: u32,
pub finish_reason: FinishReason,
pub seed: Option<u64>,
}
#[derive(Debug)]
pub enum InferStreamResponse {
// Optional first message
Prefill(Vec<PrefillToken>),
// Intermediate messages
Intermediate {
token: Token,
top_tokens: Vec<Token>,
},
// Last message
End {
token: Token,
top_tokens: Vec<Token>,
generated_text: GeneratedText,
start: Instant,
queued: Instant,
},
}
#[derive(Debug)]
pub(crate) struct InferResponse {
/// input_length is the input as perceived by the rust tokenizer in the
/// validation pathway. It is redundant with prefill.len() but prefill
/// has data only if the user asked for it. This will always be filled.
pub(crate) _input_length: u32,
pub(crate) prefill: Vec<PrefillToken>,
pub(crate) tokens: Vec<Token>,
pub(crate) generated_text: GeneratedText,
pub(crate) queued: Instant,
pub(crate) start: Instant,
pub(crate) top_tokens: Vec<Vec<Token>>,
}
#[derive(Debug, Error)]
pub enum InferError {
#[error("Request failed during generation: {0}")]
GenerationError(String),
#[error("Model is overloaded")]
Overloaded(#[from] TryAcquireError),
#[error("Input validation error: {0}")]
ValidationError(#[from] ValidationError),
#[error("Incomplete generation")]
IncompleteGeneration,
#[error("Template error: {0}")]
TemplateError(#[from] minijinja::Error),
#[error("Missing template vatiable: {0}")]
MissingTemplateVariable(String),
#[error("Tool error: {0}")]
ToolError(String),
}
impl InferError {
pub(crate) fn error_type(&self) -> &str {
match self {
InferError::GenerationError(_) => "generation",
InferError::Overloaded(_) => "overloaded",
InferError::ValidationError(_) => "validation",
InferError::IncompleteGeneration => "incomplete_generation",
InferError::TemplateError(_) => "template_error",
InferError::MissingTemplateVariable(_) => "missing_template_variable",
InferError::ToolError(_) => "tool_error",
}
}
}
|
text-generation-inference/router/src/infer/mod.rs/0
|
{
"file_path": "text-generation-inference/router/src/infer/mod.rs",
"repo_id": "text-generation-inference",
"token_count": 5577
}
| 265