# Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, logging
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import SchedulerMixin
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class FlowMatchHeunDiscreteSchedulerOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
"""
prev_sample: torch.FloatTensor
class FlowMatchHeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
"""
Heun's second-order scheduler for flow matching.
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
methods the library implements for all schedulers such as loading and saving.
Args:
num_train_timesteps (`int`, defaults to 1000):
The number of diffusion steps to train the model.
shift (`float`, defaults to 1.0):
The shift value for the timestep schedule.
"""
_compatibles = []
order = 2
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
shift: float = 1.0,
):
timesteps = np.linspace(1, num_train_timesteps, num_train_timesteps, dtype=np.float32)[::-1].copy()
timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32)
sigmas = timesteps / num_train_timesteps
sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)
self.timesteps = sigmas * num_train_timesteps
self._step_index = None
self._begin_index = None
self.sigmas = sigmas.to("cpu") # to avoid too much CPU/GPU communication
self.sigma_min = self.sigmas[-1].item()
self.sigma_max = self.sigmas[0].item()
@property
def step_index(self):
"""
The index counter for the current timestep. It increases by 1 after each scheduler step.
"""
return self._step_index
@property
def begin_index(self):
"""
The index for the first timestep. It should be set from the pipeline with the `set_begin_index` method.
"""
return self._begin_index
# Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
def set_begin_index(self, begin_index: int = 0):
"""
Sets the begin index for the scheduler. This function should be run from the pipeline before inference.
Args:
begin_index (`int`):
The begin index for the scheduler.
"""
self._begin_index = begin_index
def scale_noise(
self,
sample: torch.FloatTensor,
timestep: Union[float, torch.FloatTensor],
noise: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Forward process in flow matching.
Args:
sample (`torch.FloatTensor`):
The input sample.
timestep (`float` or `torch.FloatTensor`):
The current timestep in the diffusion chain.
noise (`torch.FloatTensor`, *optional*):
The noise tensor to mix with the sample.
Returns:
`torch.FloatTensor`:
A scaled input sample.
"""
if self.step_index is None:
self._init_step_index(timestep)
sigma = self.sigmas[self.step_index]
sample = sigma * noise + (1.0 - sigma) * sample
return sample
def _sigma_to_t(self, sigma):
return sigma * self.config.num_train_timesteps
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
"""
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
Args:
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
"""
self.num_inference_steps = num_inference_steps
timesteps = np.linspace(
self._sigma_to_t(self.sigma_max), self._sigma_to_t(self.sigma_min), num_inference_steps
)
sigmas = timesteps / self.config.num_train_timesteps
sigmas = self.config.shift * sigmas / (1 + (self.config.shift - 1) * sigmas)
sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device)
timesteps = sigmas * self.config.num_train_timesteps
timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
self.timesteps = timesteps.to(device=device)
sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)])
self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
# empty dt and derivative
self.prev_derivative = None
self.dt = None
self._step_index = None
self._begin_index = None
def index_for_timestep(self, timestep, schedule_timesteps=None):
if schedule_timesteps is None:
schedule_timesteps = self.timesteps
indices = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
pos = 1 if len(indices) > 1 else 0
return indices[pos].item()
def _init_step_index(self, timestep):
if self.begin_index is None:
if isinstance(timestep, torch.Tensor):
timestep = timestep.to(self.timesteps.device)
self._step_index = self.index_for_timestep(timestep)
else:
self._step_index = self._begin_index
@property
def state_in_first_order(self):
return self.dt is None
def step(
self,
model_output: torch.FloatTensor,
timestep: Union[float, torch.FloatTensor],
sample: torch.FloatTensor,
s_churn: float = 0.0,
s_tmin: float = 0.0,
s_tmax: float = float("inf"),
s_noise: float = 1.0,
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[FlowMatchHeunDiscreteSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. This function propagates the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`):
The direct output from learned diffusion model.
timestep (`float`):
The current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
A current instance of a sample created by the diffusion process.
s_churn (`float`, defaults to 0.0):
The amount of stochasticity ("churn") to inject by temporarily increasing sigma at each step.
s_tmin (`float`, defaults to 0.0):
The minimum sigma at which churn is applied.
s_tmax (`float`, defaults to `inf`):
The maximum sigma at which churn is applied.
s_noise (`float`, defaults to 1.0):
Scaling factor for noise added to the sample.
generator (`torch.Generator`, *optional*):
A random number generator.
return_dict (`bool`):
Whether or not to return a
[`~schedulers.scheduling_flow_match_heun_discrete.FlowMatchHeunDiscreteSchedulerOutput`] or tuple.
Returns:
[`~schedulers.scheduling_flow_match_heun_discrete.FlowMatchHeunDiscreteSchedulerOutput`] or `tuple`:
If `return_dict` is `True`, [`~schedulers.scheduling_flow_match_heun_discrete.FlowMatchHeunDiscreteSchedulerOutput`]
is returned, otherwise a tuple is returned where the first element is the sample tensor.
"""
if (
isinstance(timestep, int)
or isinstance(timestep, torch.IntTensor)
or isinstance(timestep, torch.LongTensor)
):
raise ValueError(
(
"Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
" `HeunDiscreteScheduler.step()` is not supported. Make sure to pass"
" one of the `scheduler.timesteps` as a timestep."
),
)
if self.step_index is None:
self._init_step_index(timestep)
# Upcast to avoid precision issues when computing prev_sample
sample = sample.to(torch.float32)
if self.state_in_first_order:
sigma = self.sigmas[self.step_index]
sigma_next = self.sigmas[self.step_index + 1]
else:
# 2nd order / Heun's method
sigma = self.sigmas[self.step_index - 1]
sigma_next = self.sigmas[self.step_index]
gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0
noise = randn_tensor(
model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator
)
eps = noise * s_noise
sigma_hat = sigma * (gamma + 1)
if gamma > 0:
sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5
if self.state_in_first_order:
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
denoised = sample - model_output * sigma
# 2. convert to an ODE derivative for 1st order
derivative = (sample - denoised) / sigma_hat
# 3. Delta timestep
dt = sigma_next - sigma_hat
# store for 2nd order step
self.prev_derivative = derivative
self.dt = dt
self.sample = sample
else:
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
denoised = sample - model_output * sigma_next
# 2. 2nd order / Heun's method
derivative = (sample - denoised) / sigma_next
derivative = 0.5 * (self.prev_derivative + derivative)
# 3. take prev timestep & sample
dt = self.dt
sample = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
self.prev_derivative = None
self.dt = None
self.sample = None
prev_sample = sample + derivative * dt
# Cast sample back to model compatible dtype
prev_sample = prev_sample.to(model_output.dtype)
# upon completion increase step index by one
self._step_index += 1
if not return_dict:
return (prev_sample,)
return FlowMatchHeunDiscreteSchedulerOutput(prev_sample=prev_sample)
def __len__(self):
return self.config.num_train_timesteps
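# [Editor's sketch, not part of the original file] A minimal denoising loop for
# the scheduler above. `dummy_velocity_model` is a hypothetical stand-in for a
# flow-matching model that predicts velocity; only the scheduler API mirrors the
# class defined in this file. Call `_demo_flow_match_heun_loop()` manually to run.
def _demo_flow_match_heun_loop():
    scheduler = FlowMatchHeunDiscreteScheduler(num_train_timesteps=1000, shift=1.0)
    scheduler.set_timesteps(num_inference_steps=8)

    def dummy_velocity_model(x, t):
        # A real model would predict the flow velocity conditioned on t.
        return torch.zeros_like(x)

    sample = torch.randn(1, 4, 32, 32)  # start from pure noise (sigma_max == 1.0)
    for t in scheduler.timesteps:
        # Heun is 2nd order: interior timesteps are duplicated, so the model is
        # evaluated twice per output step; `step` tracks which stage it is in.
        model_output = dummy_velocity_model(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample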
# --- Source: diffusers/src/diffusers/schedulers/scheduling_flow_match_heun_discrete.py ---
# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
"""
prev_sample: torch.Tensor
pred_original_sample: Optional[torch.Tensor] = None
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
num_diffusion_timesteps,
max_beta=0.999,
alpha_transform_type="cosine",
):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time, for t in [0, 1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
Choose from `cosine` or `exp`
Returns:
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(t):
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t):
return math.exp(t * -12.0)
else:
raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
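# [Editor's sketch, not part of the original file] Because each beta is defined as
# 1 - alpha_bar(t2) / alpha_bar(t1), the cumulative product of (1 - beta) telescopes
# back to the continuous schedule (up to max_beta clipping, which only matters near t = 1):
def _demo_betas_for_alpha_bar():
    num_steps = 10
    betas = betas_for_alpha_bar(num_steps)

    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    i = 4  # an interior step, where max_beta clipping cannot trigger
    prod = torch.cumprod(1.0 - betas, dim=0)[i].item()
    expected = alpha_bar((i + 1) / num_steps) / alpha_bar(0.0)
    assert abs(prod - expected) < 1e-5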
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
"""
NOTE: do not use this scheduler. The DDPM scheduler has been updated to support the changes made here. This
scheduler will be removed and replaced with DDPM.
This is a modified DDPM Scheduler specifically for the karlo unCLIP model.
This scheduler has some minor variations in how it calculates the learned range variance and dynamically
re-calculates betas based off the timesteps it is skipping.
The scheduler also uses a slightly different step ratio when computing timesteps to use for inference.
See [`~DDPMScheduler`] for more information on DDPM scheduling
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
variance_type (`str`):
The type of variance to use when adding noise to the denoised sample. Choose from `fixed_small_log`
or `learned_range`.
clip_sample (`bool`, default `True`):
option to clip predicted sample between `-clip_sample_range` and `clip_sample_range` for numerical
stability.
clip_sample_range (`float`, default `1.0`):
The range to clip the sample between. See `clip_sample`.
prediction_type (`str`, default `epsilon`, *optional*):
prediction type of the scheduler function; one of `epsilon` (predicting the noise of the diffusion process)
or `sample` (directly predicting the noisy sample).
"""
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
variance_type: str = "fixed_small_log",
clip_sample: bool = True,
clip_sample_range: Optional[float] = 1.0,
prediction_type: str = "epsilon",
beta_schedule: str = "squaredcos_cap_v2",
):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
self.betas = betas_for_alpha_bar(num_train_timesteps)
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
self.one = torch.tensor(1.0)
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# setable values
self.num_inference_steps = None
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
self.variance_type = variance_type
def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.Tensor`): input sample
timestep (`int`, optional): current timestep
Returns:
`torch.Tensor`: scaled input sample
"""
return sample
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Note that this scheduler uses a slightly different step ratio than the other diffusers schedulers. The
different step ratio is to mimic the original karlo implementation and does not affect the quality or accuracy
of the results.
Args:
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
"""
self.num_inference_steps = num_inference_steps
step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
self.timesteps = torch.from_numpy(timesteps).to(device)
def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
if prev_timestep is None:
prev_timestep = t - 1
alpha_prod_t = self.alphas_cumprod[t]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
beta = self.betas[t]
else:
beta = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
variance = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
variance_type = self.config.variance_type
# hacks - these were likely added for training stability
if variance_type == "fixed_small_log":
variance = torch.log(torch.clamp(variance, min=1e-20))
variance = torch.exp(0.5 * variance)
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
min_log = variance.log()
max_log = beta.log()
frac = (predicted_variance + 1) / 2
variance = frac * max_log + (1 - frac) * min_log
return variance
def step(
self,
model_output: torch.Tensor,
timestep: int,
sample: torch.Tensor,
prev_timestep: Optional[int] = None,
generator=None,
return_dict: bool = True,
) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.Tensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.Tensor`):
current instance of sample being created by diffusion process.
prev_timestep (`int`, *optional*): The previous timestep to predict the previous sample at.
Used to dynamically compute beta. If not given, `t-1` is used and the pre-computed beta is used.
generator: random number generator.
return_dict (`bool`): option for returning tuple rather than UnCLIPSchedulerOutput class
Returns:
[`~schedulers.scheduling_unclip.UnCLIPSchedulerOutput`] or `tuple`:
[`~schedulers.scheduling_unclip.UnCLIPSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
t = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
else:
predicted_variance = None
# 1. compute alphas, betas
if prev_timestep is None:
prev_timestep = t - 1
alpha_prod_t = self.alphas_cumprod[t]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
beta = self.betas[t]
alpha = self.alphas[t]
else:
beta = 1 - alpha_prod_t / alpha_prod_t_prev
alpha = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
" for the UnCLIPScheduler."
)
# 3. Clip "predicted x_0"
if self.config.clip_sample:
pred_original_sample = torch.clamp(
pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * beta) / beta_prod_t
current_sample_coeff = alpha ** (0.5) * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
variance = 0
if t > 0:
variance_noise = randn_tensor(
model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
)
variance = self._get_variance(
t,
predicted_variance=predicted_variance,
prev_timestep=prev_timestep,
)
if self.variance_type == "fixed_small_log":
variance = variance
elif self.variance_type == "learned_range":
variance = (0.5 * variance).exp()
else:
raise ValueError(
f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
" for the UnCLIPScheduler."
)
variance = variance * variance_noise
pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
def add_noise(
self,
original_samples: torch.Tensor,
noise: torch.Tensor,
timesteps: torch.IntTensor,
) -> torch.Tensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
# Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement
# for the subsequent add_noise calls
self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device)
alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype)
timesteps = timesteps.to(original_samples.device)
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
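# [Editor's sketch, not part of the original file] `add_noise` implements the
# closed-form forward process q(x_t | x_0) = N(sqrt(abar_t) * x_0, (1 - abar_t) * I);
# a quick numerical check of the broadcasting logic above:
def _demo_unclip_add_noise():
    scheduler = UnCLIPScheduler(num_train_timesteps=1000)
    x0 = torch.randn(2, 3, 8, 8)
    noise = torch.randn_like(x0)
    t = torch.tensor([10, 500])  # one timestep per batch element
    noisy = scheduler.add_noise(x0, noise, t)
    abar = scheduler.alphas_cumprod[t].view(-1, 1, 1, 1)
    assert torch.allclose(noisy, abar.sqrt() * x0 + (1 - abar).sqrt() * noise)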
# --- Source: diffusers/src/diffusers/schedulers/scheduling_unclip.py ---
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class AudioDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "librosa"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "librosa"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "librosa"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "librosa"])
class Mel(metaclass=DummyObject):
_backends = ["torch", "librosa"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "librosa"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "librosa"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "librosa"])
# --- Source: diffusers/src/diffusers/utils/dummy_torch_and_librosa_objects.py ---
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PEFT utilities: Utilities related to peft library
"""
import collections
import importlib
from typing import Optional
from packaging import version
from .import_utils import is_peft_available, is_torch_available
if is_torch_available():
import torch
def recurse_remove_peft_layers(model):
r"""
Recursively replace all instances of `LoraLayer` with corresponding new layers in `model`.
"""
from peft.tuners.tuners_utils import BaseTunerLayer
has_base_layer_pattern = False
for module in model.modules():
if isinstance(module, BaseTunerLayer):
has_base_layer_pattern = hasattr(module, "base_layer")
break
if has_base_layer_pattern:
from peft.utils import _get_submodules
key_list = [key for key, _ in model.named_modules() if "lora" not in key]
for key in key_list:
try:
parent, target, target_name = _get_submodules(model, key)
except AttributeError:
continue
if hasattr(target, "base_layer"):
setattr(parent, target_name, target.get_base_layer())
else:
# This is for backwards compatibility with PEFT <= 0.6.2.
# TODO can be removed once that PEFT version is no longer supported.
from peft.tuners.lora import LoraLayer
for name, module in model.named_children():
if len(list(module.children())) > 0:
## compound module, go inside it
recurse_remove_peft_layers(module)
module_replaced = False
if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear):
new_module = torch.nn.Linear(
module.in_features,
module.out_features,
bias=module.bias is not None,
).to(module.weight.device)
new_module.weight = module.weight
if module.bias is not None:
new_module.bias = module.bias
module_replaced = True
elif isinstance(module, LoraLayer) and isinstance(module, torch.nn.Conv2d):
new_module = torch.nn.Conv2d(
module.in_channels,
module.out_channels,
module.kernel_size,
module.stride,
module.padding,
module.dilation,
module.groups,
).to(module.weight.device)
new_module.weight = module.weight
if module.bias is not None:
new_module.bias = module.bias
module_replaced = True
if module_replaced:
setattr(model, name, new_module)
del module
if torch.cuda.is_available():
torch.cuda.empty_cache()
return model
def scale_lora_layers(model, weight):
"""
Adjust the weightage given to the LoRA layers of the model.
Args:
model (`torch.nn.Module`):
The model to scale.
weight (`float`):
The weight to be given to the LoRA layers.
"""
from peft.tuners.tuners_utils import BaseTunerLayer
if weight == 1.0:
return
for module in model.modules():
if isinstance(module, BaseTunerLayer):
module.scale_layer(weight)
def unscale_lora_layers(model, weight: Optional[float] = None):
"""
Removes the previously passed weight given to the LoRA layers of the model.
Args:
model (`torch.nn.Module`):
The model to scale.
weight (`float`, *optional*):
The weight to be given to the LoRA layers. If no scale is passed the scale of the lora layer will be
re-initialized to the correct value. If 0.0 is passed, we will re-initialize the scale with the correct
value.
"""
from peft.tuners.tuners_utils import BaseTunerLayer
if weight == 1.0:
return
for module in model.modules():
if isinstance(module, BaseTunerLayer):
if weight is not None and weight != 0:
module.unscale_layer(weight)
elif weight is not None and weight == 0:
for adapter_name in module.active_adapters:
# if weight == 0 unscale should re-set the scale to the original value.
module.set_scale(adapter_name, 1.0)
def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True):
rank_pattern = {}
alpha_pattern = {}
r = lora_alpha = list(rank_dict.values())[0]
if len(set(rank_dict.values())) > 1:
# get the rank occurring most often
r = collections.Counter(rank_dict.values()).most_common()[0][0]
# for modules with a rank different from the most common one, add them to `rank_pattern`
rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items()))
rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()}
if network_alpha_dict is not None and len(network_alpha_dict) > 0:
if len(set(network_alpha_dict.values())) > 1:
# get the alpha occurring most often
lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0]
# for modules with an alpha different from the most common one, add them to `alpha_pattern`
alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items()))
if is_unet:
alpha_pattern = {
".".join(k.split(".lora_A.")[0].split(".")).replace(".alpha", ""): v
for k, v in alpha_pattern.items()
}
else:
alpha_pattern = {".".join(k.split(".down.")[0].split(".")[:-1]): v for k, v in alpha_pattern.items()}
else:
lora_alpha = set(network_alpha_dict.values()).pop()
# layer names without the Diffusers-specific LoRA suffixes
target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()})
use_dora = any("lora_magnitude_vector" in k for k in peft_state_dict)
lora_config_kwargs = {
"r": r,
"lora_alpha": lora_alpha,
"rank_pattern": rank_pattern,
"alpha_pattern": alpha_pattern,
"target_modules": target_modules,
"use_dora": use_dora,
}
return lora_config_kwargs
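# [Editor's sketch, not part of the original file] A worked example of how
# `get_peft_kwargs` derives `r` and `rank_pattern` from a toy rank dict (the
# module names below are made up for illustration):
def _demo_get_peft_kwargs():
    rank_dict = {
        "unet.down.attn1.to_q.lora_B.weight": 4,
        "unet.down.attn1.to_k.lora_B.weight": 4,
        "unet.mid.attn1.to_q.lora_B.weight": 8,
    }
    peft_state_dict = {k: None for k in rank_dict}
    kwargs = get_peft_kwargs(rank_dict, None, peft_state_dict)
    # `r` is the most common rank; the outlier module lands in `rank_pattern`.
    assert kwargs["r"] == 4
    assert kwargs["rank_pattern"] == {"unet.mid.attn1.to_q": 8}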
def get_adapter_name(model):
from peft.tuners.tuners_utils import BaseTunerLayer
for module in model.modules():
if isinstance(module, BaseTunerLayer):
return f"default_{len(module.r)}"
return "default_0"
def set_adapter_layers(model, enabled=True):
from peft.tuners.tuners_utils import BaseTunerLayer
for module in model.modules():
if isinstance(module, BaseTunerLayer):
# Recent versions of PEFT call `enable_adapters` instead
if hasattr(module, "enable_adapters"):
module.enable_adapters(enabled=enabled)
else:
module.disable_adapters = not enabled
def delete_adapter_layers(model, adapter_name):
from peft.tuners.tuners_utils import BaseTunerLayer
for module in model.modules():
if isinstance(module, BaseTunerLayer):
if hasattr(module, "delete_adapter"):
module.delete_adapter(adapter_name)
else:
raise ValueError(
"The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1"
)
# For transformers integration - we need to pop the adapter from the config
if getattr(model, "_hf_peft_config_loaded", False) and hasattr(model, "peft_config"):
model.peft_config.pop(adapter_name, None)
# In case all adapters are deleted, we need to delete the config
# and make sure to set the flag to False
if len(model.peft_config) == 0:
del model.peft_config
model._hf_peft_config_loaded = None
def set_weights_and_activate_adapters(model, adapter_names, weights):
from peft.tuners.tuners_utils import BaseTunerLayer
def get_module_weight(weight_for_adapter, module_name):
if not isinstance(weight_for_adapter, dict):
# If weight_for_adapter is a single number, always return it.
return weight_for_adapter
for layer_name, weight_ in weight_for_adapter.items():
if layer_name in module_name:
return weight_
parts = module_name.split(".")
# e.g. key = "down_blocks.1.attentions.0"
key = f"{parts[0]}.{parts[1]}.attentions.{parts[3]}"
block_weight = weight_for_adapter.get(key, 1.0)
return block_weight
# iterate over each adapter, make it active and set the corresponding scaling weight
for adapter_name, weight in zip(adapter_names, weights):
for module_name, module in model.named_modules():
if isinstance(module, BaseTunerLayer):
# For backward compatibility with previous PEFT versions
if hasattr(module, "set_adapter"):
module.set_adapter(adapter_name)
else:
module.active_adapter = adapter_name
module.set_scale(adapter_name, get_module_weight(weight, module_name))
# set multiple active adapters
for module in model.modules():
if isinstance(module, BaseTunerLayer):
# For backward compatibility with previous PEFT versions
if hasattr(module, "set_adapter"):
module.set_adapter(adapter_names)
else:
module.active_adapter = adapter_names
def check_peft_version(min_version: str) -> None:
r"""
Checks if the version of PEFT is compatible.
Args:
min_version (`str`):
The minimum PEFT version to check against.
"""
if not is_peft_available():
raise ValueError("PEFT is not installed. Please install it with `pip install peft`")
is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) > version.parse(min_version)
if not is_peft_version_compatible:
raise ValueError(
f"The version of PEFT you are using is not compatible, please use a version that is greater"
f" than {min_version}"
)
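# [Editor's sketch, not part of the original file] `check_peft_version` uses a
# strict `>` comparison, so the installed PEFT version must be strictly newer
# than `min_version`:
def _demo_check_peft_version():
    try:
        check_peft_version(min_version="0.6.1")
    except ValueError as err:
        # Raised when PEFT is missing or not newer than 0.6.1.
        print(err)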
# --- Source: diffusers/src/diffusers/utils/peft_utils.py ---
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import tempfile
import unittest
from itertools import product
import numpy as np
import torch
from diffusers import (
AutoencoderKL,
DDIMScheduler,
FlowMatchEulerDiscreteScheduler,
LCMScheduler,
UNet2DConditionModel,
)
from diffusers.utils.import_utils import is_peft_available
from diffusers.utils.testing_utils import (
floats_tensor,
require_peft_backend,
require_peft_version_greater,
skip_mps,
torch_device,
)
if is_peft_available():
from peft import LoraConfig
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import get_peft_model_state_dict
def state_dicts_almost_equal(sd1, sd2):
sd1 = dict(sorted(sd1.items()))
sd2 = dict(sorted(sd2.items()))
models_are_equal = True
for ten1, ten2 in zip(sd1.values(), sd2.values()):
if (ten1 - ten2).abs().max() > 1e-3:
models_are_equal = False
return models_are_equal
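# [Editor's sketch, not part of the original test file] `state_dicts_almost_equal`
# sorts keys, then compares tensors pairwise with an absolute tolerance of 1e-3:
def _demo_state_dicts_almost_equal():
    close = {"w": torch.zeros(2), "b": torch.zeros(2)}
    nudged = {"b": torch.full((2,), 5e-4), "w": torch.zeros(2)}
    far = {"w": torch.full((2,), 5e-3), "b": torch.zeros(2)}
    assert state_dicts_almost_equal(close, nudged)  # within 1e-3
    assert not state_dicts_almost_equal(close, far)  # beyond 1e-3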
def check_if_lora_correctly_set(model) -> bool:
"""
Checks if the LoRA layers are correctly set with peft
"""
for module in model.modules():
if isinstance(module, BaseTunerLayer):
return True
return False
@require_peft_backend
class PeftLoraLoaderMixinTests:
pipeline_class = None
scheduler_cls = None
scheduler_kwargs = None
uses_flow_matching = False
has_two_text_encoders = False
has_three_text_encoders = False
text_encoder_cls, text_encoder_id = None, None
text_encoder_2_cls, text_encoder_2_id = None, None
text_encoder_3_cls, text_encoder_3_id = None, None
tokenizer_cls, tokenizer_id = None, None
tokenizer_2_cls, tokenizer_2_id = None, None
tokenizer_3_cls, tokenizer_3_id = None, None
unet_kwargs = None
transformer_cls = None
transformer_kwargs = None
vae_kwargs = None
def get_dummy_components(self, scheduler_cls=None, use_dora=False):
if self.unet_kwargs and self.transformer_kwargs:
raise ValueError("Both `unet_kwargs` and `transformer_kwargs` cannot be specified.")
if self.has_two_text_encoders and self.has_three_text_encoders:
raise ValueError("Both `has_two_text_encoders` and `has_three_text_encoders` cannot be True.")
scheduler_cls = self.scheduler_cls if scheduler_cls is None else scheduler_cls
rank = 4
torch.manual_seed(0)
if self.unet_kwargs is not None:
unet = UNet2DConditionModel(**self.unet_kwargs)
else:
transformer = self.transformer_cls(**self.transformer_kwargs)
scheduler = scheduler_cls(**self.scheduler_kwargs)
torch.manual_seed(0)
vae = AutoencoderKL(**self.vae_kwargs)
text_encoder = self.text_encoder_cls.from_pretrained(self.text_encoder_id)
tokenizer = self.tokenizer_cls.from_pretrained(self.tokenizer_id)
if self.text_encoder_2_cls is not None:
text_encoder_2 = self.text_encoder_2_cls.from_pretrained(self.text_encoder_2_id)
tokenizer_2 = self.tokenizer_2_cls.from_pretrained(self.tokenizer_2_id)
if self.text_encoder_3_cls is not None:
text_encoder_3 = self.text_encoder_3_cls.from_pretrained(self.text_encoder_3_id)
tokenizer_3 = self.tokenizer_3_cls.from_pretrained(self.tokenizer_3_id)
text_lora_config = LoraConfig(
r=rank,
lora_alpha=rank,
target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
init_lora_weights=False,
use_dora=use_dora,
)
denoiser_lora_config = LoraConfig(
r=rank,
lora_alpha=rank,
target_modules=["to_q", "to_k", "to_v", "to_out.0"],
init_lora_weights=False,
use_dora=use_dora,
)
pipeline_components = {
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
# Denoiser
if self.unet_kwargs is not None:
pipeline_components.update({"unet": unet})
elif self.transformer_kwargs is not None:
pipeline_components.update({"transformer": transformer})
# Remaining text encoders.
if self.text_encoder_2_cls is not None:
pipeline_components.update({"tokenizer_2": tokenizer_2, "text_encoder_2": text_encoder_2})
if self.text_encoder_3_cls is not None:
pipeline_components.update({"tokenizer_3": tokenizer_3, "text_encoder_3": text_encoder_3})
# Remaining stuff
init_params = inspect.signature(self.pipeline_class.__init__).parameters
if "safety_checker" in init_params:
pipeline_components.update({"safety_checker": None})
if "feature_extractor" in init_params:
pipeline_components.update({"feature_extractor": None})
if "image_encoder" in init_params:
pipeline_components.update({"image_encoder": None})
return pipeline_components, text_lora_config, denoiser_lora_config
@property
def output_shape(self):
raise NotImplementedError
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 10
num_channels = 4
sizes = (32, 32)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "A painting of a squirrel eating a burger",
"num_inference_steps": 5,
"guidance_scale": 6.0,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
# Copied from: https://colab.research.google.com/gist/sayakpaul/df2ef6e1ae6d8c10a49d859883b10860/scratchpad.ipynb
def get_dummy_tokens(self):
max_seq_length = 77
inputs = torch.randint(2, 56, size=(1, max_seq_length), generator=torch.manual_seed(0))
prepared_inputs = {}
prepared_inputs["input_ids"] = inputs
return prepared_inputs
def test_simple_inference(self):
"""
Tests a simple inference and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs()
output_no_lora = pipe(**inputs).images
self.assertTrue(output_no_lora.shape == self.output_shape)
def test_simple_inference_with_text_lora(self):
"""
Tests a simple inference with lora attached on the text encoder
and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
output_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
)
def test_simple_inference_with_text_lora_and_scale(self):
"""
Tests a simple inference with lora attached on the text encoder + scale argument
and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
output_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
)
if self.unet_kwargs is not None:
output_lora_scale = pipe(
**inputs, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": 0.5}
).images
else:
output_lora_scale = pipe(
**inputs, generator=torch.manual_seed(0), joint_attention_kwargs={"scale": 0.5}
).images
self.assertTrue(
not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3),
"Lora + scale should change the output",
)
if self.unet_kwargs is not None:
output_lora_0_scale = pipe(
**inputs, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": 0.0}
).images
else:
output_lora_0_scale = pipe(
**inputs, generator=torch.manual_seed(0), joint_attention_kwargs={"scale": 0.0}
).images
self.assertTrue(
np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3),
"Lora + 0 scale should lead to same result as no LoRA",
)
def test_simple_inference_with_text_lora_fused(self):
"""
Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model
and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.fuse_lora()
# Fusing should still keep the LoRA layers
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
output_fused = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertFalse(
np.allclose(output_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output"
)
def test_simple_inference_with_text_lora_unloaded(self):
"""
Tests a simple inference with lora attached to text encoder, then unloads the lora weights
and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.unload_lora_weights()
# unloading should remove the LoRA layers
self.assertFalse(
check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder"
)
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertFalse(
check_if_lora_correctly_set(pipe.text_encoder_2),
"Lora not correctly unloaded in text encoder 2",
)
output_unloaded = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3),
"Unloading LoRA should give the same output as no LoRA",
)
def test_simple_inference_with_text_lora_save_load(self):
"""
Tests a simple use case where users could use saving utilities for LoRA.
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
with tempfile.TemporaryDirectory() as tmpdirname:
text_encoder_state_dict = get_peft_model_state_dict(pipe.text_encoder)
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
text_encoder_2_state_dict = get_peft_model_state_dict(pipe.text_encoder_2)
self.pipeline_class.save_lora_weights(
save_directory=tmpdirname,
text_encoder_lora_layers=text_encoder_state_dict,
text_encoder_2_lora_layers=text_encoder_2_state_dict,
safe_serialization=False,
)
else:
self.pipeline_class.save_lora_weights(
save_directory=tmpdirname,
text_encoder_lora_layers=text_encoder_state_dict,
safe_serialization=False,
)
if self.has_two_text_encoders:
if "text_encoder_2" not in self.pipeline_class._lora_loadable_modules:
self.pipeline_class.save_lora_weights(
save_directory=tmpdirname,
text_encoder_lora_layers=text_encoder_state_dict,
safe_serialization=False,
)
self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
pipe.unload_lora_weights()
pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))
images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
self.assertTrue(
np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
"Loading from saved checkpoints should give same results.",
)
def test_simple_inference_with_partial_text_lora(self):
"""
Tests a simple inference with lora attached on the text encoder
with different ranks and some adapters removed
and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, _, _ = self.get_dummy_components(scheduler_cls)
# Verify `StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder` handles different ranks per module (PR#8324).
text_lora_config = LoraConfig(
r=4,
rank_pattern={"q_proj": 1, "k_proj": 2, "v_proj": 3},
lora_alpha=4,
target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
init_lora_weights=False,
use_dora=False,
)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
# Gather the state dict for the PEFT model, excluding `layers.4`, to ensure `load_lora_into_text_encoder`
# supports missing layers (PR#8324).
state_dict = {
f"text_encoder.{module_name}": param
for module_name, param in get_peft_model_state_dict(pipe.text_encoder).items()
if "text_model.encoder.layers.4" not in module_name
}
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
state_dict.update(
{
f"text_encoder_2.{module_name}": param
for module_name, param in get_peft_model_state_dict(pipe.text_encoder_2).items()
if "text_model.encoder.layers.4" not in module_name
}
)
output_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
)
# Unload lora and load it back using the pipe.load_lora_weights machinery
pipe.unload_lora_weights()
pipe.load_lora_weights(state_dict)
output_partial_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
not np.allclose(output_partial_lora, output_lora, atol=1e-3, rtol=1e-3),
"Removing adapters should change the output",
)
def test_simple_inference_save_pretrained(self):
"""
Tests a simple use case where users could use saving utilities for LoRA through save_pretrained
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname)
pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
pipe_from_pretrained.to(torch_device)
self.assertTrue(
check_if_lora_correctly_set(pipe_from_pretrained.text_encoder),
"Lora not correctly set in text encoder",
)
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertTrue(
check_if_lora_correctly_set(pipe_from_pretrained.text_encoder_2),
"Lora not correctly set in text encoder 2",
)
images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
"Loading from saved checkpoints should give same results.",
)
def test_simple_inference_with_text_denoiser_lora_save_load(self):
"""
Tests a simple use case where users could use saving utilities for LoRA for UNet + text encoder
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config)
else:
pipe.transformer.add_adapter(denoiser_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
with tempfile.TemporaryDirectory() as tmpdirname:
text_encoder_state_dict = get_peft_model_state_dict(pipe.text_encoder)
if self.unet_kwargs is not None:
denoiser_state_dict = get_peft_model_state_dict(pipe.unet)
else:
denoiser_state_dict = get_peft_model_state_dict(pipe.transformer)
saving_kwargs = {
"save_directory": tmpdirname,
"text_encoder_lora_layers": text_encoder_state_dict,
"safe_serialization": False,
}
if self.unet_kwargs is not None:
saving_kwargs.update({"unet_lora_layers": denoiser_state_dict})
else:
saving_kwargs.update({"transformer_lora_layers": denoiser_state_dict})
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
text_encoder_2_state_dict = get_peft_model_state_dict(pipe.text_encoder_2)
saving_kwargs.update({"text_encoder_2_lora_layers": text_encoder_2_state_dict})
self.pipeline_class.save_lora_weights(**saving_kwargs)
self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
pipe.unload_lora_weights()
pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))
images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
self.assertTrue(
np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
"Loading from saved checkpoints should give same results.",
)
def test_simple_inference_with_text_denoiser_lora_and_scale(self):
"""
        Tests a simple inference with LoRA attached to the text encoder + denoiser, passing a scale argument,
        and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config)
else:
pipe.transformer.add_adapter(denoiser_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
output_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
)
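            # The LoRA scale is forwarded through the attention kwargs: UNet pipelines read it from
            # `cross_attention_kwargs`, transformer pipelines from `joint_attention_kwargs`.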
if self.unet_kwargs is not None:
output_lora_scale = pipe(
**inputs, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": 0.5}
).images
else:
output_lora_scale = pipe(
**inputs, generator=torch.manual_seed(0), joint_attention_kwargs={"scale": 0.5}
).images
self.assertTrue(
not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3),
"Lora + scale should change the output",
)
if self.unet_kwargs is not None:
output_lora_0_scale = pipe(
**inputs, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": 0.0}
).images
else:
output_lora_0_scale = pipe(
**inputs, generator=torch.manual_seed(0), joint_attention_kwargs={"scale": 0.0}
).images
self.assertTrue(
np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3),
"Lora + 0 scale should lead to same result as no LoRA",
)
self.assertTrue(
pipe.text_encoder.text_model.encoder.layers[0].self_attn.q_proj.scaling["default"] == 1.0,
"The scaling parameter has not been correctly restored!",
)
def test_simple_inference_with_text_lora_denoiser_fused(self):
"""
        Tests a simple inference with LoRA attached to the text encoder + denoiser, fuses the LoRA weights
        into the base model and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config)
else:
pipe.transformer.add_adapter(denoiser_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
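            # `fuse_lora` folds the (scaled) LoRA deltas into the base weights; the adapter modules
            # stay attached, which is what the checks below assert.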
pipe.fuse_lora()
# Fusing should still keep the LoRA layers
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
            output_fused = pipe(**inputs, generator=torch.manual_seed(0)).images
            self.assertFalse(
                np.allclose(output_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output"
)
def test_simple_inference_with_text_denoiser_lora_unloaded(self):
"""
Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights
and makes sure it works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config)
else:
pipe.transformer.add_adapter(denoiser_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.unload_lora_weights()
# unloading should remove the LoRA layers
self.assertFalse(
check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder"
)
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertFalse(
check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly unloaded in denoiser"
)
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertFalse(
check_if_lora_correctly_set(pipe.text_encoder_2),
"Lora not correctly unloaded in text encoder 2",
)
            output_unloaded = pipe(**inputs, generator=torch.manual_seed(0)).images
            self.assertTrue(
                np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3),
                "Unloading LoRA should restore the output produced without LoRA",
            )
def test_simple_inference_with_text_denoiser_lora_unfused(self):
"""
        Tests a simple inference with lora attached to text encoder and denoiser, fuses and then unfuses
        the lora weights and makes sure the outputs match
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
pipe.text_encoder.add_adapter(text_lora_config)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config)
else:
pipe.transformer.add_adapter(denoiser_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.fuse_lora()
output_fused_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.unfuse_lora()
output_unfused_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
            # unfusing should keep the LoRA layers attached
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Unfuse should still keep LoRA layers")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Unfuse should still keep LoRA layers"
)
# Fuse and unfuse should lead to the same results
self.assertTrue(
np.allclose(output_fused_lora, output_unfused_lora, atol=1e-3, rtol=1e-3),
"Fused lora should change the output",
)
def test_simple_inference_with_text_denoiser_multi_adapter(self):
"""
Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and sets them
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.set_adapters("adapter-1")
output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters("adapter-2")
output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters(["adapter-1", "adapter-2"])
output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0)).images
            # Each adapter (and their combination) should produce a distinct output
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
"Adapter 1 and 2 should give different results",
)
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 1 and mixed adapters should give different results",
)
self.assertFalse(
np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 2 and mixed adapters should give different results",
)
pipe.disable_lora()
output_disabled = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
"output with no lora and output with lora disabled should give same results",
)
def test_simple_inference_with_text_denoiser_block_scale(self):
"""
Tests a simple inference with lora attached to text encoder and unet, attaches
        one adapter and sets different weights for different blocks (i.e. block lora)
"""
if self.pipeline_class.__name__ == "StableDiffusion3Pipeline":
return
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
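            # Block-wise scales: a bare number applies to a whole component, while nested dicts target
            # individual UNet stages, e.g. (illustrative values only):
            #   pipe.set_adapters("adapter-1", {"text_encoder": 2, "unet": {"down": 5}})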
weights_1 = {"text_encoder": 2, "unet": {"down": 5}}
pipe.set_adapters("adapter-1", weights_1)
output_weights_1 = pipe(**inputs, generator=torch.manual_seed(0)).images
weights_2 = {"unet": {"up": 5}}
pipe.set_adapters("adapter-1", weights_2)
output_weights_2 = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertFalse(
np.allclose(output_weights_1, output_weights_2, atol=1e-3, rtol=1e-3),
"LoRA weights 1 and 2 should give different results",
)
self.assertFalse(
np.allclose(output_no_lora, output_weights_1, atol=1e-3, rtol=1e-3),
"No adapter and LoRA weights 1 should give different results",
)
self.assertFalse(
np.allclose(output_no_lora, output_weights_2, atol=1e-3, rtol=1e-3),
"No adapter and LoRA weights 2 should give different results",
)
pipe.disable_lora()
output_disabled = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
"output with no lora and output with lora disabled should give same results",
)
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
"""
Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and sets different weights for different blocks (i.e. block lora)
"""
if self.pipeline_class.__name__ == "StableDiffusion3Pipeline":
return
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
scales_1 = {"text_encoder": 2, "unet": {"down": 5}}
scales_2 = {"unet": {"down": 5, "mid": 5}}
pipe.set_adapters("adapter-1", scales_1)
output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters("adapter-2", scales_2)
output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1, scales_2])
output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0)).images
            # Each adapter (and their combination) should produce a distinct output
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
"Adapter 1 and 2 should give different results",
)
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 1 and mixed adapters should give different results",
)
self.assertFalse(
np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 2 and mixed adapters should give different results",
)
pipe.disable_lora()
output_disabled = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
"output with no lora and output with lora disabled should give same results",
)
# a mismatching number of adapter_names and adapter_weights should raise an error
with self.assertRaises(ValueError):
pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1])
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
"""Tests that any valid combination of lora block scales can be used in pipe.set_adapter"""
if self.pipeline_class.__name__ in ["StableDiffusion3Pipeline", "FluxPipeline"]:
return
def updown_options(blocks_with_tf, layers_per_block, value):
"""
Generate every possible combination for how a lora weight dict for the up/down part can be.
E.g. 2, {"block_1": 2}, {"block_1": [2,2,2]}, {"block_1": 2, "block_2": [2,2,2]}, ...
"""
num_val = value
list_val = [value] * layers_per_block
node_opts = [None, num_val, list_val]
node_opts_foreach_block = [node_opts] * len(blocks_with_tf)
updown_opts = [num_val]
for nodes in product(*node_opts_foreach_block):
if all(n is None for n in nodes):
continue
opt = {}
for b, n in zip(blocks_with_tf, nodes):
if n is not None:
opt["block_" + str(b)] = n
updown_opts.append(opt)
return updown_opts
def all_possible_dict_opts(unet, value):
"""
Generate every possible combination for how a lora weight dict can be.
E.g. 2, {"unet: {"down": 2}}, {"unet: {"down": [2,2,2]}}, {"unet: {"mid": 2, "up": [2,2,2]}}, ...
"""
down_blocks_with_tf = [i for i, d in enumerate(unet.down_blocks) if hasattr(d, "attentions")]
up_blocks_with_tf = [i for i, u in enumerate(unet.up_blocks) if hasattr(u, "attentions")]
layers_per_block = unet.config.layers_per_block
text_encoder_opts = [None, value]
text_encoder_2_opts = [None, value]
mid_opts = [None, value]
down_opts = [None] + updown_options(down_blocks_with_tf, layers_per_block, value)
up_opts = [None] + updown_options(up_blocks_with_tf, layers_per_block + 1, value)
opts = []
for t1, t2, d, m, u in product(text_encoder_opts, text_encoder_2_opts, down_opts, mid_opts, up_opts):
if all(o is None for o in (t1, t2, d, m, u)):
continue
opt = {}
if t1 is not None:
opt["text_encoder"] = t1
if t2 is not None:
opt["text_encoder_2"] = t2
if all(o is None for o in (d, m, u)):
# no unet scaling
continue
opt["unet"] = {}
if d is not None:
opt["unet"]["down"] = d
if m is not None:
opt["unet"]["mid"] = m
if u is not None:
opt["unet"]["up"] = u
opts.append(opt)
return opts
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
for scale_dict in all_possible_dict_opts(pipe.unet, value=1234):
# test if lora block scales can be set with this scale_dict
if not self.has_two_text_encoders and "text_encoder_2" in scale_dict:
del scale_dict["text_encoder_2"]
pipe.set_adapters("adapter-1", scale_dict) # test will fail if this line throws an error
def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self):
"""
Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and sets/deletes them
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.set_adapters("adapter-1")
output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters("adapter-2")
output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters(["adapter-1", "adapter-2"])
output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
"Adapter 1 and 2 should give different results",
)
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 1 and mixed adapters should give different results",
)
self.assertFalse(
np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 2 and mixed adapters should give different results",
)
pipe.delete_adapters("adapter-1")
output_deleted_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_deleted_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
"Adapter 1 and 2 should give different results",
)
pipe.delete_adapters("adapter-2")
output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3),
"output with no lora and output with lora disabled should give same results",
)
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
pipe.set_adapters(["adapter-1", "adapter-2"])
pipe.delete_adapters(["adapter-1", "adapter-2"])
output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3),
"output with no lora and output with lora disabled should give same results",
)
def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self):
"""
Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and sets them with per-adapter weights
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.set_adapters("adapter-1")
output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters("adapter-2")
output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0)).images
pipe.set_adapters(["adapter-1", "adapter-2"])
output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0)).images
            # Each adapter (and their combination) should produce a distinct output
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
"Adapter 1 and 2 should give different results",
)
self.assertFalse(
np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 1 and mixed adapters should give different results",
)
self.assertFalse(
np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Adapter 2 and mixed adapters should give different results",
)
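            # Per-adapter weights are passed as a list aligned positionally with the adapter names.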
pipe.set_adapters(["adapter-1", "adapter-2"], [0.5, 0.6])
output_adapter_mixed_weighted = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertFalse(
np.allclose(output_adapter_mixed_weighted, output_adapter_mixed, atol=1e-3, rtol=1e-3),
"Weighted adapter and mixed adapter should give different results",
)
pipe.disable_lora()
output_disabled = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
"output with no lora and output with lora disabled should give same results",
)
@skip_mps
def test_lora_fuse_nan(self):
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
# corrupt one LoRA weight with `inf` values
with torch.no_grad():
if self.unet_kwargs:
pipe.unet.mid_block.attentions[0].transformer_blocks[0].attn1.to_q.lora_A[
"adapter-1"
].weight += float("inf")
else:
pipe.transformer.transformer_blocks[0].attn.to_q.lora_A["adapter-1"].weight += float("inf")
# with `safe_fusing=True` we should see an Error
with self.assertRaises(ValueError):
pipe.fuse_lora(safe_fusing=True)
            # without safe fusing we should not see an error, but the resulting images will be all NaNs
pipe.fuse_lora(safe_fusing=False)
out = pipe("test", num_inference_steps=2, output_type="np").images
self.assertTrue(np.isnan(out).all())
def test_get_adapters(self):
"""
        Tests a simple use case where we attach multiple adapters and check that `get_active_adapters`
        reports the expected adapter names
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
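            # `get_active_adapters` only reports adapters that are currently active; adding a new
            # adapter makes it the active one until `set_adapters` is called explicitly.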
adapter_names = pipe.get_active_adapters()
self.assertListEqual(adapter_names, ["adapter-1"])
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
adapter_names = pipe.get_active_adapters()
self.assertListEqual(adapter_names, ["adapter-2"])
pipe.set_adapters(["adapter-1", "adapter-2"])
self.assertListEqual(pipe.get_active_adapters(), ["adapter-1", "adapter-2"])
def test_get_list_adapters(self):
"""
        Tests a simple use case where we attach multiple adapters and check that `get_list_adapters`
        returns the expected per-component adapter mapping
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
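            # `get_list_adapters` maps every LoRA-bearing component to all adapters registered on it,
            # regardless of which ones are currently active.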
adapter_names = pipe.get_list_adapters()
dicts_to_be_checked = {"text_encoder": ["adapter-1"]}
if self.unet_kwargs is not None:
dicts_to_be_checked.update({"unet": ["adapter-1"]})
else:
dicts_to_be_checked.update({"transformer": ["adapter-1"]})
self.assertDictEqual(adapter_names, dicts_to_be_checked)
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
adapter_names = pipe.get_list_adapters()
dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]}
if self.unet_kwargs is not None:
dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]})
else:
dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]})
self.assertDictEqual(adapter_names, dicts_to_be_checked)
pipe.set_adapters(["adapter-1", "adapter-2"])
dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]}
if self.unet_kwargs is not None:
dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]})
else:
dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]})
self.assertDictEqual(
pipe.get_list_adapters(),
dicts_to_be_checked,
)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-3")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-3")
dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]}
if self.unet_kwargs is not None:
dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2", "adapter-3"]})
else:
dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2", "adapter-3"]})
self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked)
@require_peft_version_greater(peft_version="0.6.2")
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
"""
        Tests a simple inference with LoRA attached to the text encoder + denoiser, fuses the LoRA weights
        into the base model and makes sure it works as expected in the multi-adapter case
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
# Attach a second adapter
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
else:
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
# set them to multi-adapter inference mode
pipe.set_adapters(["adapter-1", "adapter-2"])
            outputs_all_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
            pipe.set_adapters(["adapter-1"])
            outputs_lora_1 = pipe(**inputs, generator=torch.manual_seed(0)).images
            pipe.fuse_lora(adapter_names=["adapter-1"])
            # Fusing should still keep the LoRA layers, so the output should remain the same
            outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0)).images
            self.assertTrue(
                np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=1e-3, rtol=1e-3),
"Fused lora should not change the output",
)
pipe.unfuse_lora()
pipe.fuse_lora(adapter_names=["adapter-2", "adapter-1"])
# Fusing should still keep the LoRA layers
output_all_lora_fused = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(
                np.allclose(output_all_lora_fused, outputs_all_lora, atol=1e-3, rtol=1e-3),
"Fused lora should not change the output",
)
@require_peft_version_greater(peft_version="0.9.0")
def test_simple_inference_with_dora(self):
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(
scheduler_cls, use_dora=True
)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
output_no_dora_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertTrue(output_no_dora_lora.shape == self.output_shape)
pipe.text_encoder.add_adapter(text_lora_config)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config)
else:
pipe.transformer.add_adapter(denoiser_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
lora_loadable_components = self.pipeline_class._lora_loadable_modules
if "text_encoder_2" in lora_loadable_components:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
output_dora_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
self.assertFalse(
np.allclose(output_dora_lora, output_no_dora_lora, atol=1e-3, rtol=1e-3),
"DoRA lora should change the output",
)
@unittest.skip("This is failing for now - need to investigate")
def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self):
"""
        Tests a simple inference with lora attached to the text encoder and denoiser, compiles the models
        with torch.compile and makes sure inference still works as expected
"""
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)
pipe.text_encoder.add_adapter(text_lora_config)
if self.unet_kwargs is not None:
pipe.unet.add_adapter(denoiser_lora_config)
else:
pipe.transformer.add_adapter(denoiser_lora_config)
self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer
self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser")
if self.has_two_text_encoders or self.has_three_text_encoders:
pipe.text_encoder_2.add_adapter(text_lora_config)
self.assertTrue(
check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
)
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
pipe.text_encoder = torch.compile(pipe.text_encoder, mode="reduce-overhead", fullgraph=True)
if self.has_two_text_encoders or self.has_three_text_encoders:
pipe.text_encoder_2 = torch.compile(pipe.text_encoder_2, mode="reduce-overhead", fullgraph=True)
# Just makes sure it works..
_ = pipe(**inputs, generator=torch.manual_seed(0)).images
def test_modify_padding_mode(self):
if self.pipeline_class.__name__ in ["StableDiffusion3Pipeline", "FluxPipeline"]:
return
def set_pad_mode(network, mode="circular"):
for _, module in network.named_modules():
if isinstance(module, torch.nn.Conv2d):
module.padding_mode = mode
scheduler_classes = (
[FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler]
)
for scheduler_cls in scheduler_classes:
components, _, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_pad_mode = "circular"
set_pad_mode(pipe.vae, _pad_mode)
set_pad_mode(pipe.unet, _pad_mode)
_, _, inputs = self.get_dummy_inputs()
_ = pipe(**inputs).images
|
diffusers/tests/lora/utils.py/0
|
{
"file_path": "diffusers/tests/lora/utils.py",
"repo_id": "diffusers",
"token_count": 41726
}
| 155
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
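        # CogVideoX latents are laid out as (batch, num_frames, channels, height, width); the text
        # conditioning is a (batch, sequence_length, embedding_dim) tensor.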
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
|
diffusers/tests/models/transformers/test_models_transformer_cogvideox.py/0
|
{
"file_path": "diffusers/tests/models/transformers/test_models_transformer_cogvideox.py",
"repo_id": "diffusers",
"token_count": 1068
}
| 156
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers.models.unets.unet_2d_blocks import * # noqa F403
from diffusers.utils.testing_utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
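# Each block test below feeds a seeded dummy input through the block and compares a small fixed
# slice of the output against `expected_slice` within a tolerance (see UNetBlockTesterMixin.test_output).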
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = DownBlock2D # noqa F405
block_type = "down"
def test_output(self):
expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(expected_slice)
class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = ResnetDownsampleBlock2D # noqa F405
block_type = "down"
def test_output(self):
expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(expected_slice)
class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = AttnDownBlock2D # noqa F405
block_type = "down"
def test_output(self):
expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(expected_slice)
class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = CrossAttnDownBlock2D # noqa F405
block_type = "down"
def prepare_init_args_and_inputs_for_common(self):
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def test_output(self):
expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(expected_slice)
class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = SimpleCrossAttnDownBlock2D # noqa F405
block_type = "down"
@property
def dummy_input(self):
return super().get_dummy_input(include_encoder_hidden_states=True)
def prepare_init_args_and_inputs_for_common(self):
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
def test_output(self):
expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = SkipDownBlock2D # noqa F405
block_type = "down"
@property
def dummy_input(self):
return super().get_dummy_input(include_skip_sample=True)
def test_output(self):
expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(expected_slice)
class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = AttnSkipDownBlock2D # noqa F405
block_type = "down"
@property
def dummy_input(self):
return super().get_dummy_input(include_skip_sample=True)
def test_output(self):
expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(expected_slice)
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = DownEncoderBlock2D # noqa F405
block_type = "down"
@property
def dummy_input(self):
return super().get_dummy_input(include_temb=False)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 32,
"out_channels": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(expected_slice)
class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = AttnDownEncoderBlock2D # noqa F405
block_type = "down"
@property
def dummy_input(self):
return super().get_dummy_input(include_temb=False)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 32,
"out_channels": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(expected_slice)
class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = UNetMidBlock2D # noqa F405
block_type = "mid"
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"in_channels": 32,
"temb_channels": 128,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(expected_slice)
class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = UNetMidBlock2DCrossAttn # noqa F405
block_type = "mid"
def prepare_init_args_and_inputs_for_common(self):
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def test_output(self):
expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(expected_slice)
class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = UNetMidBlock2DSimpleCrossAttn # noqa F405
block_type = "mid"
@property
def dummy_input(self):
return super().get_dummy_input(include_encoder_hidden_states=True)
def prepare_init_args_and_inputs_for_common(self):
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def test_output(self):
expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = UpBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def test_output(self):
expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(expected_slice)
class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = ResnetUpsampleBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def test_output(self):
expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(expected_slice)
class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = CrossAttnUpBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def prepare_init_args_and_inputs_for_common(self):
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def test_output(self):
expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(expected_slice)
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = SimpleCrossAttnUpBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)
def prepare_init_args_and_inputs_for_common(self):
init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["cross_attention_dim"] = 32
return init_dict, inputs_dict
def test_output(self):
expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(expected_slice)
class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = AttnUpBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True)
@unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
def test_output(self):
expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(expected_slice)
class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = SkipUpBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def test_output(self):
expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(expected_slice)
class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = AttnSkipUpBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_res_hidden_states_tuple=True)
def test_output(self):
expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(expected_slice)
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = UpDecoderBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_temb=False)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {"in_channels": 32, "out_channels": 32}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(expected_slice)
class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
block_class = AttnUpDecoderBlock2D # noqa F405
block_type = "up"
@property
def dummy_input(self):
return super().get_dummy_input(include_temb=False)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {"in_channels": 32, "out_channels": 32}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(expected_slice)
|
diffusers/tests/models/unets/test_unet_2d_blocks.py/0
|
{
"file_path": "diffusers/tests/models/unets/test_unet_2d_blocks.py",
"repo_id": "diffusers",
"token_count": 5186
}
| 157
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc and Tencent Hunyuan Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, BertModel, T5EncoderModel
from diffusers import (
AutoencoderKL,
DDPMScheduler,
HunyuanDiT2DModel,
HunyuanDiTControlNetPipeline,
)
from diffusers.models import HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class HunyuanDiTControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = HunyuanDiTControlNetPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
def get_dummy_components(self):
torch.manual_seed(0)
transformer = HunyuanDiT2DModel(
sample_size=16,
num_layers=4,
patch_size=2,
attention_head_dim=8,
num_attention_heads=3,
in_channels=4,
cross_attention_dim=32,
cross_attention_dim_t5=32,
pooled_projection_dim=16,
hidden_size=24,
activation_fn="gelu-approximate",
)
torch.manual_seed(0)
controlnet = HunyuanDiT2DControlNetModel(
sample_size=16,
transformer_num_layers=4,
patch_size=2,
attention_head_dim=8,
num_attention_heads=3,
in_channels=4,
cross_attention_dim=32,
cross_attention_dim_t5=32,
pooled_projection_dim=16,
hidden_size=24,
activation_fn="gelu-approximate",
)
torch.manual_seed(0)
vae = AutoencoderKL()
scheduler = DDPMScheduler()
text_encoder = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
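        # HunyuanDiT conditions on two text encoders, a BERT-style encoder and a T5
        # encoder (tiny random checkpoints above), which is why the transformer and
        # controlnet configs set both cross_attention_dim and cross_attention_dim_t5.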
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_2,
"tokenizer_2": tokenizer_2,
"safety_checker": None,
"feature_extractor": None,
"controlnet": controlnet,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
control_image = randn_tensor(
(1, 3, 16, 16),
generator=generator,
device=torch.device(device),
dtype=torch.float16,
)
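        # the control image is created directly in float16 to match the dtype the fast
        # test below uses when moving the whole pipeline to torch.float16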
controlnet_conditioning_scale = 0.5
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
"control_image": control_image,
"controlnet_conditioning_scale": controlnet_conditioning_scale,
}
return inputs
def test_controlnet_hunyuandit(self):
components = self.get_dummy_components()
pipe = HunyuanDiTControlNetPipeline(**components)
pipe = pipe.to(torch_device, dtype=torch.float16)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output = pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
expected_slice = np.array(
[0.6953125, 0.89208984, 0.59375, 0.5078125, 0.5786133, 0.6035156, 0.5839844, 0.53564453, 0.52246094]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f"Expected: {expected_slice}, got: {image_slice.flatten()}"
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-3,
)
def test_sequential_cpu_offload_forward_pass(self):
# TODO(YiYi) need to fix later
pass
def test_sequential_offload_forward_pass_twice(self):
# TODO(YiYi) need to fix later
pass
def test_save_load_optional_components(self):
# TODO(YiYi) need to fix later
pass
@slow
@require_torch_gpu
class HunyuanDiTControlNetPipelineSlowTests(unittest.TestCase):
pipeline_class = HunyuanDiTControlNetPipeline
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_canny(self):
controlnet = HunyuanDiT2DControlNetModel.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny", torch_dtype=torch.float16
)
pipe = HunyuanDiTControlNetPipeline.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "At night, an ancient Chinese-style lion statue stands in front of the hotel, its eyes gleaming as if guarding the building. The background is the hotel entrance at night, with a close-up, eye-level, and centered composition. This photo presents a realistic photographic style, embodies Chinese sculpture culture, and reveals a mysterious atmosphere."
n_prompt = ""
control_image = load_image(
"https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny/resolve/main/canny.jpg?download=true"
)
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=control_image,
controlnet_conditioning_scale=0.5,
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.43652344, 0.4399414, 0.44921875, 0.45043945, 0.45703125, 0.44873047, 0.43579102, 0.44018555, 0.42578125]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
def test_pose(self):
controlnet = HunyuanDiT2DControlNetModel.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Pose", torch_dtype=torch.float16
)
pipe = HunyuanDiTControlNetPipeline.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "An Asian woman, dressed in a green top, wearing a purple headscarf and a purple scarf, stands in front of a blackboard. The background is the blackboard. The photo is presented in a close-up, eye-level, and centered composition, adopting a realistic photographic style"
n_prompt = ""
control_image = load_image(
"https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Pose/resolve/main/pose.jpg?download=true"
)
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=control_image,
controlnet_conditioning_scale=0.5,
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.4091797, 0.4177246, 0.39526367, 0.4194336, 0.40356445, 0.3857422, 0.39208984, 0.40429688, 0.37451172]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
def test_depth(self):
controlnet = HunyuanDiT2DControlNetModel.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Depth", torch_dtype=torch.float16
)
pipe = HunyuanDiTControlNetPipeline.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "In the dense forest, a black and white panda sits quietly in green trees and red flowers, surrounded by mountains, rivers, and the ocean. The background is the forest in a bright environment."
n_prompt = ""
control_image = load_image(
"https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Depth/resolve/main/depth.jpg?download=true"
)
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=control_image,
controlnet_conditioning_scale=0.5,
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.31982422, 0.32177734, 0.30126953, 0.3190918, 0.3100586, 0.31396484, 0.3232422, 0.33544922, 0.30810547]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
def test_multi_controlnet(self):
controlnet = HunyuanDiT2DControlNetModel.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny", torch_dtype=torch.float16
)
controlnet = HunyuanDiT2DMultiControlNetModel([controlnet, controlnet])
pipe = HunyuanDiTControlNetPipeline.from_pretrained(
"Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "At night, an ancient Chinese-style lion statue stands in front of the hotel, its eyes gleaming as if guarding the building. The background is the hotel entrance at night, with a close-up, eye-level, and centered composition. This photo presents a realistic photographic style, embodies Chinese sculpture culture, and reveals a mysterious atmosphere."
n_prompt = ""
control_image = load_image(
"https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny/resolve/main/canny.jpg?download=true"
)
output = pipe(
prompt,
negative_prompt=n_prompt,
control_image=[control_image, control_image],
controlnet_conditioning_scale=[0.25, 0.25],
guidance_scale=5.0,
num_inference_steps=2,
output_type="np",
generator=generator,
)
image = output.images[0]
assert image.shape == (1024, 1024, 3)
original_image = image[-3:, -3:, -1].flatten()
expected_image = np.array(
[0.43652344, 0.44018555, 0.4494629, 0.44995117, 0.45654297, 0.44848633, 0.43603516, 0.4404297, 0.42626953]
)
assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
|
diffusers/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py/0
|
{
"file_path": "diffusers/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py",
"repo_id": "diffusers",
"token_count": 5987
}
| 158
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
KolorsPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import (
TEXT_TO_IMAGE_BATCH_PARAMS,
TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
TEXT_TO_IMAGE_IMAGE_PARAMS,
TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KolorsPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KolorsPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"})
def get_dummy_components(self, time_cond_proj_dim=None):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(2, 4),
layers_per_block=2,
time_cond_proj_dim=time_cond_proj_dim,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
# specific config below
attention_head_dim=(2, 4),
use_linear_projection=True,
addition_embed_type="text_time",
addition_time_embed_dim=8,
transformer_layers_per_block=(1, 2),
projection_class_embeddings_input_dim=56,
cross_attention_dim=8,
norm_num_groups=1,
)
scheduler = EulerDiscreteScheduler(
beta_start=0.00085,
beta_end=0.012,
steps_offset=1,
beta_schedule="scaled_linear",
timestep_spacing="leading",
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder = ChatGLMModel.from_pretrained("hf-internal-testing/tiny-random-chatglm3-6b")
tokenizer = ChatGLMTokenizer.from_pretrained("hf-internal-testing/tiny-random-chatglm3-6b")
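        # Kolors swaps the usual CLIP text encoder for a ChatGLM model and tokenizer,
        # hence the tiny random chatglm3-6b checkpoints above.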
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"image_encoder": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
}
return inputs
def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 64, 64, 3))
expected_slice = np.array(
[0.26413745, 0.4425478, 0.4102801, 0.42693347, 0.52529025, 0.3867405, 0.47512037, 0.41538602, 0.43855375]
)
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(max_diff, 1e-3)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=2e-4)
def test_save_load_float16(self):
super().test_save_load_float16(expected_max_diff=2e-1)
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=5e-4)
|
diffusers/tests/pipelines/kolors/test_kolors.py/0
|
{
"file_path": "diffusers/tests/pipelines/kolors/test_kolors.py",
"repo_id": "diffusers",
"token_count": 2344
}
| 159
|
# Copyright 2024 Marigold authors, PRS ETH Zurich. All rights reserved.
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# More information and citation instructions are available on the
# Marigold project website: https://marigoldmonodepth.github.io
# --------------------------------------------------------------------------
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
AutoencoderTiny,
LCMScheduler,
MarigoldDepthPipeline,
UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
is_flaky,
load_image,
require_torch_gpu,
slow,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class MarigoldDepthPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = MarigoldDepthPipeline
params = frozenset(["image"])
batch_params = frozenset(["image"])
image_params = frozenset(["image"])
image_latents_params = frozenset(["latents"])
callback_cfg_params = frozenset([])
test_xformers_attention = False
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"output_type",
]
)
def get_dummy_components(self, time_cond_proj_dim=None):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
time_cond_proj_dim=time_cond_proj_dim,
sample_size=32,
in_channels=8,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
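        # in_channels=8: Marigold feeds the UNet the image latents concatenated with
        # the prediction (depth) latents, i.e. 4 + 4 channels.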
scheduler = LCMScheduler(
beta_start=0.00085,
beta_end=0.012,
prediction_type="v_prediction",
set_alpha_to_one=False,
steps_offset=1,
beta_schedule="scaled_linear",
clip_sample=False,
thresholding=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"prediction_type": "depth",
"scale_invariant": True,
"shift_invariant": True,
}
return components
def get_dummy_tiny_autoencoder(self):
return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4)
def get_dummy_inputs(self, device, seed=0):
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
image = image / 2 + 0.5
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
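        # processing_resolution=0 asks the pipeline to run at the input's native
        # resolution (no internal resizing), so dummy predictions stay at 32x32.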
inputs = {
"image": image,
"num_inference_steps": 1,
"processing_resolution": 0,
"generator": generator,
"output_type": "np",
}
return inputs
def _test_marigold_depth(
self,
generator_seed: int = 0,
expected_slice: np.ndarray = None,
atol: float = 1e-4,
**pipe_kwargs,
):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
pipe_inputs = self.get_dummy_inputs(device, seed=generator_seed)
pipe_inputs.update(**pipe_kwargs)
prediction = pipe(**pipe_inputs).prediction
prediction_slice = prediction[0, -3:, -3:, -1].flatten()
if pipe_inputs.get("match_input_resolution", True):
self.assertEqual(prediction.shape, (1, 32, 32, 1), "Unexpected output resolution")
else:
self.assertTrue(prediction.shape[0] == 1 and prediction.shape[3] == 1, "Unexpected output dimensions")
self.assertEqual(
max(prediction.shape[1:3]),
pipe_inputs.get("processing_resolution", 768),
"Unexpected output resolution",
)
self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol))
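    # Naming scheme for the dummy tests below: G<generator seed>, S<num inference steps>,
    # P<processing resolution>, E<ensemble size>, B<batch size>, M<1 if the output should
    # match the input resolution, else 0>. The slow tests further below use the same
    # scheme with an extra precision/device prefix (e.g. f16_cuda).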
def test_marigold_depth_dummy_defaults(self):
self._test_marigold_depth(
expected_slice=np.array([0.4529, 0.5184, 0.4985, 0.4355, 0.4273, 0.4153, 0.5229, 0.4818, 0.4627]),
)
def test_marigold_depth_dummy_G0_S1_P32_E1_B1_M1(self):
self._test_marigold_depth(
generator_seed=0,
expected_slice=np.array([0.4529, 0.5184, 0.4985, 0.4355, 0.4273, 0.4153, 0.5229, 0.4818, 0.4627]),
num_inference_steps=1,
processing_resolution=32,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M1(self):
self._test_marigold_depth(
generator_seed=0,
expected_slice=np.array([0.4511, 0.4531, 0.4542, 0.5024, 0.4987, 0.4969, 0.5281, 0.5215, 0.5182]),
num_inference_steps=1,
processing_resolution=16,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_dummy_G2024_S1_P32_E1_B1_M1(self):
self._test_marigold_depth(
generator_seed=2024,
expected_slice=np.array([0.4671, 0.4739, 0.5130, 0.4308, 0.4411, 0.4720, 0.5064, 0.4796, 0.4795]),
num_inference_steps=1,
processing_resolution=32,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_dummy_G0_S2_P32_E1_B1_M1(self):
self._test_marigold_depth(
generator_seed=0,
expected_slice=np.array([0.4165, 0.4485, 0.4647, 0.4003, 0.4577, 0.5074, 0.5106, 0.5077, 0.5042]),
num_inference_steps=2,
processing_resolution=32,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_dummy_G0_S1_P64_E1_B1_M1(self):
self._test_marigold_depth(
generator_seed=0,
expected_slice=np.array([0.4817, 0.5425, 0.5146, 0.5367, 0.5034, 0.4743, 0.4395, 0.4734, 0.4399]),
num_inference_steps=1,
processing_resolution=64,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
@is_flaky
def test_marigold_depth_dummy_G0_S1_P32_E3_B1_M1(self):
self._test_marigold_depth(
generator_seed=0,
expected_slice=np.array([0.3260, 0.3591, 0.2837, 0.2971, 0.2750, 0.2426, 0.4200, 0.3588, 0.3254]),
num_inference_steps=1,
processing_resolution=32,
ensemble_size=3,
ensembling_kwargs={"reduction": "mean"},
batch_size=1,
match_input_resolution=True,
)
@is_flaky
def test_marigold_depth_dummy_G0_S1_P32_E4_B2_M1(self):
self._test_marigold_depth(
generator_seed=0,
expected_slice=np.array([0.3180, 0.4194, 0.3013, 0.2902, 0.3245, 0.2897, 0.4718, 0.4174, 0.3705]),
num_inference_steps=1,
processing_resolution=32,
ensemble_size=4,
ensembling_kwargs={"reduction": "mean"},
batch_size=2,
match_input_resolution=True,
)
def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M0(self):
self._test_marigold_depth(
generator_seed=0,
expected_slice=np.array([0.5515, 0.4588, 0.4197, 0.4741, 0.4229, 0.4328, 0.5333, 0.5314, 0.5182]),
num_inference_steps=1,
processing_resolution=16,
ensemble_size=1,
batch_size=1,
match_input_resolution=False,
)
def test_marigold_depth_dummy_no_num_inference_steps(self):
with self.assertRaises(ValueError) as e:
self._test_marigold_depth(
num_inference_steps=None,
expected_slice=np.array([0.0]),
)
self.assertIn("num_inference_steps", str(e))
def test_marigold_depth_dummy_no_processing_resolution(self):
with self.assertRaises(ValueError) as e:
self._test_marigold_depth(
processing_resolution=None,
expected_slice=np.array([0.0]),
)
self.assertIn("processing_resolution", str(e))
@slow
@require_torch_gpu
class MarigoldDepthPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _test_marigold_depth(
self,
is_fp16: bool = True,
device: str = "cuda",
generator_seed: int = 0,
expected_slice: np.ndarray = None,
model_id: str = "prs-eth/marigold-lcm-v1-0",
image_url: str = "https://marigoldmonodepth.github.io/images/einstein.jpg",
atol: float = 1e-4,
**pipe_kwargs,
):
from_pretrained_kwargs = {}
if is_fp16:
from_pretrained_kwargs["variant"] = "fp16"
from_pretrained_kwargs["torch_dtype"] = torch.float16
pipe = MarigoldDepthPipeline.from_pretrained(model_id, **from_pretrained_kwargs)
if device == "cuda":
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device=device).manual_seed(generator_seed)
image = load_image(image_url)
width, height = image.size
prediction = pipe(image, generator=generator, **pipe_kwargs).prediction
prediction_slice = prediction[0, -3:, -3:, -1].flatten()
if pipe_kwargs.get("match_input_resolution", True):
self.assertEqual(prediction.shape, (1, height, width, 1), "Unexpected output resolution")
else:
self.assertTrue(prediction.shape[0] == 1 and prediction.shape[3] == 1, "Unexpected output dimensions")
self.assertEqual(
max(prediction.shape[1:3]),
pipe_kwargs.get("processing_resolution", 768),
"Unexpected output resolution",
)
self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol))
def test_marigold_depth_einstein_f32_cpu_G0_S1_P32_E1_B1_M1(self):
self._test_marigold_depth(
is_fp16=False,
device="cpu",
generator_seed=0,
expected_slice=np.array([0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323]),
num_inference_steps=1,
processing_resolution=32,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f32_cuda_G0_S1_P768_E1_B1_M1(self):
self._test_marigold_depth(
is_fp16=False,
device="cuda",
generator_seed=0,
expected_slice=np.array([0.1244, 0.1265, 0.1292, 0.1240, 0.1252, 0.1266, 0.1246, 0.1226, 0.1180]),
num_inference_steps=1,
processing_resolution=768,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E1_B1_M1(self):
self._test_marigold_depth(
is_fp16=True,
device="cuda",
generator_seed=0,
expected_slice=np.array([0.1241, 0.1262, 0.1290, 0.1238, 0.1250, 0.1265, 0.1244, 0.1225, 0.1179]),
num_inference_steps=1,
processing_resolution=768,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f16_cuda_G2024_S1_P768_E1_B1_M1(self):
self._test_marigold_depth(
is_fp16=True,
device="cuda",
generator_seed=2024,
expected_slice=np.array([0.1710, 0.1725, 0.1738, 0.1700, 0.1700, 0.1696, 0.1698, 0.1663, 0.1592]),
num_inference_steps=1,
processing_resolution=768,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f16_cuda_G0_S2_P768_E1_B1_M1(self):
self._test_marigold_depth(
is_fp16=True,
device="cuda",
generator_seed=0,
expected_slice=np.array([0.1085, 0.1098, 0.1110, 0.1081, 0.1085, 0.1082, 0.1085, 0.1057, 0.0996]),
num_inference_steps=2,
processing_resolution=768,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f16_cuda_G0_S1_P512_E1_B1_M1(self):
self._test_marigold_depth(
is_fp16=True,
device="cuda",
generator_seed=0,
expected_slice=np.array([0.2683, 0.2693, 0.2698, 0.2666, 0.2632, 0.2615, 0.2656, 0.2603, 0.2573]),
num_inference_steps=1,
processing_resolution=512,
ensemble_size=1,
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E3_B1_M1(self):
self._test_marigold_depth(
is_fp16=True,
device="cuda",
generator_seed=0,
expected_slice=np.array([0.1200, 0.1215, 0.1237, 0.1193, 0.1197, 0.1202, 0.1196, 0.1166, 0.1109]),
num_inference_steps=1,
processing_resolution=768,
ensemble_size=3,
ensembling_kwargs={"reduction": "mean"},
batch_size=1,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E4_B2_M1(self):
self._test_marigold_depth(
is_fp16=True,
device="cuda",
generator_seed=0,
expected_slice=np.array([0.1121, 0.1135, 0.1155, 0.1111, 0.1115, 0.1118, 0.1111, 0.1079, 0.1019]),
num_inference_steps=1,
processing_resolution=768,
ensemble_size=4,
ensembling_kwargs={"reduction": "mean"},
batch_size=2,
match_input_resolution=True,
)
def test_marigold_depth_einstein_f16_cuda_G0_S1_P512_E1_B1_M0(self):
self._test_marigold_depth(
is_fp16=True,
device="cuda",
generator_seed=0,
expected_slice=np.array([0.2671, 0.2690, 0.2720, 0.2659, 0.2676, 0.2739, 0.2664, 0.2686, 0.2573]),
num_inference_steps=1,
processing_resolution=512,
ensemble_size=1,
batch_size=1,
match_input_resolution=False,
)
|
diffusers/tests/pipelines/marigold/test_marigold_depth.py/0
|
{
"file_path": "diffusers/tests/pipelines/marigold/test_marigold_depth.py",
"repo_id": "diffusers",
"token_count": 8593
}
| 160
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import inspect
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
AutoencoderKL,
AutoPipelineForInpainting,
EulerDiscreteScheduler,
StableDiffusionXLInpaintPipeline,
StableDiffusionXLPAGInpaintPipeline,
UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
)
from ..test_pipelines_common import (
IPAdapterTesterMixin,
PipelineFromPipeTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
SDXLOptionalComponentsTesterMixin,
)
enable_full_determinism()
class StableDiffusionXLPAGInpaintPipelineFastTests(
PipelineTesterMixin,
IPAdapterTesterMixin,
PipelineLatentTesterMixin,
PipelineFromPipeTesterMixin,
SDXLOptionalComponentsTesterMixin,
unittest.TestCase,
):
pipeline_class = StableDiffusionXLPAGInpaintPipeline
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS.union({"pag_scale", "pag_adaptive_scale"})
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
image_params = frozenset([])
image_latents_params = frozenset([])
callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union(
{"add_text_embeds", "add_time_ids", "mask", "masked_image_latents"}
)
# based on tests.pipelines.stable_diffusion_xl.test_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipelineFastTests.get_dummy_components
def get_dummy_components(
self, skip_first_text_encoder=False, time_cond_proj_dim=None, requires_aesthetics_score=False
):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
time_cond_proj_dim=time_cond_proj_dim,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
# SD2-specific config below
attention_head_dim=(2, 4),
use_linear_projection=True,
addition_embed_type="text_time",
addition_time_embed_dim=8,
transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=72 if requires_aesthetics_score else 80,  # 5 * 8 + 32 with aesthetics score, else 6 * 8 + 32
cross_attention_dim=64 if not skip_first_text_encoder else 32,
)
scheduler = EulerDiscreteScheduler(
beta_start=0.00085,
beta_end=0.012,
steps_offset=1,
beta_schedule="scaled_linear",
timestep_spacing="leading",
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
sample_size=128,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
# SD2-specific config below
hidden_act="gelu",
projection_dim=32,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
image_encoder_config = CLIPVisionConfig(
hidden_size=32,
image_size=224,
projection_dim=32,
intermediate_size=37,
num_attention_heads=4,
num_channels=3,
num_hidden_layers=5,
patch_size=14,
)
image_encoder = CLIPVisionModelWithProjection(image_encoder_config)
feature_extractor = CLIPImageProcessor(
crop_size=224,
do_center_crop=True,
do_normalize=True,
do_resize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
resample=3,
size=224,
)
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder if not skip_first_text_encoder else None,
"tokenizer": tokenizer if not skip_first_text_encoder else None,
"text_encoder_2": text_encoder_2,
"tokenizer_2": tokenizer_2,
"image_encoder": image_encoder,
"feature_extractor": feature_extractor,
"requires_aesthetics_score": requires_aesthetics_score,
}
return components
def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL; this is here just to leave the old expected_slices untouched
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# create mask
image[8:, 8:, :] = 255
mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64))
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"strength": 1.0,
"pag_scale": 0.9,
"output_type": "np",
}
return inputs
def test_pag_disable_enable(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(requires_aesthetics_score=True)
# base pipeline
pipe_sd = StableDiffusionXLInpaintPipeline(**components)
pipe_sd = pipe_sd.to(device)
pipe_sd.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
del inputs["pag_scale"]
assert (
"pag_scale" not in inspect.signature(pipe_sd.__call__).parameters
), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
out = pipe_sd(**inputs).images[0, -3:, -3:, -1]
# pag disabled with pag_scale=0.0
pipe_pag = self.pipeline_class(**components)
pipe_pag = pipe_pag.to(device)
pipe_pag.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["pag_scale"] = 0.0
out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]
# pag enabled
pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"])
pipe_pag = pipe_pag.to(device)
pipe_pag.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]
assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3
assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
def test_pag_inference(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(requires_aesthetics_score=True)
pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"])
pipe_pag = pipe_pag.to(device)
pipe_pag.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe_pag(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (
1,
64,
64,
3,
), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}"
expected_slice = np.array([0.8366, 0.5513, 0.6105, 0.6213, 0.6957, 0.7400, 0.6614, 0.6102, 0.5239])
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}"
@slow
@require_torch_gpu
class StableDiffusionXLPAGInpaintPipelineIntegrationTests(unittest.TestCase):
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", seed=0, guidance_scale=7.0):
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).convert("RGB")
mask_image = load_image(mask_url).convert("RGB")
generator = torch.Generator(device=generator_device).manual_seed(seed)
inputs = {
"prompt": "A majestic tiger sitting on a bench",
"generator": generator,
"image": init_image,
"mask_image": mask_image,
"strength": 0.8,
"num_inference_steps": 3,
"guidance_scale": guidance_scale,
"pag_scale": 3.0,
"output_type": "np",
}
return inputs
def test_pag_cfg(self):
pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()
pipeline.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = pipeline(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1024, 1024, 3)
expected_slice = np.array(
[0.41385046, 0.39608297, 0.4360491, 0.26872507, 0.32187328, 0.4242474, 0.2603805, 0.34167895, 0.46561807]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
), f"output is different from expected, {image_slice.flatten()}"
def test_pag_uncond(self):
pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()
pipeline.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device, guidance_scale=0.0)
image = pipeline(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1024, 1024, 3)
expected_slice = np.array(
[0.41597816, 0.39302617, 0.44287828, 0.2687074, 0.28315824, 0.40582314, 0.20877528, 0.2380802, 0.39447647]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
), f"output is different from expected, {image_slice.flatten()}"
|
diffusers/tests/pipelines/pag/test_pag_sdxl_inpaint.py/0
|
{
"file_path": "diffusers/tests/pipelines/pag/test_pag_sdxl_inpaint.py",
"repo_id": "diffusers",
"token_count": 5988
}
| 161
|
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from parameterized import parameterized
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
LCMScheduler,
MultiAdapter,
PNDMScheduler,
StableDiffusionAdapterPipeline,
T2IAdapter,
UNet2DConditionModel,
)
from diffusers.utils import logging
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineFromPipeTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class AdapterTests:
pipeline_class = StableDiffusionAdapterPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
def get_dummy_components(self, adapter_type, time_cond_proj_dim=None):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
time_cond_proj_dim=time_cond_proj_dim,
)
scheduler = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
if adapter_type == "full_adapter" or adapter_type == "light_adapter":
adapter = T2IAdapter(
in_channels=3,
channels=[32, 64],
num_res_blocks=2,
downscale_factor=2,
adapter_type=adapter_type,
)
elif adapter_type == "multi_adapter":
adapter = MultiAdapter(
[
T2IAdapter(
in_channels=3,
channels=[32, 64],
num_res_blocks=2,
downscale_factor=2,
adapter_type="full_adapter",
),
T2IAdapter(
in_channels=3,
channels=[32, 64],
num_res_blocks=2,
downscale_factor=2,
adapter_type="full_adapter",
),
]
)
else:
raise ValueError(
f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''"
)
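        # For "multi_adapter", MultiAdapter wraps two full adapters; the pipeline then
        # expects a matching list of conditioning images (and, optionally, per-adapter
        # conditioning scales), as exercised by the multi-adapter test class below.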
components = {
"adapter": adapter,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_components_with_full_downscaling(self, adapter_type):
"""Get dummy components with x8 VAE downscaling and 4 UNet down blocks.
        These dummy components are intended to fully exercise the T2I-Adapter
downscaling behavior.
"""
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 32, 32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"),
up_block_types=("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
cross_attention_dim=32,
)
scheduler = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 32, 32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
if adapter_type == "full_adapter" or adapter_type == "light_adapter":
adapter = T2IAdapter(
in_channels=3,
channels=[32, 32, 32, 64],
num_res_blocks=2,
downscale_factor=8,
adapter_type=adapter_type,
)
elif adapter_type == "multi_adapter":
adapter = MultiAdapter(
[
T2IAdapter(
in_channels=3,
channels=[32, 32, 32, 64],
num_res_blocks=2,
downscale_factor=8,
adapter_type="full_adapter",
),
T2IAdapter(
in_channels=3,
channels=[32, 32, 32, 64],
num_res_blocks=2,
downscale_factor=8,
adapter_type="full_adapter",
),
]
)
else:
raise ValueError(
f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''"
)
components = {
"adapter": adapter,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1):
if num_images == 1:
image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device)
else:
image = [
floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in range(num_images)
]
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "np",
}
return inputs
def test_attention_slicing_forward_pass(self):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
@parameterized.expand(
[
# (dim=264) The internal feature map will be 33x33 after initial pixel unshuffling (downscaled x8).
(((4 * 8 + 1) * 8),),
# (dim=272) The internal feature map will be 17x17 after the first T2I down block (downscaled x16).
(((4 * 4 + 1) * 16),),
# (dim=288) The internal feature map will be 9x9 after the second T2I down block (downscaled x32).
(((4 * 2 + 1) * 32),),
# (dim=320) The internal feature map will be 5x5 after the third T2I down block (downscaled x64).
(((4 * 1 + 1) * 64),),
]
)
def test_multiple_image_dimensions(self, dim):
"""Test that the T2I-Adapter pipeline supports any input dimension that
is divisible by the adapter's `downscale_factor`. This test was added in
response to an issue where the T2I Adapter's downscaling padding
behavior did not match the UNet's behavior.
Note that we have selected `dim` values to produce odd resolutions at
each downscaling level.
"""
components = self.get_dummy_components_with_full_downscaling()
sd_pipe = StableDiffusionAdapterPipeline(**components)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim)
image = sd_pipe(**inputs).images
assert image.shape == (1, dim, dim, 3)
def test_adapter_lcm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionAdapterPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_adapter_lcm_custom_timesteps(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionAdapterPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
del inputs["num_inference_steps"]
inputs["timesteps"] = [999, 499]
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
class StableDiffusionFullAdapterPipelineFastTests(
AdapterTests, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase
):
def get_dummy_components(self, time_cond_proj_dim=None):
return super().get_dummy_components("full_adapter", time_cond_proj_dim=time_cond_proj_dim)
def get_dummy_components_with_full_downscaling(self):
return super().get_dummy_components_with_full_downscaling("full_adapter")
def test_stable_diffusion_adapter_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionAdapterPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4858, 0.5500, 0.4278, 0.4669, 0.6184, 0.4322, 0.5010, 0.5033, 0.4746])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
def test_from_pipe_consistent_forward_pass_cpu_offload(self):
super().test_from_pipe_consistent_forward_pass_cpu_offload(expected_max_diff=6e-3)
class StableDiffusionLightAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase):
def get_dummy_components(self, time_cond_proj_dim=None):
return super().get_dummy_components("light_adapter", time_cond_proj_dim=time_cond_proj_dim)
def get_dummy_components_with_full_downscaling(self):
return super().get_dummy_components_with_full_downscaling("light_adapter")
def test_stable_diffusion_adapter_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionAdapterPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4965, 0.5548, 0.4330, 0.4771, 0.6226, 0.4382, 0.5037, 0.5071, 0.4782])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase):
def get_dummy_components(self, time_cond_proj_dim=None):
return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim)
def get_dummy_components_with_full_downscaling(self):
return super().get_dummy_components_with_full_downscaling("multi_adapter")
def get_dummy_inputs(self, device, height=64, width=64, seed=0):
inputs = super().get_dummy_inputs(device, seed, height=height, width=width, num_images=2)
inputs["adapter_conditioning_scale"] = [0.5, 0.5]
return inputs
def test_stable_diffusion_adapter_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionAdapterPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4902, 0.5539, 0.4317, 0.4682, 0.6190, 0.4351, 0.5018, 0.5046, 0.4772])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
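    # The batching tests below override the shared mixin versions because the
    # multi-adapter pipeline takes a list of conditioning images, so each image in
    # the list has to be batched separately (see the "image" branches below).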
def test_inference_batch_consistent(
self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"]
):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
logger = logging.get_logger(pipe.__module__)
logger.setLevel(level=diffusers.logging.FATAL)
# batchify inputs
for batch_size in batch_sizes:
batched_inputs = {}
for name, value in inputs.items():
if name in self.batch_params:
# prompt is string
if name == "prompt":
len_prompt = len(value)
# make unequal batch sizes
batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
# make last batch super long
batched_inputs[name][-1] = 100 * "very long"
elif name == "image":
batched_images = []
for image in value:
batched_images.append(batch_size * [image])
batched_inputs[name] = batched_images
else:
batched_inputs[name] = batch_size * [value]
elif name == "batch_size":
batched_inputs[name] = batch_size
else:
batched_inputs[name] = value
for arg in additional_params_copy_to_batched_inputs:
batched_inputs[arg] = inputs[arg]
batched_inputs["output_type"] = "np"
if self.pipeline_class.__name__ == "DanceDiffusionPipeline":
batched_inputs.pop("output_type")
output = pipe(**batched_inputs)
assert len(output[0]) == batch_size
batched_inputs["output_type"] = "np"
if self.pipeline_class.__name__ == "DanceDiffusionPipeline":
batched_inputs.pop("output_type")
output = pipe(**batched_inputs)[0]
assert output.shape[0] == batch_size
logger.setLevel(level=diffusers.logging.WARNING)
def test_num_images_per_prompt(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
batch_sizes = [1, 2]
num_images_per_prompts = [1, 2]
for batch_size in batch_sizes:
for num_images_per_prompt in num_images_per_prompts:
inputs = self.get_dummy_inputs(torch_device)
for key in inputs.keys():
if key in self.batch_params:
if key == "image":
batched_images = []
for image in inputs[key]:
batched_images.append(batch_size * [image])
inputs[key] = batched_images
else:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
def test_inference_batch_single_identical(
self,
batch_size=3,
test_max_difference=None,
test_mean_pixel_difference=None,
relax_max_difference=False,
expected_max_diff=2e-3,
additional_params_copy_to_batched_inputs=["num_inference_steps"],
):
if test_max_difference is None:
            # TODO(Pedro) - not sure why, but this comparison is not reproducible on mps at the moment
            # make sure that batched and non-batched outputs are identical
test_max_difference = torch_device != "mps"
if test_mean_pixel_difference is None:
# TODO same as above
test_mean_pixel_difference = torch_device != "mps"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
logger = logging.get_logger(pipe.__module__)
logger.setLevel(level=diffusers.logging.FATAL)
# batchify inputs
batched_inputs = {}
for name, value in inputs.items():
if name in self.batch_params:
# prompt is string
if name == "prompt":
len_prompt = len(value)
# make unequal batch sizes
batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
# make last batch super long
batched_inputs[name][-1] = 100 * "very long"
elif name == "image":
batched_images = []
for image in value:
batched_images.append(batch_size * [image])
batched_inputs[name] = batched_images
else:
batched_inputs[name] = batch_size * [value]
elif name == "batch_size":
batched_inputs[name] = batch_size
elif name == "generator":
batched_inputs[name] = [self.get_generator(i) for i in range(batch_size)]
else:
batched_inputs[name] = value
for arg in additional_params_copy_to_batched_inputs:
batched_inputs[arg] = inputs[arg]
if self.pipeline_class.__name__ != "DanceDiffusionPipeline":
batched_inputs["output_type"] = "np"
output_batch = pipe(**batched_inputs)
assert output_batch[0].shape[0] == batch_size
inputs["generator"] = self.get_generator(0)
output = pipe(**inputs)
logger.setLevel(level=diffusers.logging.WARNING)
if test_max_difference:
if relax_max_difference:
# Taking the median of the largest <n> differences
# is resilient to outliers
diff = np.abs(output_batch[0][0] - output[0][0])
diff = diff.flatten()
diff.sort()
max_diff = np.median(diff[-5:])
else:
max_diff = np.abs(output_batch[0][0] - output[0][0]).max()
assert max_diff < expected_max_diff
if test_mean_pixel_difference:
assert_mean_pixel_difference(output_batch[0][0], output[0][0])
@slow
@require_torch_gpu
class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_adapter_color(self):
adapter_model = "TencentARC/t2iadapter_color_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "snail"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png"
)
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_depth(self):
adapter_model = "TencentARC/t2iadapter_depth_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "snail"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png"
)
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_depth_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_depth_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "desk"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_depth_sd_v15(self):
adapter_model = "TencentARC/t2iadapter_depth_sd15v2"
sd_model = "runwayml/stable-diffusion-v1-5"
prompt = "desk"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd15v2.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_keypose_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_keypose_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "person"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/person_keypose.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_keypose_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_openpose_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_openpose_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "person"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/iron_man_pose.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_openpose_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_seg_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_seg_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "motorcycle"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png"
)
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_seg_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_zoedepth_sd_v15(self):
adapter_model = "TencentARC/t2iadapter_zoedepth_sd15v1"
sd_model = "runwayml/stable-diffusion-v1-5"
prompt = "motorcycle"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motorcycle.png"
input_channels = 3
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_zoedepth_sd15v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.set_progress_bar_config(disable=None)
pipe.enable_model_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_canny_sd_v14(self):
adapter_model = "TencentARC/t2iadapter_canny_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "toy"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
input_channels = 1
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_canny_sd_v15(self):
adapter_model = "TencentARC/t2iadapter_canny_sd15v2"
sd_model = "runwayml/stable-diffusion-v1-5"
prompt = "toy"
image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
input_channels = 1
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd15v2.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_sketch_sd14(self):
adapter_model = "TencentARC/t2iadapter_sketch_sd14v1"
sd_model = "CompVis/stable-diffusion-v1-4"
prompt = "cat"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png"
)
input_channels = 1
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd14v1.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_sketch_sd15(self):
adapter_model = "TencentARC/t2iadapter_sketch_sd15v2"
sd_model = "runwayml/stable-diffusion-v1-5"
prompt = "cat"
image_url = (
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png"
)
input_channels = 1
out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd15v2.npy"
image = load_image(image_url)
expected_out = load_numpy(out_url)
if input_channels == 1:
image = image.convert("L")
adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)
pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
generator = torch.Generator(device="cpu").manual_seed(0)
out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images
max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten())
assert max_diff < 1e-2
def test_stable_diffusion_adapter_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_seg_sd14v1")
pipe = StableDiffusionAdapterPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", adapter=adapter, safety_checker=None
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png"
)
pipe(prompt="foo", image=image, num_inference_steps=2)
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 5 * 10**9
|
diffusers/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py/0
|
{
"file_path": "diffusers/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py",
"repo_id": "diffusers",
"token_count": 18773
}
| 162
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
EulerDiscreteScheduler,
StableDiffusionSAGPipeline,
UNet2DConditionModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
IPAdapterTesterMixin,
PipelineFromPipeTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(
IPAdapterTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
PipelineFromPipeTesterMixin,
unittest.TestCase,
):
pipeline_class = StableDiffusionSAGPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(4, 8),
layers_per_block=2,
sample_size=8,
norm_num_groups=1,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=8,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[4, 8],
norm_num_groups=1,
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=8,
num_hidden_layers=2,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "np",
}
return inputs
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@unittest.skip("Not necessary to test here.")
def test_xformers_attention_forwardGenerator_pass(self):
pass
def test_pipeline_different_schedulers(self):
pipeline = self.pipeline_class(**self.get_dummy_components())
inputs = self.get_dummy_inputs("cpu")
expected_image_size = (16, 16, 3)
for scheduler_cls in [DDIMScheduler, DEISMultistepScheduler, DPMSolverMultistepScheduler]:
pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
image = pipeline(**inputs).images[0]
shape = image.shape
assert shape == expected_image_size
pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
with self.assertRaises(ValueError):
# Karras schedulers are not supported
image = pipeline(**inputs).images[0]
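# The nightly tests below run the full SAG pipeline against real Stable Diffusion checkpoints,
# checking the output shape and a 3x3 corner slice of the generated image against reference values.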
@nightly
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_1(self):
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
sag_pipe = sag_pipe.to(torch_device)
sag_pipe.set_progress_bar_config(disable=None)
prompt = "."
generator = torch.manual_seed(0)
output = sag_pipe(
[prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def test_stable_diffusion_2(self):
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
sag_pipe = sag_pipe.to(torch_device)
sag_pipe.set_progress_bar_config(disable=None)
prompt = "."
generator = torch.manual_seed(0)
output = sag_pipe(
[prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def test_stable_diffusion_2_non_square(self):
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
sag_pipe = sag_pipe.to(torch_device)
sag_pipe.set_progress_bar_config(disable=None)
prompt = "."
generator = torch.manual_seed(0)
output = sag_pipe(
[prompt],
width=768,
height=512,
generator=generator,
guidance_scale=7.5,
sag_scale=1.0,
num_inference_steps=20,
output_type="np",
)
image = output.images
assert image.shape == (1, 512, 768, 3)
|
diffusers/tests/pipelines/stable_diffusion_sag/test_stable_diffusion_sag.py/0
|
{
"file_path": "diffusers/tests/pipelines/stable_diffusion_sag/test_stable_diffusion_sag.py",
"repo_id": "diffusers",
"token_count": 3617
}
| 163
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from huggingface_hub import ModelCard
from diffusers import (
DDPMScheduler,
DiffusionPipeline,
KandinskyV22CombinedPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorPipeline,
)
from diffusers.pipelines.pipeline_utils import CONNECTED_PIPES_KEYS
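# Helper: two state dicts count as almost equal when every pair of corresponding tensors differs
# by at most 1e-3 in absolute sum. Hypothetical usage sketch (names are illustrative):
#   assert state_dicts_almost_equal(pipe.unet.state_dict(), reloaded_pipe.unet.state_dict())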
def state_dicts_almost_equal(sd1, sd2):
sd1 = dict(sorted(sd1.items()))
sd2 = dict(sorted(sd2.items()))
models_are_equal = True
for ten1, ten2 in zip(sd1.values(), sd2.values()):
if (ten1 - ten2).abs().sum() > 1e-3:
models_are_equal = False
return models_are_equal
class CombinedPipelineFastTest(unittest.TestCase):
def modelcard_has_connected_pipeline(self, model_id):
modelcard = ModelCard.load(model_id)
connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS}
connected_pipes = {k: v for k, v in connected_pipes.items() if v is not None}
return len(connected_pipes) > 0
def test_correct_modelcard_format(self):
# hf-internal-testing/tiny-random-kandinsky-v22-prior has no metadata
assert not self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-prior")
# see https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L2
assert self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-decoder")
def test_load_connected_checkpoint_when_specified(self):
pipeline_prior = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior")
pipeline_prior_connected = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-random-kandinsky-v22-prior", load_connected_pipeline=True
)
# Passing `load_connected_pipeline` to prior is a no-op as the pipeline has no connected pipeline
assert pipeline_prior.__class__ == pipeline_prior_connected.__class__
pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder")
pipeline_connected = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-random-kandinsky-v22-decoder", load_connected_pipeline=True
)
# Passing `load_connected_pipeline` to decoder loads the combined pipeline
assert pipeline.__class__ != pipeline_connected.__class__
assert pipeline.__class__ == KandinskyV22Pipeline
assert pipeline_connected.__class__ == KandinskyV22CombinedPipeline
# check that loaded components match prior and decoder components
assert set(pipeline_connected.components.keys()) == set(
["prior_" + k for k in pipeline_prior.components.keys()] + list(pipeline.components.keys())
)
def test_load_connected_checkpoint_default(self):
prior = KandinskyV22PriorPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior")
decoder = KandinskyV22Pipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder")
# check that combined pipeline loads both prior & decoder because of
# https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L3
assert (
KandinskyV22CombinedPipeline._load_connected_pipes
) # combined pipelines will download more checkpoints than just the one specified
pipeline = KandinskyV22CombinedPipeline.from_pretrained(
"hf-internal-testing/tiny-random-kandinsky-v22-decoder"
)
prior_comps = prior.components
decoder_comps = decoder.components
for k, component in pipeline.components.items():
if k.startswith("prior_"):
k = k[6:]
comp = prior_comps[k]
else:
comp = decoder_comps[k]
if isinstance(component, torch.nn.Module):
assert state_dicts_almost_equal(component.state_dict(), comp.state_dict())
elif hasattr(component, "config"):
assert dict(component.config) == dict(comp.config)
else:
assert component.__class__ == comp.__class__
def test_load_connected_checkpoint_with_passed_obj(self):
pipeline = KandinskyV22CombinedPipeline.from_pretrained(
"hf-internal-testing/tiny-random-kandinsky-v22-decoder"
)
prior_scheduler = DDPMScheduler.from_config(pipeline.prior_scheduler.config)
scheduler = DDPMScheduler.from_config(pipeline.scheduler.config)
# make sure we pass a different scheduler and prior_scheduler
assert pipeline.prior_scheduler.__class__ != prior_scheduler.__class__
assert pipeline.scheduler.__class__ != scheduler.__class__
pipeline_new = KandinskyV22CombinedPipeline.from_pretrained(
"hf-internal-testing/tiny-random-kandinsky-v22-decoder",
prior_scheduler=prior_scheduler,
scheduler=scheduler,
)
assert dict(pipeline_new.prior_scheduler.config) == dict(prior_scheduler.config)
assert dict(pipeline_new.scheduler.config) == dict(scheduler.config)
|
diffusers/tests/pipelines/test_pipelines_combined.py/0
|
{
"file_path": "diffusers/tests/pipelines/test_pipelines_combined.py",
"repo_id": "diffusers",
"token_count": 2384
}
| 164
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import DDPMWuerstchenScheduler, WuerstchenDecoderPipeline
from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class WuerstchenDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = WuerstchenDecoderPipeline
params = ["prompt"]
batch_params = ["image_embeddings", "prompt", "negative_prompt"]
required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
test_xformers_attention = False
callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"]
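# The properties below build tiny dummy components (CLIP tokenizer/text encoder, PaellaVQModel,
# WuerstchenDiffNeXt) so the decoder pipeline can run quickly without full-size checkpoints.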
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def dummy_tokenizer(self):
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
projection_dim=self.text_embedder_hidden_size,
hidden_size=self.text_embedder_hidden_size,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModel(config).eval()
@property
def dummy_vqgan(self):
torch.manual_seed(0)
model_kwargs = {
"bottleneck_blocks": 1,
"num_vq_embeddings": 2,
}
model = PaellaVQModel(**model_kwargs)
return model.eval()
@property
def dummy_decoder(self):
torch.manual_seed(0)
model_kwargs = {
"c_cond": self.text_embedder_hidden_size,
"c_hidden": [320],
"nhead": [-1],
"blocks": [4],
"level_config": ["CT"],
"clip_embd": self.text_embedder_hidden_size,
"inject_effnet": [False],
}
model = WuerstchenDiffNeXt(**model_kwargs)
return model.eval()
def get_dummy_components(self):
decoder = self.dummy_decoder
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
vqgan = self.dummy_vqgan
scheduler = DDPMWuerstchenScheduler()
components = {
"decoder": decoder,
"vqgan": vqgan,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"latent_dim_scale": 4.0,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image_embeddings": torch.ones((1, 4, 4, 4), device=device),
"prompt": "horse",
"generator": generator,
"guidance_scale": 1.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_wuerstchen_decoder(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.0000, 0.0000, 0.0089, 1.0000, 1.0000, 0.3927, 1.0000, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(expected_max_diff=1e-5)
@skip_mps
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
test_mean_pixel_difference = False
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference,
test_mean_pixel_difference=test_mean_pixel_difference,
)
@unittest.skip(reason="bf16 not supported and requires CUDA")
def test_float16_inference(self):
super().test_float16_inference()
|
diffusers/tests/pipelines/wuerstchen/test_wuerstchen_decoder.py/0
|
{
"file_path": "diffusers/tests/pipelines/wuerstchen/test_wuerstchen_decoder.py",
"repo_id": "diffusers",
"token_count": 2630
}
| 165
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils.testing_utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
scheduler_classes = (EulerDiscreteScheduler,)
num_inference_steps = 10
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1100,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**kwargs)
return config
def test_timesteps(self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_betas(self):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_timestep_type(self):
timestep_types = ["discrete", "continuous"]
for timestep_type in timestep_types:
self.check_over_configs(timestep_type=timestep_type)
def test_karras_sigmas(self):
self.check_over_configs(use_karras_sigmas=True, sigma_min=0.02, sigma_max=700.0)
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
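# The helpers below run a complete denoising loop with the dummy model: full_loop() builds the
# scheduler from get_scheduler_config(), calls set_timesteps(), and repeatedly applies
# scale_model_input -> model -> step; the custom variants rebuild the scheduler from explicit
# timesteps or sigmas before doing the same.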
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = self.num_inference_steps
scheduler.set_timesteps(num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
return sample
def full_loop_custom_timesteps(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = self.num_inference_steps
scheduler.set_timesteps(num_inference_steps)
timesteps = scheduler.timesteps
# reset the timesteps using `timesteps`
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps=None, timesteps=timesteps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
return sample
def full_loop_custom_sigmas(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = self.num_inference_steps
scheduler.set_timesteps(num_inference_steps)
sigmas = scheduler.sigmas
# reset the timesteps using `sigmas`
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps=None, sigmas=sigmas)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
sample = sample.to(torch_device)
for i, t in enumerate(scheduler.timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
return sample
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 10.0807) < 1e-2
assert abs(result_mean.item() - 0.0131) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 0.0002) < 1e-2
assert abs(result_mean.item() - 2.2676e-06) < 1e-3
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 10.0807) < 1e-2
assert abs(result_mean.item() - 0.0131) < 1e-3
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
sample = sample.to(torch_device)
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 124.52299499511719) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
def test_full_loop_with_noise(self):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.num_inference_steps)
generator = torch.manual_seed(0)
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
# add noise
t_start = self.num_inference_steps - 2
noise = self.dummy_noise_deter
noise = noise.to(sample.device)
timesteps = scheduler.timesteps[t_start * scheduler.order :]
sample = scheduler.add_noise(sample, noise, timesteps[:1])
for i, t in enumerate(timesteps):
sample = scheduler.scale_model_input(sample, t)
model_output = model(sample, t)
output = scheduler.step(model_output, t, sample, generator=generator)
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 57062.9297) < 1e-2, f"expected result sum 57062.9297, but got {result_sum}"
assert abs(result_mean.item() - 74.3007) < 1e-3, f"expected result mean 74.3007, but got {result_mean}"
def test_custom_timesteps(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
for interpolation_type in ["linear", "log_linear"]:
for final_sigmas_type in ["sigma_min", "zero"]:
sample = self.full_loop(
prediction_type=prediction_type,
interpolation_type=interpolation_type,
final_sigmas_type=final_sigmas_type,
)
sample_custom_timesteps = self.full_loop_custom_timesteps(
prediction_type=prediction_type,
interpolation_type=interpolation_type,
final_sigmas_type=final_sigmas_type,
)
assert (
torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
), f"Scheduler outputs are not identical for prediction_type: {prediction_type}, interpolation_type: {interpolation_type} and final_sigmas_type: {final_sigmas_type}"
def test_custom_sigmas(self):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
for final_sigmas_type in ["sigma_min", "zero"]:
sample = self.full_loop(
prediction_type=prediction_type,
final_sigmas_type=final_sigmas_type,
)
sample_custom_timesteps = self.full_loop_custom_sigmas(
prediction_type=prediction_type,
final_sigmas_type=final_sigmas_type,
)
assert (
torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5
), f"Scheduler outputs are not identical for prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}"
|
diffusers/tests/schedulers/test_scheduler_euler.py/0
|
{
"file_path": "diffusers/tests/schedulers/test_scheduler_euler.py",
"repo_id": "diffusers",
"token_count": 4760
}
| 166
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import tempfile
import unittest
import uuid
from typing import Dict, List, Tuple
import numpy as np
import torch
from huggingface_hub import delete_repo
import diffusers
from diffusers import (
CMStochasticIterativeScheduler,
DDIMScheduler,
DEISMultistepScheduler,
DiffusionPipeline,
EDMEulerScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
IPNDMScheduler,
LMSDiscreteScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import logging
from diffusers.utils.testing_utils import CaptureLogger, torch_device
from ..others.test_utils import TOKEN, USER, is_staging_test
torch.backends.cuda.matmul.allow_tf32 = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SchedulerObject(SchedulerMixin, ConfigMixin):
config_name = "config.json"
@register_to_config
def __init__(
self,
a=2,
b=5,
c=(2, 5),
d="for diffusion",
e=[1, 3],
):
pass
class SchedulerObject2(SchedulerMixin, ConfigMixin):
config_name = "config.json"
@register_to_config
def __init__(
self,
a=2,
b=5,
c=(2, 5),
d="for diffusion",
f=[1, 3],
):
pass
class SchedulerObject3(SchedulerMixin, ConfigMixin):
config_name = "config.json"
@register_to_config
def __init__(
self,
a=2,
b=5,
c=(2, 5),
d="for diffusion",
e=[1, 3],
f=[1, 3],
):
pass
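# The three SchedulerObject classes above share the same defaults but differ in their config
# signatures (e only, f only, or both); they are used to exercise config save/load compatibility.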
class SchedulerBaseTests(unittest.TestCase):
def test_save_load_from_different_config(self):
obj = SchedulerObject()
# mock add obj class to `diffusers`
setattr(diffusers, "SchedulerObject", SchedulerObject)
logger = logging.get_logger("diffusers.configuration_utils")
with tempfile.TemporaryDirectory() as tmpdirname:
obj.save_config(tmpdirname)
with CaptureLogger(logger) as cap_logger_1:
config = SchedulerObject2.load_config(tmpdirname)
new_obj_1 = SchedulerObject2.from_config(config)
# now save a config parameter that is not expected
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f:
data = json.load(f)
data["unexpected"] = True
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f:
json.dump(data, f)
with CaptureLogger(logger) as cap_logger_2:
config = SchedulerObject.load_config(tmpdirname)
new_obj_2 = SchedulerObject.from_config(config)
with CaptureLogger(logger) as cap_logger_3:
config = SchedulerObject2.load_config(tmpdirname)
new_obj_3 = SchedulerObject2.from_config(config)
assert new_obj_1.__class__ == SchedulerObject2
assert new_obj_2.__class__ == SchedulerObject
assert new_obj_3.__class__ == SchedulerObject2
assert cap_logger_1.out == ""
assert (
cap_logger_2.out
== "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and"
" will"
" be ignored. Please verify your config.json configuration file.\n"
)
assert cap_logger_2.out.replace("SchedulerObject", "SchedulerObject2") == cap_logger_3.out
def test_save_load_compatible_schedulers(self):
SchedulerObject2._compatibles = ["SchedulerObject"]
SchedulerObject._compatibles = ["SchedulerObject2"]
obj = SchedulerObject()
# mock add obj class to `diffusers`
setattr(diffusers, "SchedulerObject", SchedulerObject)
setattr(diffusers, "SchedulerObject2", SchedulerObject2)
logger = logging.get_logger("diffusers.configuration_utils")
with tempfile.TemporaryDirectory() as tmpdirname:
obj.save_config(tmpdirname)
# now save a config parameter that is expected by another class, but not origin class
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f:
data = json.load(f)
data["f"] = [0, 0]
data["unexpected"] = True
with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f:
json.dump(data, f)
with CaptureLogger(logger) as cap_logger:
config = SchedulerObject.load_config(tmpdirname)
new_obj = SchedulerObject.from_config(config)
assert new_obj.__class__ == SchedulerObject
assert (
cap_logger.out
== "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and"
" will"
" be ignored. Please verify your config.json configuration file.\n"
)
def test_save_load_from_different_config_comp_schedulers(self):
SchedulerObject3._compatibles = ["SchedulerObject", "SchedulerObject2"]
SchedulerObject2._compatibles = ["SchedulerObject", "SchedulerObject3"]
SchedulerObject._compatibles = ["SchedulerObject2", "SchedulerObject3"]
obj = SchedulerObject()
# mock add obj class to `diffusers`
setattr(diffusers, "SchedulerObject", SchedulerObject)
setattr(diffusers, "SchedulerObject2", SchedulerObject2)
setattr(diffusers, "SchedulerObject3", SchedulerObject3)
logger = logging.get_logger("diffusers.configuration_utils")
logger.setLevel(diffusers.logging.INFO)
with tempfile.TemporaryDirectory() as tmpdirname:
obj.save_config(tmpdirname)
with CaptureLogger(logger) as cap_logger_1:
config = SchedulerObject.load_config(tmpdirname)
new_obj_1 = SchedulerObject.from_config(config)
with CaptureLogger(logger) as cap_logger_2:
config = SchedulerObject2.load_config(tmpdirname)
new_obj_2 = SchedulerObject2.from_config(config)
with CaptureLogger(logger) as cap_logger_3:
config = SchedulerObject3.load_config(tmpdirname)
new_obj_3 = SchedulerObject3.from_config(config)
assert new_obj_1.__class__ == SchedulerObject
assert new_obj_2.__class__ == SchedulerObject2
assert new_obj_3.__class__ == SchedulerObject3
assert cap_logger_1.out == ""
assert cap_logger_2.out == "{'f'} was not found in config. Values will be initialized to default values.\n"
assert cap_logger_3.out == "{'f'} was not found in config. Values will be initialized to default values.\n"
def test_default_arguments_not_in_config(self):
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16
)
assert pipe.scheduler.__class__ == DDIMScheduler
# Default for DDIMScheduler
assert pipe.scheduler.config.timestep_spacing == "leading"
# Switch to a different one, verify we use the default for that class
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.timestep_spacing == "linspace"
# Override with kwargs
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
assert pipe.scheduler.config.timestep_spacing == "trailing"
# Verify overridden kwargs stick
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.timestep_spacing == "trailing"
# And stick
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.timestep_spacing == "trailing"
def test_default_solver_type_after_switch(self):
pipe = DiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16
)
assert pipe.scheduler.__class__ == DDIMScheduler
pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.solver_type == "logrho"
# Switch to UniPC, verify the solver is the default
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
assert pipe.scheduler.config.solver_type == "bh2"
class SchedulerCommonTest(unittest.TestCase):
scheduler_classes = ()
forward_default_kwargs = ()
@property
def default_num_inference_steps(self):
return 50
@property
def default_timestep(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.get("num_inference_steps", self.default_num_inference_steps)
try:
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
timestep = scheduler.timesteps[0]
except NotImplementedError:
logger.warning(
f"The scheduler {self.__class__.__name__} does not implement a `get_scheduler_config` method."
f" `default_timestep` will be set to the default value of 1."
)
timestep = 1
return timestep
# NOTE: currently taking the convention that default_timestep > default_timestep_2 (alternatively,
# default_timestep comes earlier in the timestep schedule than default_timestep_2)
@property
def default_timestep_2(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.get("num_inference_steps", self.default_num_inference_steps)
try:
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
if len(scheduler.timesteps) >= 2:
timestep_2 = scheduler.timesteps[1]
else:
logger.warning(
f"Using num_inference_steps from the scheduler testing class's default config leads to a timestep"
f" scheduler of length {len(scheduler.timesteps)} < 2. The default `default_timestep_2` value of 0"
f" will be used."
)
timestep_2 = 0
except NotImplementedError:
logger.warning(
f"The scheduler {self.__class__.__name__} does not implement a `get_scheduler_config` method."
f" `default_timestep_2` will be set to the default value of 0."
)
timestep_2 = 0
return timestep_2
@property
def dummy_sample(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
sample = torch.rand((batch_size, num_channels, height, width))
return sample
@property
def dummy_noise_deter(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
num_elems = batch_size * num_channels * height * width
sample = torch.arange(num_elems).flip(-1)
sample = sample.reshape(num_channels, height, width, batch_size)
sample = sample / num_elems
sample = sample.permute(3, 0, 1, 2)
return sample
@property
def dummy_sample_deter(self):
batch_size = 4
num_channels = 3
height = 8
width = 8
num_elems = batch_size * num_channels * height * width
sample = torch.arange(num_elems)
sample = sample.reshape(num_channels, height, width, batch_size)
sample = sample / num_elems
sample = sample.permute(3, 0, 1, 2)
return sample
def get_scheduler_config(self):
raise NotImplementedError
def dummy_model(self):
def model(sample, t, *args):
# if t is a tensor, match the number of dimensions of sample
if isinstance(t, torch.Tensor):
num_dims = len(sample.shape)
# pad t with 1s to match num_dims
t = t.reshape(-1, *(1,) * (num_dims - 1)).to(sample.device).to(sample.dtype)
return sample * t / (t + 1)
return model
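# Note: the dummy model above simply returns sample * t / (t + 1); a tensor timestep is reshaped
# so it broadcasts over the sample, keeping the scheduler tests fully deterministic.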
def check_over_configs(self, time_step=0, **config):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
time_step = time_step if time_step is not None else self.default_timestep
for scheduler_class in self.scheduler_classes:
# TODO(Suraj) - delete the following two lines once DDPM, DDIM, and PNDM have timesteps cast to float by default
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
time_step = float(time_step)
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max)
time_step = scaled_sigma_max
if scheduler_class == EDMEulerScheduler:
time_step = scheduler.timesteps[-1]
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, time_step)
else:
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# Make sure `scale_model_input` is invoked to prevent a warning
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
_ = scheduler.scale_model_input(sample, scaled_sigma_max)
_ = new_scheduler.scale_model_input(sample, scaled_sigma_max)
elif scheduler_class != VQDiffusionScheduler:
_ = scheduler.scale_model_input(sample, scheduler.timesteps[-1])
_ = new_scheduler.scale_model_input(sample, scheduler.timesteps[-1])
# Set the seed before step() as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def check_over_forward(self, time_step=0, **forward_kwargs):
kwargs = dict(self.forward_default_kwargs)
kwargs.update(forward_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", None)
time_step = time_step if time_step is not None else self.default_timestep
for scheduler_class in self.scheduler_classes:
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
time_step = float(time_step)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, time_step)
else:
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_from_save_pretrained(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", self.default_num_inference_steps)
for scheduler_class in self.scheduler_classes:
timestep = self.default_timestep
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
timestep = float(timestep)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
timestep = scheduler.sigma_to_t(scheduler.config.sigma_max)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, timestep)
else:
sample = self.dummy_sample
residual = 0.1 * sample
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
new_scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def test_compatibles(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
assert all(c is not None for c in scheduler.compatibles)
for comp_scheduler_cls in scheduler.compatibles:
comp_scheduler = comp_scheduler_cls.from_config(scheduler.config)
assert comp_scheduler is not None
new_scheduler = scheduler_class.from_config(comp_scheduler.config)
new_scheduler_config = {k: v for k, v in new_scheduler.config.items() if k in scheduler.config}
scheduler_diff = {k: v for k, v in new_scheduler.config.items() if k not in scheduler.config}
# make sure that configs are essentially identical
assert new_scheduler_config == dict(scheduler.config)
# make sure that the only differences are config values that are not in __init__
init_keys = inspect.signature(scheduler_class.__init__).parameters.keys()
assert set(scheduler_diff.keys()).intersection(set(init_keys)) == set()
def test_from_pretrained(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_pretrained(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# `_use_default_values` should not exist for just saved & loaded scheduler
scheduler_config = dict(scheduler.config)
del scheduler_config["_use_default_values"]
assert scheduler_config == new_scheduler.config
def test_step_shape(self):
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", self.default_num_inference_steps)
timestep_0 = self.default_timestep
timestep_1 = self.default_timestep_2
for scheduler_class in self.scheduler_classes:
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
timestep_0 = float(timestep_0)
timestep_1 = float(timestep_1)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, timestep_0)
else:
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_scheduler_outputs_equivalence(self):
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop("num_inference_steps", self.default_num_inference_steps)
timestep = self.default_timestep
if len(self.scheduler_classes) > 0 and self.scheduler_classes[0] == IPNDMScheduler:
timestep = 1
for scheduler_class in self.scheduler_classes:
if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler):
timestep = float(timestep)
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
timestep = scheduler.sigma_to_t(scheduler.config.sigma_max)
if scheduler_class == VQDiffusionScheduler:
num_vec_classes = scheduler_config["num_vec_classes"]
sample = self.dummy_sample(num_vec_classes)
model = self.dummy_model(num_vec_classes)
residual = model(sample, timestep)
else:
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
outputs_dict = scheduler.step(residual, timestep, sample, **kwargs)
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
kwargs["num_inference_steps"] = num_inference_steps
# Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
kwargs["generator"] = torch.manual_seed(0)
outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs)
recursive_check(outputs_tuple, outputs_dict)
def test_scheduler_public_api(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
if scheduler_class != VQDiffusionScheduler:
self.assertTrue(
hasattr(scheduler, "init_noise_sigma"),
f"{scheduler_class} does not implement a required attribute `init_noise_sigma`",
)
self.assertTrue(
hasattr(scheduler, "scale_model_input"),
(
f"{scheduler_class} does not implement a required class method `scale_model_input(sample,"
" timestep)`"
),
)
self.assertTrue(
hasattr(scheduler, "step"),
f"{scheduler_class} does not implement a required class method `step(...)`",
)
if scheduler_class != VQDiffusionScheduler:
sample = self.dummy_sample
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max)
scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max)
elif scheduler_class == EDMEulerScheduler:
scaled_sample = scheduler.scale_model_input(sample, scheduler.timesteps[-1])
else:
scaled_sample = scheduler.scale_model_input(sample, 0.0)
self.assertEqual(sample.shape, scaled_sample.shape)
def test_add_noise_device(self):
for scheduler_class in self.scheduler_classes:
if scheduler_class == IPNDMScheduler:
continue
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(self.default_num_inference_steps)
sample = self.dummy_sample.to(torch_device)
if scheduler_class == CMStochasticIterativeScheduler:
# Get valid timestep based on sigma_max, which should always be in timestep schedule.
scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max)
scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max)
elif scheduler_class == EDMEulerScheduler:
scaled_sample = scheduler.scale_model_input(sample, scheduler.timesteps[-1])
else:
scaled_sample = scheduler.scale_model_input(sample, 0.0)
self.assertEqual(sample.shape, scaled_sample.shape)
noise = torch.randn_like(scaled_sample).to(torch_device)
t = scheduler.timesteps[5][None]
noised = scheduler.add_noise(scaled_sample, noise, t)
self.assertEqual(noised.shape, scaled_sample.shape)
def test_deprecated_kwargs(self):
for scheduler_class in self.scheduler_classes:
has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters
has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0
if has_kwarg_in_model_class and not has_deprecated_kwarg:
raise ValueError(
f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated"
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if"
" there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
" [<deprecated_argument>]`"
)
if not has_kwarg_in_model_class and has_deprecated_kwarg:
raise ValueError(
f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated"
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs`"
f" argument to {self.model_class}.__init__ if there are deprecated arguments or remove the"
" deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`"
)
def test_trained_betas(self):
for scheduler_class in self.scheduler_classes:
if scheduler_class in (VQDiffusionScheduler, CMStochasticIterativeScheduler):
continue
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config, trained_betas=np.array([0.1, 0.3]))
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_pretrained(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
assert scheduler.betas.tolist() == new_scheduler.betas.tolist()
def test_getattr_is_correct(self):
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
# save some things to test
scheduler.dummy_attribute = 5
scheduler.register_to_config(test_attribute=5)
logger = logging.get_logger("diffusers.configuration_utils")
# 30 for warning
logger.setLevel(30)
with CaptureLogger(logger) as cap_logger:
assert hasattr(scheduler, "dummy_attribute")
assert getattr(scheduler, "dummy_attribute") == 5
assert scheduler.dummy_attribute == 5
# no warning should be thrown
assert cap_logger.out == ""
logger = logging.get_logger("diffusers.schedulers.scheduling_utils")
# 30 for warning
logger.setLevel(30)
with CaptureLogger(logger) as cap_logger:
assert hasattr(scheduler, "save_pretrained")
fn = scheduler.save_pretrained
fn_1 = getattr(scheduler, "save_pretrained")
assert fn == fn_1
# no warning should be thrown
assert cap_logger.out == ""
# warning should be thrown
with self.assertWarns(FutureWarning):
assert scheduler.test_attribute == 5
with self.assertWarns(FutureWarning):
assert getattr(scheduler, "test_attribute") == 5
with self.assertRaises(AttributeError) as error:
scheduler.does_not_exist
assert str(error.exception) == f"'{type(scheduler).__name__}' object has no attribute 'does_not_exist'"
@is_staging_test
class SchedulerPushToHubTester(unittest.TestCase):
identifier = uuid.uuid4()
repo_id = f"test-scheduler-{identifier}"
org_repo_id = f"valid_org/{repo_id}-org"
def test_push_to_hub(self):
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
scheduler.push_to_hub(self.repo_id, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}")
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.repo_id)
# Push to hub via save_config
with tempfile.TemporaryDirectory() as tmp_dir:
scheduler.save_config(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}")
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.repo_id)
def test_push_to_hub_in_organization(self):
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
scheduler.push_to_hub(self.org_repo_id, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id)
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.org_repo_id)
# Push to hub via save_config
with tempfile.TemporaryDirectory() as tmp_dir:
scheduler.save_config(tmp_dir, repo_id=self.org_repo_id, push_to_hub=True, token=TOKEN)
scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id)
assert type(scheduler) == type(scheduler_loaded)
# Reset repo
delete_repo(token=TOKEN, repo_id=self.org_repo_id)
|
diffusers/tests/schedulers/test_schedulers.py/0
|
{
"file_path": "diffusers/tests/schedulers/test_schedulers.py",
"repo_id": "diffusers",
"token_count": 17208
}
| 167
|
import gc
import unittest
import torch
from diffusers import (
DDIMScheduler,
StableDiffusionXLImg2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
)
from .single_file_testing_utils import SDXLSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin):
pipeline_class = StableDiffusionXLImg2ImgPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
original_config = (
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
)
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCase):
pipeline_class = StableDiffusionXLImg2ImgPipeline
ckpt_path = (
"https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors"
)
repo_id = "stabilityai/stable-diffusion-xl-refiner-1.0"
original_config = (
"https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml"
)
def test_single_file_format_inference_is_same_as_pretrained(self):
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.unet.set_default_attn_processor()
pipe.enable_model_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe(
prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np"
).images[0]
pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16)
pipe_single_file.scheduler = DDIMScheduler.from_config(pipe_single_file.scheduler.config)
pipe_single_file.unet.set_default_attn_processor()
pipe_single_file.enable_model_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
image_single_file = pipe_single_file(
prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np"
).images[0]
max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten())
assert max_diff < 5e-4
|
diffusers/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py/0
|
{
"file_path": "diffusers/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py",
"repo_id": "diffusers",
"token_count": 1728
}
| 168
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import requests
# Configuration
GITHUB_REPO = "huggingface/diffusers"
GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID")
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
PATH_IN_REPO = os.getenv("PATH_IN_REPO")
def main(args):
action_url = f"https://github.com/{GITHUB_REPO}/actions/runs/{GITHUB_RUN_ID}"
if args.status == "success":
hub_path = f"https://huggingface.co/datasets/diffusers/community-pipelines-mirror/tree/main/{PATH_IN_REPO}"
message = (
"✅ Community pipelines successfully mirrored.\n"
f"🕸️ GitHub Action URL: {action_url}.\n"
f"🤗 Hub location: {hub_path}."
)
else:
message = f"❌ Something wrong happened. Check out the GitHub Action to know more: {action_url}."
payload = {"text": message}
response = requests.post(SLACK_WEBHOOK_URL, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--status", type=str, default="success", choices=["success", "failure"])
args = parser.parse_args()
main(args)
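# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): the script reads its configuration from the
# environment, so an invocation from a GitHub Action step might look like
#
#   SLACK_WEBHOOK_URL=<webhook_url> GITHUB_RUN_ID=<run_id> PATH_IN_REPO=<path> \
#       python utils/notify_community_pipelines_mirror.py --status success
#
# The placeholder values are assumptions; only `--status` is an actual CLI flag.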
|
diffusers/utils/notify_community_pipelines_mirror.py/0
|
{
"file_path": "diffusers/utils/notify_community_pipelines_mirror.py",
"repo_id": "diffusers",
"token_count": 679
}
| 169
|
# @package _global_
# Change the seed to match what PushT eval uses
# (to avoid evaluating on seeds used for generating the training data).
seed: 100000
# Change the dataset repository to the PushT one.
dataset_repo_id: lerobot/pusht
override_dataset_stats:
observation.image:
# stats from imagenet, since we use a pretrained vision model
mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
training:
offline_steps: 80000
online_steps: 0
eval_freq: 10000
save_freq: 100000
log_freq: 250
save_model: true
batch_size: 8
lr: 1e-5
lr_backbone: 1e-5
weight_decay: 1e-4
grad_clip_norm: 10
online_steps_between_rollouts: 1
delta_timestamps:
action: "[i / ${fps} for i in range(${policy.chunk_size})]"
eval:
n_episodes: 50
batch_size: 50
# See `configuration_act.py` for more details.
policy:
name: act
# Input / output structure.
n_obs_steps: 1
chunk_size: 100 # chunk_size
n_action_steps: 100
input_shapes:
observation.image: [3, 96, 96]
observation.state: ["${env.state_dim}"]
output_shapes:
action: ["${env.action_dim}"]
# Normalization / Unnormalization
input_normalization_modes:
observation.image: mean_std
# Use min_max normalization just because it's more standard.
observation.state: min_max
output_normalization_modes:
# Use min_max normalization just because it's more standard.
action: min_max
# Architecture.
# Vision backbone.
vision_backbone: resnet18
pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
replace_final_stride_with_dilation: false
# Transformer layers.
pre_norm: false
dim_model: 512
n_heads: 8
dim_feedforward: 3200
feedforward_activation: relu
n_encoder_layers: 4
# Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
# that means only the first layer is used. Here we match the original implementation by setting this to 1.
# See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
n_decoder_layers: 1
# VAE.
use_vae: true
latent_dim: 32
n_vae_encoder_layers: 4
# Inference.
temporal_ensemble_coeff: null
# Training and loss computation.
dropout: 0.1
kl_weight: 10.0
|
lerobot/examples/advanced/1_train_act_pusht/act_pusht.yaml/0
|
{
"file_path": "lerobot/examples/advanced/1_train_act_pusht/act_pusht.yaml",
"repo_id": "lerobot",
"token_count": 848
}
| 170
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process UMI (Universal Manipulation Interface) data stored in Zarr format like in: https://github.com/real-stanford/universal_manipulation_interface"""
import logging
import shutil
from pathlib import Path
import torch
import tqdm
import zarr
from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub._umi_imagecodecs_numcodecs import register_codecs
from lerobot.common.datasets.push_dataset_to_hub.utils import (
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
def check_format(raw_dir) -> bool:
zarr_path = raw_dir / "cup_in_the_wild.zarr"
zarr_data = zarr.open(zarr_path, mode="r")
required_datasets = {
"data/robot0_demo_end_pose",
"data/robot0_demo_start_pose",
"data/robot0_eef_pos",
"data/robot0_eef_rot_axis_angle",
"data/robot0_gripper_width",
"meta/episode_ends",
"data/camera0_rgb",
}
for dataset in required_datasets:
if dataset not in zarr_data:
return False
# mandatory to access zarr_data
register_codecs()
nb_frames = zarr_data["data/camera0_rgb"].shape[0]
required_datasets.remove("meta/episode_ends")
assert all(nb_frames == zarr_data[dataset].shape[0] for dataset in required_datasets)
return True
def load_from_raw(
raw_dir: Path,
videos_dir: Path,
fps: int,
video: bool,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
zarr_path = raw_dir / "cup_in_the_wild.zarr"
zarr_data = zarr.open(zarr_path, mode="r")
# We process the image data separately because it is too large to fit in memory
end_pose = torch.from_numpy(zarr_data["data/robot0_demo_end_pose"][:])
start_pos = torch.from_numpy(zarr_data["data/robot0_demo_start_pose"][:])
eff_pos = torch.from_numpy(zarr_data["data/robot0_eef_pos"][:])
eff_rot_axis_angle = torch.from_numpy(zarr_data["data/robot0_eef_rot_axis_angle"][:])
gripper_width = torch.from_numpy(zarr_data["data/robot0_gripper_width"][:])
states_pos = torch.cat([eff_pos, eff_rot_axis_angle], dim=1)
states = torch.cat([states_pos, gripper_width], dim=1)
episode_ends = zarr_data["meta/episode_ends"][:]
num_episodes = episode_ends.shape[0]
# We convert it to a torch tensor later because the jit function does not support torch tensors
episode_ends = torch.from_numpy(episode_ends)
# load data indices from which each episode starts and ends
from_ids, to_ids = [], []
from_idx = 0
for to_idx in episode_ends:
from_ids.append(from_idx)
to_ids.append(to_idx)
from_idx = to_idx
ep_dicts_dir = videos_dir / "ep_dicts"
ep_dicts_dir.mkdir(exist_ok=True, parents=True)
ep_dicts = []
ep_ids = episodes if episodes else range(num_episodes)
for ep_idx, selected_ep_idx in tqdm.tqdm(enumerate(ep_ids)):
ep_dict_path = ep_dicts_dir / f"{ep_idx}"
if not ep_dict_path.is_file():
from_idx = from_ids[selected_ep_idx]
to_idx = to_ids[selected_ep_idx]
num_frames = to_idx - from_idx
# TODO(rcadene): save temporary images of the episode?
state = states[from_idx:to_idx]
ep_dict = {}
# load 57MB of images in RAM (400x224x224x3 uint8)
imgs_array = zarr_data["data/camera0_rgb"][from_idx:to_idx]
img_key = "observation.image"
if video:
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
if not video_path.is_file():
# save png images in temporary directory
tmp_imgs_dir = videos_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
# encode images to a mp4 video
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
# store the reference to the video frame
ep_dict[img_key] = [
{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
]
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
ep_dict["observation.state"] = state
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["episode_data_index_from"] = torch.tensor([from_idx] * num_frames)
ep_dict["episode_data_index_to"] = torch.tensor([from_idx + num_frames] * num_frames)
ep_dict["end_pose"] = end_pose[from_idx:to_idx]
ep_dict["start_pos"] = start_pos[from_idx:to_idx]
ep_dict["gripper_width"] = gripper_width[from_idx:to_idx]
torch.save(ep_dict, ep_dict_path)
else:
ep_dict = torch.load(ep_dict_path)
ep_dicts.append(ep_dict)
data_dict = concatenate_episodes(ep_dicts)
total_frames = data_dict["frame_index"].shape[0]
data_dict["index"] = torch.arange(0, total_frames, 1)
return data_dict
def to_hf_dataset(data_dict, video):
features = {}
if video:
features["observation.image"] = VideoFrame()
else:
features["observation.image"] = Image()
features["observation.state"] = Sequence(
length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
)
features["episode_index"] = Value(dtype="int64", id=None)
features["frame_index"] = Value(dtype="int64", id=None)
features["timestamp"] = Value(dtype="float32", id=None)
features["index"] = Value(dtype="int64", id=None)
features["episode_data_index_from"] = Value(dtype="int64", id=None)
features["episode_data_index_to"] = Value(dtype="int64", id=None)
# `start_pos` and `end_pos` respectively represent the positions of the end-effector
# at the beginning and the end of the episode.
# `gripper_width` indicates the distance between the grippers, and this value is included
# in the state vector, which comprises the concatenation of the end-effector position
# and gripper width.
features["end_pose"] = Sequence(
length=data_dict["end_pose"].shape[1], feature=Value(dtype="float32", id=None)
)
features["start_pos"] = Sequence(
length=data_dict["start_pos"].shape[1], feature=Value(dtype="float32", id=None)
)
features["gripper_width"] = Sequence(
length=data_dict["gripper_width"].shape[1], feature=Value(dtype="float32", id=None)
)
hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
hf_dataset.set_transform(hf_transform_to_torch)
return hf_dataset
def from_raw_to_lerobot_format(
raw_dir: Path,
videos_dir: Path,
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# sanity check
check_format(raw_dir)
if fps is None:
# For umi cup in the wild: https://arxiv.org/pdf/2402.10329#table.caption.16
fps = 10
if not video:
logging.warning(
"Generating UMI dataset without `video=True` creates ~150GB on disk and requires ~80GB in RAM."
)
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
"codebase_version": CODEBASE_VERSION,
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info
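# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the directory paths are
# placeholder assumptions; `raw_dir` must contain `cup_in_the_wild.zarr`.
if __name__ == "__main__":
    hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
        raw_dir=Path("data/umi_raw"),
        videos_dir=Path("data/umi_videos"),
        fps=None,  # falls back to the 10 fps used for the UMI "cup in the wild" data
        video=True,
    )
    print(info)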
|
lerobot/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py/0
|
{
"file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py",
"repo_id": "lerobot",
"token_count": 3818
}
| 171
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A protocol that all policies should follow.
This provides a mechanism for type-hinting and isinstance checks without requiring the policy classes to
subclass a base class.
The protocol structure, method signatures, and docstrings should be used by developers as a reference for
how to implement new policies.
"""
from typing import Protocol, runtime_checkable
from torch import Tensor
@runtime_checkable
class Policy(Protocol):
"""The required interface for implementing a policy.
We also expect all policies to subclass torch.nn.Module and PyTorchModelHubMixin.
"""
name: str
def __init__(self, cfg, dataset_stats: dict[str, dict[str, Tensor]] | None = None):
"""
Args:
cfg: Policy configuration class instance or None, in which case the default instantiation of the
configuration class is used.
dataset_stats: Dataset statistics to be used for normalization.
"""
def reset(self):
"""To be called whenever the environment is reset.
Does things like clearing caches.
"""
def forward(self, batch: dict[str, Tensor]) -> dict:
"""Run the batch through the model and compute the loss for training or validation.
Returns a dictionary with "loss" and potentially other information. Apart from "loss" which is a Tensor, all
other items should be logging-friendly, native Python types.
"""
def select_action(self, batch: dict[str, Tensor]) -> Tensor:
"""Return one action to run in the environment (potentially in batch mode).
When the model uses a history of observations, or outputs a sequence of actions, this method deals
with caching.
"""
@runtime_checkable
class PolicyWithUpdate(Policy, Protocol):
def update(self):
"""An update method that is to be called after a training optimization step.
Implements any additional updates the model parameters may need (for example, doing an EMA step for a
target model, or incrementing an internal buffer).
"""
|
lerobot/lerobot/common/policies/policy_protocol.py/0
|
{
"file_path": "lerobot/lerobot/common/policies/policy_protocol.py",
"repo_id": "lerobot",
"token_count": 818
}
| 172
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
import importlib.util
import logging
def is_package_available(pkg_name: str, return_version: bool = False) -> tuple[bool, str] | bool:
"""Copied from https://github.com/huggingface/transformers/blob/main/src/transformers/utils/import_utils.py
Check if the package spec exists and grab its version to avoid importing a local directory.
**Note:** this doesn't work for all packages.
"""
package_exists = importlib.util.find_spec(pkg_name) is not None
package_version = "N/A"
if package_exists:
try:
# Primary method to get the package version
package_version = importlib.metadata.version(pkg_name)
except importlib.metadata.PackageNotFoundError:
# Fallback method: Only for "torch" and versions containing "dev"
if pkg_name == "torch":
try:
package = importlib.import_module(pkg_name)
temp_version = getattr(package, "__version__", "N/A")
# Check if the version contains "dev"
if "dev" in temp_version:
package_version = temp_version
package_exists = True
else:
package_exists = False
except ImportError:
# If the package can't be imported, it's not available
package_exists = False
else:
# For packages other than "torch", don't attempt the fallback and set as not available
package_exists = False
logging.debug(f"Detected {pkg_name} version: {package_version}")
if return_version:
return package_exists, package_version
else:
return package_exists
_torch_available, _torch_version = is_package_available("torch", return_version=True)
_gym_xarm_available = is_package_available("gym_xarm")
_gym_aloha_available = is_package_available("gym_aloha")
_gym_pusht_available = is_package_available("gym_pusht")
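# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how the helper and the
# module-level flags above are typically consumed. The package names are only
# illustrative.
if __name__ == "__main__":
    torch_ok, torch_version = is_package_available("torch", return_version=True)
    print(f"torch available: {torch_ok} (version: {torch_version})")
    if not _gym_pusht_available:
        print("gym_pusht is not installed, so the PushT environment cannot be used.")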
|
lerobot/lerobot/common/utils/import_utils.py/0
|
{
"file_path": "lerobot/lerobot/common/utils/import_utils.py",
"repo_id": "lerobot",
"token_count": 1045
}
| 173
|
# @package _global_
# Train with:
#
# python lerobot/scripts/train.py \
# env=pusht \
# env.gym.obs_type=environment_state_agent_pos \
# policy=tdmpc_pusht_keypoints \
# eval.batch_size=50 \
# eval.n_episodes=50 \
# eval.use_async_envs=true \
# device=cuda \
# use_amp=true
seed: 1
dataset_repo_id: lerobot/pusht_keypoints
training:
offline_steps: 0
# Offline training dataloader
num_workers: 4
batch_size: 256
grad_clip_norm: 10.0
lr: 3e-4
eval_freq: 10000
log_freq: 500
save_freq: 50000
online_steps: 1000000
online_rollout_n_episodes: 10
online_rollout_batch_size: 10
online_steps_between_rollouts: 1000
online_sampling_ratio: 1.0
online_env_seed: 10000
online_buffer_capacity: 40000
online_buffer_seed_size: 0
do_online_rollout_async: false
delta_timestamps:
observation.environment_state: "[i / ${fps} for i in range(${policy.horizon} + 1)]"
observation.state: "[i / ${fps} for i in range(${policy.horizon} + 1)]"
action: "[i / ${fps} for i in range(${policy.horizon})]"
next.reward: "[i / ${fps} for i in range(${policy.horizon})]"
policy:
name: tdmpc
pretrained_model_path:
# Input / output structure.
n_action_repeats: 1
horizon: 5
n_action_steps: 5
input_shapes:
# TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
observation.environment_state: [16]
observation.state: ["${env.state_dim}"]
output_shapes:
action: ["${env.action_dim}"]
# Normalization / Unnormalization
input_normalization_modes:
observation.environment_state: min_max
observation.state: min_max
output_normalization_modes:
action: min_max
# Architecture / modeling.
# Neural networks.
image_encoder_hidden_dim: 32
state_encoder_hidden_dim: 256
latent_dim: 50
q_ensemble_size: 5
mlp_dim: 512
# Reinforcement learning.
discount: 0.98
# Inference.
use_mpc: true
cem_iterations: 6
max_std: 2.0
min_std: 0.05
n_gaussian_samples: 512
n_pi_samples: 51
uncertainty_regularizer_coeff: 1.0
n_elites: 50
elite_weighting_temperature: 0.5
gaussian_mean_momentum: 0.1
# Training and loss computation.
max_random_shift_ratio: 0.0476
# Loss coefficients.
reward_coeff: 0.5
expectile_weight: 0.9
value_coeff: 0.1
consistency_coeff: 20.0
advantage_scaling: 3.0
pi_coeff: 0.5
temporal_decay_coeff: 0.5
# Target model.
target_model_momentum: 0.995
|
lerobot/lerobot/configs/policy/tdmpc_pusht_keypoints.yaml/0
|
{
"file_path": "lerobot/lerobot/configs/policy/tdmpc_pusht_keypoints.yaml",
"repo_id": "lerobot",
"token_count": 971
}
| 174
|
version https://git-lfs.github.com/spec/v1
oid sha256:5c96f47b569b7af82e05200213d733626664150aa7c5ae3298fd04a2138a2023
size 4344
|
lerobot/tests/data/lerobot/aloha_mobile_elevator/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_mobile_elevator/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 64
}
| 175
|
version https://git-lfs.github.com/spec/v1
oid sha256:fbdd33bb556b796a1c09d62b00379af1b8769c1c1e1b2e84cb4f3942d45522df
size 136
|
lerobot/tests/data/lerobot/aloha_static_candy/meta_data/episode_data_index.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_candy/meta_data/episode_data_index.safetensors",
"repo_id": "lerobot",
"token_count": 66
}
| 176
|
version https://git-lfs.github.com/spec/v1
oid sha256:9080d7e7dd70914305aa93e6933e9754b0ee4fd06411507906f91a2c4337f2c3
size 4208
|
lerobot/tests/data/lerobot/aloha_static_cups_open/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_cups_open/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 67
}
| 177
|
version https://git-lfs.github.com/spec/v1
oid sha256:b8646242adb25c8986e2200a57c19de7fe126af12f4f3593ff53d31e9e3d6339
size 4752
|
lerobot/tests/data/lerobot/aloha_static_vinh_cup/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/aloha_static_vinh_cup/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 65
}
| 178
|
version https://git-lfs.github.com/spec/v1
oid sha256:0e86b01d7cd15b629eb4645b1f41d5eff2577d0570d423d11e9a97bc0c7c27a6
size 3848
|
lerobot/tests/data/lerobot/unitreeh1_rearrange_objects/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/unitreeh1_rearrange_objects/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 69
}
| 179
|
version https://git-lfs.github.com/spec/v1
oid sha256:95e0985c9180aee035813b801b3f92746fbbc65fb015826acd265b75c68c3049
size 3848
|
lerobot/tests/data/lerobot/unitreeh1_warehouse/meta_data/stats.safetensors/0
|
{
"file_path": "lerobot/tests/data/lerobot/unitreeh1_warehouse/meta_data/stats.safetensors",
"repo_id": "lerobot",
"token_count": 65
}
| 180
|
version https://git-lfs.github.com/spec/v1
oid sha256:c3dcff0a705ebfdaf11b7f49ad85b464eff03477ace3d63ce45d6a3a10b429d5
size 111338
|
lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_0.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_0.safetensors",
"repo_id": "lerobot",
"token_count": 65
}
| 181
|
version https://git-lfs.github.com/spec/v1
oid sha256:e39afdf1f3db8a72a1095a5a0ffdb7e67f478a28bd73e59cda197687da8d236c
size 68
|
lerobot/tests/data/save_policy_to_safetensors/aloha_act/output_dict.safetensors/0
|
{
"file_path": "lerobot/tests/data/save_policy_to_safetensors/aloha_act/output_dict.safetensors",
"repo_id": "lerobot",
"token_count": 66
}
| 182
|
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from copy import deepcopy
from itertools import chain
from pathlib import Path
import einops
import pytest
import torch
from datasets import Dataset
from huggingface_hub import HfApi
from safetensors.torch import load_file
import lerobot
from lerobot.common.datasets.compute_stats import (
aggregate_stats,
compute_stats,
get_stats_einops_patterns,
)
from lerobot.common.datasets.factory import make_dataset
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, MultiLeRobotDataset
from lerobot.common.datasets.utils import (
create_branch,
flatten_dict,
hf_transform_to_torch,
load_previous_and_future_frames,
unflatten_dict,
)
from lerobot.common.utils.utils import init_hydra_config, seeded_context
from tests.utils import DEFAULT_CONFIG_PATH, DEVICE
@pytest.mark.parametrize(
"env_name, repo_id, policy_name",
lerobot.env_dataset_policy_triplets
+ [("aloha", ["lerobot/aloha_sim_insertion_human", "lerobot/aloha_sim_transfer_cube_human"], "act")],
)
def test_factory(env_name, repo_id, policy_name):
"""
Tests that:
- we can create a dataset with the factory.
- for a commonly used set of data keys, the data dimensions are correct.
"""
cfg = init_hydra_config(
DEFAULT_CONFIG_PATH,
overrides=[
f"env={env_name}",
f"dataset_repo_id={repo_id}",
f"policy={policy_name}",
f"device={DEVICE}",
],
)
dataset = make_dataset(cfg)
delta_timestamps = dataset.delta_timestamps
camera_keys = dataset.camera_keys
item = dataset[0]
keys_ndim_required = [
("action", 1, True),
("episode_index", 0, True),
("frame_index", 0, True),
("timestamp", 0, True),
# TODO(rcadene): should we rename it agent_pos?
("observation.state", 1, True),
("next.reward", 0, False),
("next.done", 0, False),
]
# test number of dimensions
for key, ndim, required in keys_ndim_required:
if key not in item:
if required:
assert key in item, f"{key}"
else:
logging.warning(f'Missing key in dataset: "{key}" not in {dataset}.')
continue
if delta_timestamps is not None and key in delta_timestamps:
assert item[key].ndim == ndim + 1, f"{key}"
assert item[key].shape[0] == len(delta_timestamps[key]), f"{key}"
else:
assert item[key].ndim == ndim, f"{key}"
if key in camera_keys:
assert item[key].dtype == torch.float32, f"{key}"
# TODO(rcadene): we assume for now that image normalization takes place in the model
assert item[key].max() <= 1.0, f"{key}"
assert item[key].min() >= 0.0, f"{key}"
if delta_timestamps is not None and key in delta_timestamps:
# test t,c,h,w
assert item[key].shape[1] == 3, f"{key}"
else:
# test c,h,w
assert item[key].shape[0] == 3, f"{key}"
if delta_timestamps is not None:
# test missing keys in delta_timestamps
for key in delta_timestamps:
assert key in item, f"{key}"
# TODO(alexander-soare): If you're hunting for savings on testing time, this takes about 5 seconds.
def test_multilerobotdataset_frames():
"""Check that all dataset frames are incorporated."""
# Note: use the image variants of the dataset to make the test approx 3x faster.
# Note: We really do need three repo_ids here as at some point this caught an issue with the chaining
# logic that wouldn't be caught with two repo IDs.
repo_ids = [
"lerobot/aloha_sim_insertion_human_image",
"lerobot/aloha_sim_transfer_cube_human_image",
"lerobot/aloha_sim_insertion_scripted_image",
]
sub_datasets = [LeRobotDataset(repo_id) for repo_id in repo_ids]
dataset = MultiLeRobotDataset(repo_ids)
assert len(dataset) == sum(len(d) for d in sub_datasets)
assert dataset.num_samples == sum(d.num_samples for d in sub_datasets)
assert dataset.num_episodes == sum(d.num_episodes for d in sub_datasets)
# Run through all items of the LeRobotDatasets in parallel with the items of the MultiLerobotDataset and
# check they match.
expected_dataset_indices = []
for i, sub_dataset in enumerate(sub_datasets):
expected_dataset_indices.extend([i] * len(sub_dataset))
for expected_dataset_index, sub_dataset_item, dataset_item in zip(
expected_dataset_indices, chain(*sub_datasets), dataset, strict=True
):
dataset_index = dataset_item.pop("dataset_index")
assert dataset_index == expected_dataset_index
assert sub_dataset_item.keys() == dataset_item.keys()
for k in sub_dataset_item:
assert torch.equal(sub_dataset_item[k], dataset_item[k])
def test_compute_stats_on_xarm():
"""Check that the statistics are computed correctly according to the stats_patterns property.
We compare with taking a straight min, mean, max, std of all the data in one pass (which we can do
because we are working with a small dataset).
"""
dataset = LeRobotDataset("lerobot/xarm_lift_medium")
# reduce size of dataset sample on which stats compute is tested to 10 frames
dataset.hf_dataset = dataset.hf_dataset.select(range(10))
# Note: we set the batch size to be smaller than the whole dataset to make sure we are testing batched
# computation of the statistics. While doing this, we also make sure it works when we don't divide the
# dataset into even batches.
computed_stats = compute_stats(dataset, batch_size=int(len(dataset) * 0.25), num_workers=0)
# get einops patterns to aggregate batches and compute statistics
stats_patterns = get_stats_einops_patterns(dataset)
# get all frames from the dataset in the same dtype and range as during compute_stats
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=0,
batch_size=len(dataset),
shuffle=False,
)
full_batch = next(iter(dataloader))
# compute stats based on all frames from the dataset without any batching
expected_stats = {}
for k, pattern in stats_patterns.items():
full_batch[k] = full_batch[k].float()
expected_stats[k] = {}
expected_stats[k]["mean"] = einops.reduce(full_batch[k], pattern, "mean")
expected_stats[k]["std"] = torch.sqrt(
einops.reduce((full_batch[k] - expected_stats[k]["mean"]) ** 2, pattern, "mean")
)
expected_stats[k]["min"] = einops.reduce(full_batch[k], pattern, "min")
expected_stats[k]["max"] = einops.reduce(full_batch[k], pattern, "max")
# test computed stats match expected stats
for k in stats_patterns:
assert torch.allclose(computed_stats[k]["mean"], expected_stats[k]["mean"])
assert torch.allclose(computed_stats[k]["std"], expected_stats[k]["std"])
assert torch.allclose(computed_stats[k]["min"], expected_stats[k]["min"])
assert torch.allclose(computed_stats[k]["max"], expected_stats[k]["max"])
# load stats used during training which are expected to match the ones returned by computed_stats
loaded_stats = dataset.stats # noqa: F841
# TODO(rcadene): we can't test this because expected_stats is computed on a subset
# # test loaded stats match expected stats
# for k in stats_patterns:
# assert torch.allclose(loaded_stats[k]["mean"], expected_stats[k]["mean"])
# assert torch.allclose(loaded_stats[k]["std"], expected_stats[k]["std"])
# assert torch.allclose(loaded_stats[k]["min"], expected_stats[k]["min"])
# assert torch.allclose(loaded_stats[k]["max"], expected_stats[k]["max"])
def test_load_previous_and_future_frames_within_tolerance():
hf_dataset = Dataset.from_dict(
{
"timestamp": [0.1, 0.2, 0.3, 0.4, 0.5],
"index": [0, 1, 2, 3, 4],
"episode_index": [0, 0, 0, 0, 0],
}
)
hf_dataset.set_transform(hf_transform_to_torch)
episode_data_index = {
"from": torch.tensor([0]),
"to": torch.tensor([5]),
}
delta_timestamps = {"index": [-0.2, 0, 0.139]}
tol = 0.04
item = hf_dataset[2]
item = load_previous_and_future_frames(item, hf_dataset, episode_data_index, delta_timestamps, tol)
data, is_pad = item["index"], item["index_is_pad"]
assert torch.equal(data, torch.tensor([0, 2, 3])), "Data does not match expected values"
assert not is_pad.any(), "Unexpected padding detected"
def test_load_previous_and_future_frames_outside_tolerance_inside_episode_range():
hf_dataset = Dataset.from_dict(
{
"timestamp": [0.1, 0.2, 0.3, 0.4, 0.5],
"index": [0, 1, 2, 3, 4],
"episode_index": [0, 0, 0, 0, 0],
}
)
hf_dataset.set_transform(hf_transform_to_torch)
episode_data_index = {
"from": torch.tensor([0]),
"to": torch.tensor([5]),
}
delta_timestamps = {"index": [-0.2, 0, 0.141]}
tol = 0.04
item = hf_dataset[2]
with pytest.raises(AssertionError):
load_previous_and_future_frames(item, hf_dataset, episode_data_index, delta_timestamps, tol)
def test_load_previous_and_future_frames_outside_tolerance_outside_episode_range():
hf_dataset = Dataset.from_dict(
{
"timestamp": [0.1, 0.2, 0.3, 0.4, 0.5],
"index": [0, 1, 2, 3, 4],
"episode_index": [0, 0, 0, 0, 0],
}
)
hf_dataset.set_transform(hf_transform_to_torch)
episode_data_index = {
"from": torch.tensor([0]),
"to": torch.tensor([5]),
}
delta_timestamps = {"index": [-0.3, -0.24, 0, 0.26, 0.3]}
tol = 0.04
item = hf_dataset[2]
item = load_previous_and_future_frames(item, hf_dataset, episode_data_index, delta_timestamps, tol)
data, is_pad = item["index"], item["index_is_pad"]
assert torch.equal(data, torch.tensor([0, 0, 2, 4, 4])), "Data does not match expected values"
assert torch.equal(
is_pad, torch.tensor([True, False, False, True, True])
), "Padding does not match expected values"
def test_flatten_unflatten_dict():
d = {
"obs": {
"min": 0,
"max": 1,
"mean": 2,
"std": 3,
},
"action": {
"min": 4,
"max": 5,
"mean": 6,
"std": 7,
},
}
original_d = deepcopy(d)
d = unflatten_dict(flatten_dict(d))
# test equality between nested dicts
assert json.dumps(original_d, sort_keys=True) == json.dumps(d, sort_keys=True), f"{original_d} != {d}"
@pytest.mark.parametrize(
"repo_id",
[
"lerobot/pusht",
"lerobot/aloha_sim_insertion_human",
"lerobot/xarm_lift_medium",
# (michel-aractingi) commenting the two datasets from openx as test is failing
# "lerobot/nyu_franka_play_dataset",
# "lerobot/cmu_stretch",
],
)
def test_backward_compatibility(repo_id):
"""The artifacts for this test have been generated by `tests/scripts/save_dataset_to_safetensors.py`."""
dataset = LeRobotDataset(
repo_id,
)
test_dir = Path("tests/data/save_dataset_to_safetensors") / repo_id
def load_and_compare(i):
new_frame = dataset[i] # noqa: B023
old_frame = load_file(test_dir / f"frame_{i}.safetensors") # noqa: B023
# ignore language instructions (if exists) in language conditioned datasets
# TODO (michel-aractingi): transform language obs to language embeddings via tokenizer
new_frame.pop("language_instruction", None)
old_frame.pop("language_instruction", None)
new_keys = set(new_frame.keys())
old_keys = set(old_frame.keys())
assert new_keys == old_keys, f"{new_keys=} and {old_keys=} are not the same"
for key in new_frame:
assert torch.isclose(
new_frame[key], old_frame[key]
).all(), f"{key=} for index={i} does not contain the same value"
# test 2 first frames of first episode
i = dataset.episode_data_index["from"][0].item()
load_and_compare(i)
load_and_compare(i + 1)
# test 2 frames at the middle of first episode
i = int((dataset.episode_data_index["to"][0].item() - dataset.episode_data_index["from"][0].item()) / 2)
load_and_compare(i)
load_and_compare(i + 1)
# test 2 last frames of first episode
i = dataset.episode_data_index["to"][0].item()
load_and_compare(i - 2)
load_and_compare(i - 1)
# TODO(rcadene): Enable testing on second and last episode
# We currently can't because our test dataset only contains the first episode
# # test 2 first frames of second episode
# i = dataset.episode_data_index["from"][1].item()
# load_and_compare(i)
# load_and_compare(i + 1)
# # test 2 last frames of second episode
# i = dataset.episode_data_index["to"][1].item()
# load_and_compare(i - 2)
# load_and_compare(i - 1)
# # test 2 last frames of last episode
# i = dataset.episode_data_index["to"][-1].item()
# load_and_compare(i - 2)
# load_and_compare(i - 1)
def test_aggregate_stats():
"""Makes 3 basic datasets and checks that aggregate stats are computed correctly."""
with seeded_context(0):
data_a = torch.rand(30, dtype=torch.float32)
data_b = torch.rand(20, dtype=torch.float32)
data_c = torch.rand(20, dtype=torch.float32)
hf_dataset_1 = Dataset.from_dict(
{"a": data_a[:10], "b": data_b[:10], "c": data_c[:10], "index": torch.arange(10)}
)
hf_dataset_1.set_transform(hf_transform_to_torch)
hf_dataset_2 = Dataset.from_dict({"a": data_a[10:20], "b": data_b[10:], "index": torch.arange(10)})
hf_dataset_2.set_transform(hf_transform_to_torch)
hf_dataset_3 = Dataset.from_dict({"a": data_a[20:], "c": data_c[10:], "index": torch.arange(10)})
hf_dataset_3.set_transform(hf_transform_to_torch)
dataset_1 = LeRobotDataset.from_preloaded("d1", hf_dataset=hf_dataset_1)
dataset_1.stats = compute_stats(dataset_1, batch_size=len(hf_dataset_1), num_workers=0)
dataset_2 = LeRobotDataset.from_preloaded("d2", hf_dataset=hf_dataset_2)
dataset_2.stats = compute_stats(dataset_2, batch_size=len(hf_dataset_2), num_workers=0)
dataset_3 = LeRobotDataset.from_preloaded("d3", hf_dataset=hf_dataset_3)
dataset_3.stats = compute_stats(dataset_3, batch_size=len(hf_dataset_3), num_workers=0)
stats = aggregate_stats([dataset_1, dataset_2, dataset_3])
for data_key, data in zip(["a", "b", "c"], [data_a, data_b, data_c], strict=True):
for agg_fn in ["mean", "min", "max"]:
assert torch.allclose(stats[data_key][agg_fn], einops.reduce(data, "n -> 1", agg_fn))
assert torch.allclose(stats[data_key]["std"], torch.std(data, correction=0))
@pytest.mark.skip("Requires internet access")
def test_create_branch():
api = HfApi()
repo_id = "cadene/test_create_branch"
repo_type = "dataset"
branch = "test"
ref = f"refs/heads/{branch}"
# Prepare a repo with a test branch
api.delete_repo(repo_id, repo_type=repo_type, missing_ok=True)
api.create_repo(repo_id, repo_type=repo_type)
create_branch(repo_id, repo_type=repo_type, branch=branch)
# Make sure the test branch exists
branches = api.list_repo_refs(repo_id, repo_type=repo_type).branches
refs = [branch.ref for branch in branches]
assert ref in refs
# Overwrite it
create_branch(repo_id, repo_type=repo_type, branch=branch)
# Clean
api.delete_repo(repo_id, repo_type=repo_type)
|
lerobot/tests/test_datasets.py/0
|
{
"file_path": "lerobot/tests/test_datasets.py",
"repo_id": "lerobot",
"token_count": 7009
}
| 183
|
from parler_tts import ParlerTTSForCausalLM, ParlerTTSForConditionalGeneration, ParlerTTSDecoderConfig
from transformers import AutoConfig
import os
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("save_directory", type=str, help="Directory where to save the model and the decoder.")
parser.add_argument("--text_model", type=str, help="Repository id or path to the text encoder.")
parser.add_argument("--audio_model", type=str, help="Repository id or path to the audio encoder.")
args = parser.parse_args()
text_model = args.text_model
encodec_version = args.audio_model
t5 = AutoConfig.from_pretrained(text_model)
encodec = AutoConfig.from_pretrained(encodec_version)
encodec_vocab_size = encodec.codebook_size
num_codebooks = encodec.num_codebooks
print("num_codebooks", num_codebooks)
decoder_config = ParlerTTSDecoderConfig(
vocab_size=encodec_vocab_size + 64, # + 64 instead of +1 to have a multiple of 64
max_position_embeddings=4096, # 30 s = 2580
num_hidden_layers=30,
ffn_dim=6144,
num_attention_heads=24,
num_key_value_heads=24,
layerdrop=0.0,
use_cache=True,
activation_function="gelu",
hidden_size=1536,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
pad_token_id=encodec_vocab_size,
eos_token_id=encodec_vocab_size,
bos_token_id=encodec_vocab_size + 1,
num_codebooks=num_codebooks,
)
decoder = ParlerTTSForCausalLM(decoder_config)
decoder.save_pretrained(os.path.join(args.save_directory, "decoder"))
model = ParlerTTSForConditionalGeneration.from_sub_models_pretrained(
text_encoder_pretrained_model_name_or_path=text_model,
audio_encoder_pretrained_model_name_or_path=encodec_version,
decoder_pretrained_model_name_or_path=os.path.join(args.save_directory, "decoder"),
vocab_size=t5.vocab_size,
)
# set the appropriate bos/pad token ids
model.generation_config.decoder_start_token_id = encodec_vocab_size + 1
model.generation_config.pad_token_id = encodec_vocab_size
model.generation_config.eos_token_id = encodec_vocab_size
# set other default generation config params
model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
model.generation_config.do_sample = True # True
model.config.pad_token_id = encodec_vocab_size
model.config.decoder_start_token_id = encodec_vocab_size + 1
model.save_pretrained(os.path.join(args.save_directory, "parler-tts-untrained-larger/"))
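# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative): the positional argument is the output
# directory and both encoder arguments are Hub repository ids or local paths.
# The placeholders below are assumptions, not recommended checkpoints.
#
#   python helpers/model_init_scripts/init_large_model.py ./parler_init \
#       --text_model <text_encoder_repo_or_path> \
#       --audio_model <audio_encoder_repo_or_path>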
|
parler-tts/helpers/model_init_scripts/init_large_model.py/0
|
{
"file_path": "parler-tts/helpers/model_init_scripts/init_large_model.py",
"repo_id": "parler-tts",
"token_count": 1090
}
| 184
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
_deps = [
"transformers>=4.43.0,<=4.43.3",
"torch",
"sentencepiece",
"descript-audio-codec",
]
_extras_dev_deps = [
"black~=23.1",
"isort>=5.5.4",
"ruff>=0.0.241,<=0.0.259",
]
_extras_training_deps = [
"jiwer",
"wandb",
"accelerate",
"evaluate",
"datasets[audio]>=2.14.5",
]
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
# read version
with open(os.path.join(here, "parler_tts", "__init__.py"), encoding="utf-8") as f:
for line in f:
if line.startswith("__version__"):
version = line.split("=")[1].strip().strip('"')
break
else:
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name="parler_tts",
version=version,
description="Toolkit for using and training Parler-TTS, a high-quality text-to-speech model.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=_deps,
extras_require={
"dev": _extras_dev_deps,
"train": _extras_training_deps,
},
)
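# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original file): given the extras declared
# above, an editable install for training would typically be
#
#   pip install -e ".[train]"
#
# and a development install (formatting / linting tools) would be
#
#   pip install -e ".[dev]"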
|
parler-tts/setup.py/0
|
{
"file_path": "parler-tts/setup.py",
"repo_id": "parler-tts",
"token_count": 722
}
| 185
|
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DeepSpeed
[DeepSpeed](https://www.deepspeed.ai/) is a library designed for speed and scale for distributed training of large models with billions of parameters. At its core is the Zero Redundancy Optimizer (ZeRO) that shards optimizer states (ZeRO-1), gradients (ZeRO-2), and parameters (ZeRO-3) across data parallel processes. This drastically reduces memory usage, allowing you to scale your training to billion parameter models. To unlock even more memory efficiency, ZeRO-Offload reduces GPU compute and memory by leveraging CPU resources during optimization.
Both of these features are supported in 🤗 Accelerate, and you can use them with 🤗 PEFT.
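If you prefer to set this up in code rather than through `accelerate config`, the snippet below is a minimal sketch of enabling ZeRO-3 programmatically via 🤗 Accelerate's `DeepSpeedPlugin`. The exact values (stage, gradient accumulation steps, precision) are placeholders you would adapt to your own setup.
```py
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# Minimal sketch: request ZeRO stage 3 (optimizer states + gradients + parameters sharded)
# directly from Python instead of answering the `accelerate config` questionnaire.
deepspeed_plugin = DeepSpeedPlugin(zero_stage=3, gradient_accumulation_steps=1)
accelerator = Accelerator(mixed_precision="bf16", deepspeed_plugin=deepspeed_plugin)
```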
## Compatibility with `bitsandbytes` quantization + LoRA
Below is a table that summarizes the compatibility between PEFT's LoRA, the [`bitsandbytes`](https://github.com/TimDettmers/bitsandbytes) library, and DeepSpeed Zero stages with respect to fine-tuning. DeepSpeed Zero-1 and 2 will have no effect at inference as stage 1 shards the optimizer states and stage 2 shards the optimizer states and gradients:
| DeepSpeed stage | Is compatible? |
|---|---|
| Zero-1 | 🟢 |
| Zero-2 | 🟢 |
| Zero-3 | 🟢 |
For DeepSpeed Stage 3 + QLoRA, please refer to the section [Use PEFT QLoRA and DeepSpeed with ZeRO3 for finetuning large models on multiple GPUs](#use-peft-qlora-and-deepspeed-with-zero3-for-finetuning-large-models-on-multiple-gpus) below.
To confirm these observations, we ran the SFT (Supervised Fine-tuning) [official example scripts](https://github.com/huggingface/trl/tree/main/examples) of the [Transformers Reinforcement Learning (TRL) library](https://github.com/huggingface/trl) using QLoRA + PEFT and the accelerate configs available [here](https://github.com/huggingface/trl/tree/main/examples/accelerate_configs). We ran these experiments on a 2x NVIDIA T4 GPU.
# Use PEFT and DeepSpeed with ZeRO3 for finetuning large models on multiple devices and multiple nodes
This section of the guide will help you learn how to use our DeepSpeed [training script](https://github.com/huggingface/peft/blob/main/examples/sft/train.py) for performing SFT. You'll configure the script to do SFT (supervised fine-tuning) of the Llama-70B model with LoRA and ZeRO-3 on 8xH100 80GB GPUs on a single machine. You can configure it to scale to multiple machines by changing the accelerate config.
## Configuration
Start by running the following command to [create a DeepSpeed configuration file](https://huggingface.co/docs/accelerate/quicktour#launching-your-distributed-script) with 🤗 Accelerate. The `--config_file` flag allows you to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache.
The configuration file is used to set the default options when you launch the training script.
```bash
accelerate config --config_file deepspeed_config.yaml
```
You'll be asked a few questions about your setup and prompted to configure the following arguments. In this example, you'll use ZeRO-3 so make sure you pick those options.
```bash
`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them. Pass the same value as you would pass via the cmd argument, else you will encounter a mismatch error.
`gradient_clipping`: Enable gradient clipping with value. Don't set this as you will be passing it via cmd arguments.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2. Set this to `none` as we don't want to enable offloading.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3. Set this to `none` as we don't want to enable offloading.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3. Set this to `True`.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3. Set this to `True`.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. Set this to `bf16`.
```
Once this is done, the corresponding config should look like the one below, and you can find it in the config folder at [deepspeed_config.yaml](https://github.com/huggingface/peft/blob/main/examples/sft/configs/deepspeed_config.yaml):
```yml
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
deepspeed_multinode_launcher: standard
gradient_accumulation_steps: 4
offload_optimizer_device: none
offload_param_device: none
zero3_init_flag: true
zero3_save_16bit_model: true
zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 8
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```
## Launch command
The launch command is available at [run_peft_deepspeed.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft_deepspeed.sh) and it is also shown below:
```bash
accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \
--seed 100 \
--model_name_or_path "meta-llama/Llama-2-70b-hf" \
--dataset_name "smangrul/ultrachat-10k-chatml" \
--chat_template_format "chatml" \
--add_special_tokens False \
--append_concat_token False \
--splits "train,test" \
--max_seq_len 2048 \
--num_train_epochs 1 \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \
--hub_strategy "every_save" \
--bf16 True \
--packing True \
--learning_rate 1e-4 \
--lr_scheduler_type "cosine" \
--weight_decay 1e-4 \
--warmup_ratio 0.0 \
--max_grad_norm 1.0 \
--output_dir "llama-sft-lora-deepspeed" \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 8 \
--gradient_accumulation_steps 4 \
--gradient_checkpointing True \
--use_reentrant False \
--dataset_text_field "content" \
--use_flash_attn True \
--use_peft_lora True \
--lora_r 8 \
--lora_alpha 16 \
--lora_dropout 0.1 \
--lora_target_modules "all-linear" \
--use_4bit_quantization False
```
Notice that we are using LoRA with rank=8, alpha=16 and targeting all linear layers. We are passing the DeepSpeed config file and finetuning the 70B Llama model on a subset of the ultrachat dataset.
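For reference, the LoRA flags in the launch command correspond roughly to the following `LoraConfig` (a sketch of what the script builds internally; the script's actual argument plumbing may differ slightly):
```py
from peft import LoraConfig

# Sketch of the LoRA setup implied by the launch command above:
# rank 8, alpha 16, dropout 0.1, applied to all linear layers of the model.
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules="all-linear",
    task_type="CAUSAL_LM",
)
```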
## The important parts
Let's dive a little deeper into the script so you can see what's going on, and understand how it works.
The first thing to know is that the script uses DeepSpeed for distributed training as the DeepSpeed config has been passed. The `SFTTrainer` class handles all the heavy lifting of creating the PEFT model using the peft config that is passed. After that, when you call `trainer.train()`, `SFTTrainer` internally uses 🤗 Accelerate to prepare the model, optimizer and trainer using the DeepSpeed config to create the DeepSpeed engine, which is then trained. The main code snippet is below:
```python
# trainer
trainer = SFTTrainer(
model=model,
tokenizer=tokenizer,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
peft_config=peft_config,
packing=data_args.packing,
dataset_kwargs={
"append_concat_token": data_args.append_concat_token,
"add_special_tokens": data_args.add_special_tokens,
},
dataset_text_field=data_args.dataset_text_field,
max_seq_length=data_args.max_seq_length,
)
trainer.accelerator.print(f"{trainer.model}")
# train
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
trainer.train(resume_from_checkpoint=checkpoint)
# saving final model
trainer.save_model()
```
## Memory usage
In the above example, the memory consumed per GPU is 64 GB (80%) as seen in the screenshot below:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/peft_deepspeed_mem_usage.png"/>
</div>
<small>GPU memory usage for the training run</small>
## More resources
You can also refer to this blog post [Falcon 180B Finetuning using 🤗 PEFT and DeepSpeed](https://medium.com/@sourabmangrulkar/falcon-180b-finetuning-using-peft-and-deepspeed-b92643091d99) on how to finetune the 180B Falcon model on 16 A100 GPUs across 2 machines.
# Use PEFT QLoRA and DeepSpeed with ZeRO3 for finetuning large models on multiple GPUs
In this section, we will look at how to use QLoRA and DeepSpeed Stage-3 for finetuning the 70B Llama model on 2x40GB GPUs.
For this, we first need `bitsandbytes>=0.43.0`, `accelerate>=0.28.0`, `transformers>4.38.2`, `trl>0.7.11` and `peft>0.9.0`. We need to set `zero3_init_flag` to true when using Accelerate config. Below is the config which can be found at [deepspeed_config_z3_qlora.yaml](https://github.com/huggingface/peft/blob/main/examples/sft/configs/deepspeed_config_z3_qlora.yaml):
```yml
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
deepspeed_multinode_launcher: standard
offload_optimizer_device: none
offload_param_device: none
zero3_init_flag: true
zero3_save_16bit_model: true
zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```
The launch command is given below; it is also available at [run_peft_qlora_deepspeed_stage3.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft_deepspeed.sh):
```bash
accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train.py \
--seed 100 \
--model_name_or_path "meta-llama/Llama-2-70b-hf" \
--dataset_name "smangrul/ultrachat-10k-chatml" \
--chat_template_format "chatml" \
--add_special_tokens False \
--append_concat_token False \
--splits "train,test" \
--max_seq_len 2048 \
--num_train_epochs 1 \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \
--hub_strategy "every_save" \
--bf16 True \
--packing True \
--learning_rate 1e-4 \
--lr_scheduler_type "cosine" \
--weight_decay 1e-4 \
--warmup_ratio 0.0 \
--max_grad_norm 1.0 \
--output_dir "llama-sft-qlora-dsz3" \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 2 \
--gradient_accumulation_steps 2 \
--gradient_checkpointing True \
--use_reentrant True \
--dataset_text_field "content" \
--use_flash_attn True \
--use_peft_lora True \
--lora_r 8 \
--lora_alpha 16 \
--lora_dropout 0.1 \
--lora_target_modules "all-linear" \
--use_4bit_quantization True \
--use_nested_quant True \
--bnb_4bit_compute_dtype "bfloat16" \
--bnb_4bit_quant_storage_dtype "bfloat16"
```
Notice the new argument being passed, `bnb_4bit_quant_storage_dtype`, which denotes the data type for packing the 4-bit parameters. For example, when it is set to `bfloat16`, **16/4 = 4** 4-bit params are packed together post quantization.
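If you want to sanity-check this packing arithmetic yourself, here is a small illustrative snippet (not part of the training script):
```py
import torch

# Each quantized parameter takes 4 bits; the storage dtype decides how many fit per element.
storage_dtype = torch.bfloat16
params_per_element = torch.finfo(storage_dtype).bits // 4
print(params_per_element)  # 4 -> four 4-bit params packed into one bfloat16 element
```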
In terms of training code, the important code changes are:
```diff
...
bnb_config = BitsAndBytesConfig(
load_in_4bit=args.use_4bit_quantization,
bnb_4bit_quant_type=args.bnb_4bit_quant_type,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=args.use_nested_quant,
+ bnb_4bit_quant_storage=quant_storage_dtype,
)
...
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
quantization_config=bnb_config,
trust_remote_code=True,
attn_implementation="flash_attention_2" if args.use_flash_attn else "eager",
+ torch_dtype=quant_storage_dtype or torch.float32,
)
```
Notice that `torch_dtype` for `AutoModelForCausalLM` is the same as the `bnb_4bit_quant_storage` data type. That's it. Everything else is handled by Trainer and TRL.
## Memory usage
In the above example, the memory consumed per GPU is **36.6 GB**. Therefore, what took 8x80GB GPUs with DeepSpeed Stage 3+LoRA and a couple of 80GB GPUs with DDP+QLoRA now requires 2x40GB GPUs. This makes finetuning of large models more accessible.
# Use PEFT and DeepSpeed with ZeRO3 and CPU Offloading for finetuning large models on a single GPU
This section of the guide will help you learn how to use our DeepSpeed [training script](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You'll configure the script to train a large model for conditional generation with ZeRO-3 and CPU Offload.
<Tip>
💡 To help you get started, check out our example training scripts for [causal language modeling](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py) and [conditional generation](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You can adapt these scripts for your own applications or even use them out of the box if your task is similar to the one in the scripts.
</Tip>
## Configuration
Start by running the following command to [create a DeepSpeed configuration file](https://huggingface.co/docs/accelerate/quicktour#launching-your-distributed-script) with 🤗 Accelerate. The `--config_file` flag allows you to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache.
The configuration file is used to set the default options when you launch the training script.
```bash
accelerate config --config_file ds_zero3_cpu.yaml
```
You'll be asked a few questions about your setup and prompted to configure the following arguments. In this example, you'll use ZeRO-3 along with CPU-Offload so make sure you pick those options.
```bash
`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
```
An example [configuration file](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/accelerate_ds_zero3_cpu_offload_config.yaml) might look like the following. The most important thing to notice is that `zero_stage` is set to `3`, and `offload_optimizer_device` and `offload_param_device` are set to `cpu`.
```yml
compute_environment: LOCAL_MACHINE
deepspeed_config:
gradient_accumulation_steps: 1
gradient_clipping: 1.0
offload_optimizer_device: cpu
offload_param_device: cpu
zero3_init_flag: true
zero3_save_16bit_model: true
zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
mixed_precision: 'no'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
use_cpu: false
```
## The important parts
Let's dive a little deeper into the script so you can see what's going on, and understand how it works.
Within the [`main`](https://github.com/huggingface/peft/blob/2822398fbe896f25d4dac5e468624dc5fd65a51b/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py#L103) function, the script creates an [`~accelerate.Accelerator`] class to initialize all the necessary requirements for distributed training.
<Tip>
💡 Feel free to change the model and dataset inside the `main` function. If your dataset format is different from the one in the script, you may also need to write your own preprocessing function.
</Tip>
The script also creates a configuration for the 🤗 PEFT method you're using, which in this case, is LoRA. The [`LoraConfig`] specifies the task type and important parameters such as the dimension of the low-rank matrices, the scaling factor for those matrices, and the dropout probability of the LoRA layers. If you want to use a different 🤗 PEFT method, make sure you replace `LoraConfig` with the appropriate [class](../package_reference/tuners).
```diff
def main():
+ accelerator = Accelerator()
model_name_or_path = "facebook/bart-large"
dataset_name = "twitter_complaints"
+ peft_config = LoraConfig(
task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
)
```
Throughout the script, you'll see the [`~accelerate.Accelerator.main_process_first`] and [`~accelerate.Accelerator.wait_for_everyone`] functions which help control and synchronize when processes are executed.
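They are typically used as in the sketch below (illustrative only; `dataset` and `preprocess_function` stand in for whatever preprocessing your script does):
```py
# Run the preprocessing on the main process first so the other processes
# reuse the cached result instead of redoing the work and racing on the same files.
with accelerator.main_process_first():
    processed_datasets = dataset.map(preprocess_function, batched=True)

# Block until every process has reached this point before moving on.
accelerator.wait_for_everyone()
```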
The [`get_peft_model`] function takes a base model and the [`peft_config`] you prepared earlier to create a [`PeftModel`]:
```diff
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
+ model = get_peft_model(model, peft_config)
```
Pass all the relevant training objects to 🤗 Accelerate's [`~accelerate.Accelerator.prepare`] which makes sure everything is ready for training:
```py
model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare(
model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler
)
```
The next bit of code checks whether the DeepSpeed plugin is used in the `Accelerator`, and if the plugin exists, then we check if we are using ZeRO-3. This conditional flag is used when calling the `generate` function during inference to sync GPUs when the model parameters are sharded:
```py
is_ds_zero_3 = False
if getattr(accelerator.state, "deepspeed_plugin", None):
is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3
```
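Later, when generating predictions, this flag is typically forwarded to `generate` as `synced_gpus` so that all ranks step through generation together while ZeRO-3 gathers the sharded parameters. A simplified sketch:
```py
# With ZeRO-3, every process must enter `generate` in lockstep;
# `synced_gpus=True` keeps the ranks synchronized until the longest sequence finishes.
outputs = model.generate(
    input_ids=batch["input_ids"],
    attention_mask=batch["attention_mask"],
    max_new_tokens=10,
    synced_gpus=is_ds_zero_3,
)
```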
Inside the training loop, the usual `loss.backward()` is replaced by 🤗 Accelerate's [`~accelerate.Accelerator.backward`] which uses the correct `backward()` method based on your configuration:
```diff
for epoch in range(num_epochs):
with TorchTracemalloc() as tracemalloc:
model.train()
total_loss = 0
for step, batch in enumerate(tqdm(train_dataloader)):
outputs = model(**batch)
loss = outputs.loss
total_loss += loss.detach().float()
+ accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
```
That is all! The rest of the script handles the training loop, evaluation, and even pushes the trained model to the Hub for you.
## Train
Run the following command to launch the training script. Earlier, you saved the configuration file to `ds_zero3_cpu.yaml`, so you'll need to pass the path to the launcher with the `--config_file` argument like this:
```bash
accelerate launch --config_file ds_zero3_cpu.yaml examples/peft_lora_seq2seq_accelerate_ds_zero3_offload.py
```
You'll see some output logs that track memory usage during training, and once it's completed, the script returns the accuracy and compares the predictions to the labels:
```bash
GPU Memory before entering the train : 1916
GPU Memory consumed at the end of the train (end-begin): 66
GPU Peak Memory consumed during the train (max-begin): 7488
GPU Total Peak Memory consumed during the train (max): 9404
CPU Memory before entering the train : 19411
CPU Memory consumed at the end of the train (end-begin): 0
CPU Peak Memory consumed during the train (max-begin): 0
CPU Total Peak Memory consumed during the train (max): 19411
epoch=4: train_ppl=tensor(1.0705, device='cuda:0') train_epoch_loss=tensor(0.0681, device='cuda:0')
100%|████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:27<00:00, 3.92s/it]
GPU Memory before entering the eval : 1982
GPU Memory consumed at the end of the eval (end-begin): -66
GPU Peak Memory consumed during the eval (max-begin): 672
GPU Total Peak Memory consumed during the eval (max): 2654
CPU Memory before entering the eval : 19411
CPU Memory consumed at the end of the eval (end-begin): 0
CPU Peak Memory consumed during the eval (max-begin): 0
CPU Total Peak Memory consumed during the eval (max): 19411
accuracy=100.0
eval_preds[:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint']
dataset['train'][label_column][:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint']
```
# Caveats
1. Merging when using PEFT and DeepSpeed is currently unsupported and will raise an error.
2. When using CPU offloading, the major gains from using PEFT to shrink the optimizer states and gradients to that of the adapter weights would be realized on CPU RAM and there won't be savings with respect to GPU memory.
3. DeepSpeed Stage 3 and QLoRA, when used with CPU offloading, lead to more GPU memory usage compared to disabling CPU offloading.
|
peft/docs/source/accelerate/deepspeed.md/0
|
{
"file_path": "peft/docs/source/accelerate/deepspeed.md",
"repo_id": "peft",
"token_count": 7470
}
| 186
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# PEFT
🤗 PEFT (Parameter-Efficient Fine-Tuning) is a library for efficiently adapting large pretrained models to various downstream applications without fine-tuning all of a model's parameters because it is prohibitively costly. PEFT methods only fine-tune a small number of (extra) model parameters - significantly decreasing computational and storage costs - while yielding performance comparable to a fully fine-tuned model. This makes it more accessible to train and store large language models (LLMs) on consumer hardware.
PEFT is integrated with the Transformers, Diffusers, and Accelerate libraries to provide a faster and easier way to load, train, and use large models for inference.
<div class="mt-10">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="quicktour"
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Get started</div>
<p class="text-gray-700">Start here if you're new to 🤗 PEFT to get an overview of the library's main features, and how to train a model with a PEFT method.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./task_guides/image_classification_lora"
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
<p class="text-gray-700">Practical guides demonstrating how to apply various PEFT methods across different types of tasks like image classification, causal language modeling, automatic speech recognition, and more. Learn how to use 🤗 PEFT with the DeepSpeed and Fully Sharded Data Parallel scripts.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual_guides/lora"
><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
<p class="text-gray-700">Get a better theoretical understanding of how LoRA and various soft prompting methods help reduce the number of trainable parameters to make training more efficient.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/config"
><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
<p class="text-gray-700">Technical descriptions of how 🤗 PEFT classes and methods work.</p>
</a>
</div>
</div>
<iframe
src="https://stevhliu-peft-methods.hf.space"
frameborder="0"
width="850"
height="620"
></iframe>
|
peft/docs/source/index.md/0
|
{
"file_path": "peft/docs/source/index.md",
"repo_id": "peft",
"token_count": 1161
}
| 187
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Multitask prompt tuning
[Multitask prompt tuning](https://huggingface.co/papers/2303.02861) decomposes the soft prompts of each task into a single learned transferable prompt instead of a separate prompt for each task. The single learned prompt can be adapted for each task by multiplicative low rank updates.
The abstract from the paper is:
*Prompt tuning, in which a base pretrained model is adapted to each task via conditioning on learned prompt vectors, has emerged as a promising approach for efficiently adapting large language models to multiple downstream tasks. However, existing methods typically learn soft prompt vectors from scratch, and it has not been clear how to exploit the rich cross-task knowledge with prompt vectors in a multitask learning setting. We propose multitask prompt tuning (MPT), which first learns a single transferable prompt by distilling knowledge from multiple task-specific source prompts. We then learn multiplicative low rank updates to this shared prompt to efficiently adapt it to each downstream target task. Extensive experiments on 23 NLP datasets demonstrate that our proposed approach outperforms the state-of-the-art methods, including the full finetuning baseline in some cases, despite only tuning 0.035% as many task-specific parameters*.
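A minimal usage sketch is shown below. The field names assume the current PEFT API (see the reference below for the authoritative signature), and the base model, number of tasks, and number of virtual tokens are placeholders:
```py
from transformers import AutoModelForSeq2SeqLM
from peft import MultitaskPromptTuningConfig, TaskType, get_peft_model

# Sketch only: one shared soft prompt of 50 virtual tokens, adapted across 8 tasks.
peft_config = MultitaskPromptTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    num_virtual_tokens=50,
    num_tasks=8,
)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```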
## MultitaskPromptTuningConfig
[[autodoc]] tuners.multitask_prompt_tuning.config.MultitaskPromptTuningConfig
## MultitaskPromptEmbedding
[[autodoc]] tuners.multitask_prompt_tuning.model.MultitaskPromptEmbedding
|
peft/docs/source/package_reference/multitask_prompt_tuning.md/0
|
{
"file_path": "peft/docs/source/package_reference/multitask_prompt_tuning.md",
"repo_id": "peft",
"token_count": 533
}
| 188
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# PEFT configurations and models
The sheer size of today's large pretrained models - which commonly have billions of parameters - presents a significant training challenge because they require more storage space and more computational power to crunch all those calculations. You'll need access to powerful GPUs or TPUs to train these large pretrained models, which is expensive, not widely accessible to everyone, not environmentally friendly, and not very practical. PEFT methods address many of these challenges. There are several types of PEFT methods (soft prompting, matrix decomposition, adapters), but they all focus on the same thing: reducing the number of trainable parameters. This makes it more accessible to train and store large models on consumer hardware.
The PEFT library is designed to help you quickly train large models on free or low-cost GPUs, and in this tutorial, you'll learn how to set up a configuration to apply a PEFT method to a pretrained base model for training. Once the PEFT configuration is set up, you can use any training framework you like (Transformers' [`~transformers.Trainer`] class, [Accelerate](https://hf.co/docs/accelerate), a custom PyTorch training loop).
## PEFT configurations
<Tip>
Learn more about the parameters you can configure for each PEFT method in their respective API reference page.
</Tip>
A configuration stores important parameters that specify how a particular PEFT method should be applied.
For example, take a look at the following [`LoraConfig`](https://huggingface.co/ybelkada/opt-350m-lora/blob/main/adapter_config.json) for applying LoRA and [`PromptEncoderConfig`](https://huggingface.co/smangrul/roberta-large-peft-p-tuning/blob/main/adapter_config.json) for applying p-tuning (these configuration files are already JSON-serialized). Whenever you load a PEFT adapter, it is a good idea to check whether it has an associated adapter_config.json file, which is required; a quick way to check this is shown after the examples below.
<hfoptions id="config">
<hfoption id="LoraConfig">
```json
{
"base_model_name_or_path": "facebook/opt-350m", #base model to apply LoRA to
"bias": "none",
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layers_pattern": null,
"layers_to_transform": null,
"lora_alpha": 32,
"lora_dropout": 0.05,
"modules_to_save": null,
"peft_type": "LORA", #PEFT method type
"r": 16,
"revision": null,
"target_modules": [
"q_proj", #model modules to apply LoRA to (query and value projection layers)
"v_proj"
],
"task_type": "CAUSAL_LM" #type of task to train model on
}
```
You can create your own configuration for training by initializing a [`LoraConfig`].
```py
from peft import LoraConfig, TaskType
lora_config = LoraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
task_type=TaskType.CAUSAL_LM,
lora_alpha=32,
lora_dropout=0.05
)
```
</hfoption>
<hfoption id="PromptEncoderConfig">
```json
{
"base_model_name_or_path": "roberta-large", #base model to apply p-tuning to
"encoder_dropout": 0.0,
"encoder_hidden_size": 128,
"encoder_num_layers": 2,
"encoder_reparameterization_type": "MLP",
"inference_mode": true,
"num_attention_heads": 16,
"num_layers": 24,
"num_transformer_submodules": 1,
"num_virtual_tokens": 20,
"peft_type": "P_TUNING", #PEFT method type
"task_type": "SEQ_CLS", #type of task to train model on
"token_dim": 1024
}
```
You can create your own configuration for training by initializing a [`PromptEncoderConfig`].
```py
from peft import PromptEncoderConfig, TaskType
p_tuning_config = PromptEncoderConfig(
encoder_reparameterization_type="MLP",
encoder_hidden_size=128,
num_attention_heads=16,
num_layers=24,
num_transformer_submodules=1,
num_virtual_tokens=20,
token_dim=1024,
task_type=TaskType.SEQ_CLS
)
```
</hfoption>
</hfoptions>
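As mentioned above, every saved adapter ships with an `adapter_config.json`. A quick, purely illustrative way to inspect it is to load just the configuration with [`PeftConfig`]:
```py
from peft import PeftConfig

# Reads adapter_config.json from the Hub repo (or a local folder) without loading any weights.
config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora")
print(config.peft_type, config.base_model_name_or_path, config.task_type)
```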
## PEFT models
With a PEFT configuration in hand, you can now apply it to any pretrained model to create a [`PeftModel`]. Choose from any of the state-of-the-art models from the [Transformers](https://hf.co/docs/transformers) library, a custom model, and even new and unsupported transformer architectures.
For this tutorial, load a base [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model to finetune.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```
Use the [`get_peft_model`] function to create a [`PeftModel`] from the base facebook/opt-350m model and the `lora_config` you created earlier.
```py
from peft import get_peft_model
lora_model = get_peft_model(model, lora_config)
lora_model.print_trainable_parameters()
"trainable params: 1,572,864 || all params: 332,769,280 || trainable%: 0.472659014678278"
```
Now you can train the [`PeftModel`] with your preferred training framework! After training, you can save your model locally with [`~PeftModel.save_pretrained`] or upload it to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method.
```py
# save locally
lora_model.save_pretrained("your-name/opt-350m-lora")
# push to Hub
lora_model.push_to_hub("your-name/opt-350m-lora")
```
To load a [`PeftModel`] for inference, you'll need to provide the [`PeftConfig`] used to create it and the base model it was trained from.
```py
from peft import PeftModel, PeftConfig
config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora")
```
<Tip>
By default, the [`PeftModel`] is set for inference, but if you'd like to train the adapter some more you can set `is_trainable=True`.
```py
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora", is_trainable=True)
```
</Tip>
The [`PeftModel.from_pretrained`] method is the most flexible way to load a [`PeftModel`] because it doesn't matter what model framework was used (Transformers, timm, a generic PyTorch model). Other classes, like [`AutoPeftModel`], are just convenient wrappers around the base [`PeftModel`] and make it easier to load PEFT models directly from the Hub or locally where the PEFT weights are stored.
```py
from peft import AutoPeftModelForCausalLM
lora_model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```
Take a look at the [AutoPeftModel](package_reference/auto_class) API reference to learn more about the [`AutoPeftModel`] classes.
## Next steps
With the appropriate [`PeftConfig`], you can apply it to any pretrained model to create a [`PeftModel`] and train large powerful models faster on freely available GPUs! To learn more about PEFT configurations and models, the following guide may be helpful:
* Learn how to configure a PEFT method for models that aren't from Transformers in the [Working with custom models](../developer_guides/custom_models) guide.
|
peft/docs/source/tutorial/peft_model_config.md/0
|
{
"file_path": "peft/docs/source/tutorial/peft_model_config.md",
"repo_id": "peft",
"token_count": 2415
}
| 189
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union
import torch
from diffusers.models import UNet2DConditionModel
from diffusers.utils import BaseOutput, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet2DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet2DConditionNewModel(UNet2DConditionModel):
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
guided_hint: Optional[torch.Tensor] = None,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
encoder_attention_mask (`torch.Tensor`):
(batch, sequence_length) cross-attention mask, applied to encoder_hidden_states. True = keep, False =
discard. Mask will be converted into a bias, which adds large negative values to attention scores
corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
added_cond_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified includes additional conditions that can be used for additional time
embeddings or encoder hidden states projections. See the configurations `encoder_hid_dim_type` and
`addition_embed_type` for more information.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
        # By default samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info("Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
emb = emb + aug_emb
elif self.config.addition_embed_type == "text_image":
            # Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
emb = emb + aug_emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
            # Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
# 2. pre-process and insert conditioning (ControlNet)
# Note: the added "guided_hint" is the only difference between this implementation and the original UNet2DConditionModel
sample = self.conv_in(sample)
sample = guided_hint + sample if guided_hint is not None else sample
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
if mid_block_additional_residual is not None:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
)
# 6. post-process
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
|
peft/examples/boft_controlnet/utils/unet_2d_condition.py/0
|
{
"file_path": "peft/examples/boft_controlnet/utils/unet_2d_condition.py",
"repo_id": "peft",
"token_count": 5908
}
| 190
|
<jupyter_start><jupyter_text>Training PEFT models with new tokens being added to the embedding layers and tokenizerIn this example, we will learn how to train a LoRA model when adding new tokens to the tokenizer and model. This is a common use case when doing the following:1. Instruction finetuning with new tokens being added such as ``, ``, ``, ``, `` to properly format the conversations2. Finetuning on a specific language wherein language-specific tokens are added, e.g., Korean tokens being added to the vocabulary for finetuning an LLM on Korean datasets.3. Instruction finetuning to return outputs in a certain format to enable agent behaviour, with new tokens such as ``, ``, ``, ``, ``, ``, ``.In such cases, you add the Embedding modules to the LoRA `target_modules`. PEFT will take care of saving the embedding layers with the newly added tokens along with the adapter weights that were trained on the specific initialization of the embedding weights of the added tokens. Let's import the necessary libraries<jupyter_code>import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
os.environ["WANDB_PROJECT"] = "PeftExamples"
import transformers
from peft import (
LoraConfig,
PeftConfig,
PeftModel,
get_peft_model,
prepare_model_for_kbit_training,
)
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
Trainer,
default_data_collator,
)
import torch
from dataclasses import dataclass, field
from typing import Optional
from dataclass_csv import DataclassReader
from torch.utils.data import Dataset, DataLoader
from enum import Enum<jupyter_output><empty_output><jupyter_text>Prepare Model and Tokenizer Now, we will be adding 27 new tokens as well as replacing the existing pad, bos and eos tokens of the model.<jupyter_code>class SpecialTokens(str, Enum):
begin_target = "<|begintarget|>"
end_target = "<|endtarget|>"
begin_context = "<|begincontext|>"
end_context = "<|endcontext|>"
system = "<|system|>"
user = "<|user|>"
begin_last_user_utterance = "<|beginlastuserutterance|>"
end_last_user_utterance = "<|endlastuserutterance|>"
begin_dsts = "<|begindsts|>"
end_dsts = "<|enddsts|>"
begin_dst = "<|begindst|>"
end_dst = "<|enddst|>"
begin_belief = "<|beginbelief|>"
end_belief = "<|endbelief|>"
begin_response = "<|beginresponse|>"
end_response = "<|endresponse|>"
begin_action = "<|beginaction|>"
end_action = "<|endaction|>"
begin_user_action = "<|beginuseraction|>"
end_user_action = "<|enduseraction|>"
sys_actions = "<|sysactions|>"
begin_intent = "<|beginintent|>"
end_intent = "<|endintent|>"
begin_requested_slots = "<|beginrequestedslots|>"
end_requested_slots = "<|endrequestedslots|>"
pad_token = "<|pad|>"
bos_token = "<|startoftext|>"
@classmethod
def list(cls):
        return [c.value for c in cls]<jupyter_output><empty_output><jupyter_text>We will be finetuning the Mistral-7B model. Let's load the tokenizer and add the special tokens, followed by loading the base model and resizing the embedding layers to accommodate the newly added tokens.<jupyter_code>model_name = "mistralai/Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(
model_name,
pad_token=SpecialTokens.pad_token.value,
bos_token=SpecialTokens.bos_token.value,
eos_token=SpecialTokens.end_target.value,
additional_special_tokens=SpecialTokens.list(),
)
model = AutoModelForCausalLM.from_pretrained(
model_name,
low_cpu_mem_usage=True
# attn_implementation ="flash_attention_2", # leading to an error
)
model.resize_token_embeddings(len(tokenizer))<jupyter_output><empty_output><jupyter_text>Apply LoRA<jupyter_code>config = LoraConfig(
r=64, lora_alpha=128, lora_dropout=0.0, target_modules=["embed_tokens", "lm_head", "q_proj", "v_proj"]
)
model = get_peft_model(model, config)
print(model.print_trainable_parameters())
print(model)<jupyter_output>trainable params: 31,886,720 || all params: 7,273,840,000 || trainable%: 0.43837532857472805
None
PeftModel(
(base_model): LoraModel(
(model): MistralForCausalLM(
(model): MistralModel(
(embed_tokens): lora.Embedding(
(base_layer): Embedding(32027, 4096)
(lora_dropout): ModuleDict(
(default): Identity()
)
(lora_A): ModuleDict()
(lora_B): ModuleDict()
(lora_embedding_A): ParameterDict( (default): Parameter containing: [torch.FloatTensor of size 64x32027])
(lora_embedding_B): ParameterDict( (default): Parameter containing: [torch.FloatTensor of size 4096x64])
)
(layers): ModuleList(
(0-31): 32 x MistralDecoderLayer(
(self_attn): MistralAttention(
(q_proj): lora.Linear(
(base_layer): Linear(in_features=4096, out_features=4096, bias=False)
(lora_dropout): ModuleDict(
              (default): Identity([...]<jupyter_text>Prepare Dataset<jupyter_code>from datasets import load_dataset
dataset = load_dataset("smangrul/assistant_chatbot_dataset")
dataset = dataset["train"].train_test_split(0.2)
text_column = "context"
label_column = "target"
max_length = 512
def preprocess_function(examples):
batch_size = len(examples[text_column])
targets = [str(x) for x in examples[label_column]]
model_inputs = tokenizer(examples[text_column])
labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id]
# print(i, sample_input_ids, label_input_ids)
model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
# print(model_inputs)
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i]
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
max_length - len(sample_input_ids)
) + sample_input_ids
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
"attention_mask"
][i]
labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
model_inputs["input_ids"][i] = model_inputs["input_ids"][i][:max_length]
model_inputs["attention_mask"][i] = model_inputs["attention_mask"][i][:max_length]
labels["input_ids"][i] = labels["input_ids"][i][:max_length]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
train_dataset
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=8, pin_memory=True
)
next(iter(train_dataloader))
tokenizer.decode(train_dataset[0]["input_ids"])<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>training_args = TrainingArguments(
output_dir="mistral_lora_clm_with_added_tokens",
num_train_epochs=2,
save_total_limit=5,
per_device_train_batch_size=8,
warmup_steps=10,
weight_decay=0.0001,
dataloader_drop_last=True,
bf16=True,
logging_steps=10,
learning_rate=1e-5,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={"use_reentrant": False},
remove_unused_columns=False,
hub_model_id="smangrul/mistral_lora_clm_with_added_tokens",
push_to_hub=True,
hub_private_repo=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
data_collator=default_data_collator,
)
# model.config.use_cache = False
trainer.train()<jupyter_output>Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.
[34m[1mwandb[0m: Currently logged in as: [33msmangrul[0m. Use [1m`wandb login --relogin`[0m to force relogin<jupyter_text>Check the model output on a sample from evaluation dataset<jupyter_code>import random
i = random.randint(0, len(dataset["test"]))
context = dataset["test"][i]["context"]
batch = tokenizer(context, return_tensors="pt")
batch = {k: v.to("cuda") for k, v in batch.items()}
model.eval()
output_tokens = model.generate(
**batch,
max_new_tokens=256,
do_sample=True,
temperature=0.2,
top_p=0.95,
top_k=50,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id,
)
target_predicted = tokenizer.decode(output_tokens[0], skip_special_tokens=False).split("<|endcontext|>")[1]
target = dataset["test"][i]["target"]
print(f"{context=} \n\n {target_predicted=} \n\n {target=}")<jupyter_output>context="<|begincontext|><|user|>Can you find me a place to eat please?<|system|>Where at? And what kind of cuisine are you craving?<|user|>Somewhere in SF, and I am really craving Thai food at the moment!<|system|>I found a bunch of restaurants, there's actually 10 that you might like in San Francisco, one of them being Baan Thai House & Wine Bar<|user|>How can I reach them? And what's their address?<|system|>You can reach them by phone at 415-379-4505 and visit them at 534 Irving Street<|beginlastuserutterance|>Great, that restaurant sounds good<|endlastuserutterance|><|endcontext|>"
target_predicted='<|begintarget|><|begindsts|><|begindst|><|beginintent|> FindRestaurants<|endintent|><|beginbelief|> Restaurants^city->SF~San Francisco|Restaurants^cuisine->Thai|Restaurants^restaurant_name->Baan Thai House & Wine Bar<|endbelief|><|enddst|><|enddsts|><|beginuseraction|> REQUEST->Restaurants^phone_number~|REQUEST->Restaurants^street_address~<|enduseraction|><|beginaction|> INFORM->Rest[...]<jupyter_text>Save the Adapter model When the lora layers are applied to embedding layers, the corresponding base model embedding layers are also saved.<jupyter_code>trainer.push_to_hub()
trainer.model.push_to_hub(training_args.output_dir)<jupyter_output>/raid/sourab/peft/src/peft/utils/save_and_load.py:128: UserWarning: Setting `is_embedding_layer_resized` to `True` as embedding layers found in `target_modules`
warnings.warn("Setting `is_embedding_layer_resized` to `True` as embedding layers found in `target_modules`")<jupyter_text>Check the model loading is working as expected and generating plausible outputs.<jupyter_code>from peft import PeftModel
inference_model = AutoModelForCausalLM.from_pretrained(
model_name,
low_cpu_mem_usage=True,
# attn_implementation ="flash_attention_2",
)
inference_model.resize_token_embeddings(len(tokenizer))
inference_model = PeftModel.from_pretrained(inference_model, "smangrul/mistral_lora_clm_with_added_tokens")
inference_model.to("cuda")
inference_model.eval()
output_tokens = inference_model.generate(
**batch,
max_new_tokens=256,
do_sample=True,
temperature=0.2,
top_p=0.95,
top_k=50,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id,
)
target_predicted = tokenizer.decode(output_tokens[0], skip_special_tokens=False).split("<|endcontext|>")[1]
print(f"{context=} \n\n {target_predicted=} \n\n {target=}")<jupyter_output><empty_output>
|
peft/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb/0
|
{
"file_path": "peft/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb",
"repo_id": "peft",
"token_count": 4577
}
| 191
|
<jupyter_start><jupyter_text>PEFT with DNA Language Models This notebook demonstrates how to utilize parameter-efficient fine-tuning techniques (PEFT) from the PEFT library to fine-tune a DNA Language Model (DNA-LM). The fine-tuned DNA-LM will be applied to solve a task from the nucleotide benchmark dataset. Parameter-efficient fine-tuning (PEFT) techniques are crucial for adapting large pre-trained models to specific tasks with limited computational resources. 1. Import relevant libraries We'll start by importing the required libraries, including the PEFT library and other dependencies.<jupyter_code>import torch
import transformers
import peft
import tqdm
import numpy as np<jupyter_output>/opt/homebrew/anaconda3/envs/peft/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm<jupyter_text>2. Load models We'll load a pre-trained DNA Language Model, "SpeciesLM", that serves as the base for fine-tuning. This is done using the transformers library from HuggingFace. The tokenizer and the model come from the paper, "Species-aware DNA language models capture regulatory elements and their evolution". [Paper Link](https://www.biorxiv.org/content/10.1101/2023.01.26.525670v2), [Code Link](https://github.com/gagneurlab/SpeciesLM). They introduce a species-aware DNA language model, which is trained on more than 800 species spanning over 500 million years of evolution.<jupyter_code>from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("gagneurlab/SpeciesLM", revision = "downstream_species_lm")
lm = AutoModelForMaskedLM.from_pretrained("gagneurlab/SpeciesLM", revision = "downstream_species_lm")
lm.eval()
lm.to("cuda");<jupyter_output><empty_output><jupyter_text>2. Prepare datasets We'll load the `nucleotide_transformer_downstream_tasks` dataset, which contains 18 downstream tasks from the Nucleotide Transformer paper. This dataset provides a consistent genomics benchmark with binary classification tasks.<jupyter_code>from datasets import load_dataset
raw_data = load_dataset("InstaDeepAI/nucleotide_transformer_downstream_tasks", "H3")<jupyter_output><empty_output><jupyter_text>We'll use the "H3" subset of this dataset, which contains a total of 13,468 rows in the training data, and 1497 rows in the test data.<jupyter_code>raw_data<jupyter_output><empty_output><jupyter_text>The dataset consists of three columns, ```sequence```, ```name``` and ```label```. An row in this dataset looks like:<jupyter_code>raw_data['train'][0]<jupyter_output><empty_output><jupyter_text>We split out dataset into training, test, and validation sets.<jupyter_code>from datasets import Dataset, DatasetDict
train_valid_split = raw_data['train'].train_test_split(test_size=0.15, seed=42)
train_valid_split = DatasetDict({
'train': train_valid_split['train'],
'validation': train_valid_split['test']
})
ds = DatasetDict({
'train': train_valid_split['train'],
'validation': train_valid_split['validation'],
'test': raw_data['test']
})<jupyter_output><empty_output><jupyter_text>Then, we use the tokenizer and a utility function we created, ```get_kmers``` to generate the final data and labels. The ```get_kmers``` function is essential for generating overlapping 6-mers needed by the language model (LM). By using k=6 and stride=1, we ensure that the model receives continuous and overlapping subsequences, capturing the local context within the biological sequence for more effective analysis and prediction.<jupyter_code>def get_kmers(seq, k=6, stride=1):
return [seq[i:i + k] for i in range(0, len(seq), stride) if i + k <= len(seq)]
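# Illustrative sanity check (not in the original notebook): with the default k=6 and stride=1,
# a 7-base toy sequence yields two overlapping 6-mers.
assert get_kmers("ATGCGTA") == ["ATGCGT", "TGCGTA"]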
test_sequences = []
train_sequences = []
val_sequences = []
dataset_limit = 200  # NOTE: limit to 200 examples so that training runs faster; set to None to use the entire dataset
for i in range(0, len(ds['train'])):
if dataset_limit and i == dataset_limit:
break
sequence = ds['train'][i]['sequence']
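    # Descriptive comment (added for clarity): the "candida_glabrata" prefix below acts as the species
    # identifier expected by the species-aware SpeciesLM, prepended before the overlapping 6-mers.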
sequence = "candida_glabrata " + " ".join(get_kmers(sequence))
sequence = tokenizer(sequence)["input_ids"]
train_sequences.append(sequence)
for i in range(0, len(ds['validation'])):
if dataset_limit and i == dataset_limit:
break
sequence = ds['validation'][i]['sequence']
sequence = "candida_glabrata " + " ".join(get_kmers(sequence))
sequence = tokenizer(sequence)["input_ids"]
val_sequences.append(sequence)
for i in range(0, len(ds['test'])):
if dataset_limit and i == dataset_limit:
break
sequence = ds['test'][i]['sequence']
sequence = "candida_glabrata " + " ".join(get_kmers(sequence))
sequence = tokenizer(sequence)["input_ids"]
test_sequences.append(sequence)
train_labels = ds['train']['label']
test_labels = ds['test']['label']
val_labels = ds['validation']['label']
if dataset_limit:
train_labels = train_labels[0:dataset_limit]
test_labels = test_labels[0:dataset_limit]
    val_labels = val_labels[0:dataset_limit]<jupyter_output><empty_output><jupyter_text>Finally, we create a Dataset object for each of our sets.<jupyter_code>from datasets import Dataset
train_dataset = Dataset.from_dict({"input_ids": train_sequences, "labels": train_labels})
val_dataset = Dataset.from_dict({"input_ids": val_sequences, "labels": val_labels})
test_dataset = Dataset.from_dict({"input_ids": test_sequences, "labels": test_labels})<jupyter_output><empty_output><jupyter_text>4. Train model Now, we'll train our DNA Language Model on the training dataset. We'll add a linear classification layer on top of the language model and then train all of the model's parameters.<jupyter_code>from transformers import DataCollatorWithPadding
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
import torch
from torch import nn
class DNA_LM(nn.Module):
def __init__(self, model, num_labels):
super(DNA_LM, self).__init__()
self.model = model.bert
self.in_features = model.config.hidden_size
self.out_features = num_labels
self.classifier = nn.Linear(self.in_features, self.out_features)
def forward(self, input_ids, attention_mask=None, labels=None):
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True)
sequence_output = outputs.hidden_states[-1]
# Use the [CLS] token for classification
cls_output = sequence_output[:, 0, :]
logits = self.classifier(cls_output)
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.out_features), labels.view(-1))
return (loss, logits) if loss is not None else logits
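# Note (added for clarity): when `labels` are passed, the wrapper returns a (loss, logits) tuple,
# which is the convention the HF Trainer expects; `logits` has shape (batch_size, num_labels).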
# Number of classes for your classification task
num_labels = 2
classification_model = DNA_LM(lm, num_labels)
classification_model.to('cuda');
from transformers import DataCollatorWithPadding
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
from transformers import Trainer, TrainingArguments
# Define training arguments
training_args = TrainingArguments(
output_dir='./results',
eval_strategy="epoch",
learning_rate=2e-5,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
num_train_epochs=5,
weight_decay=0.01,
eval_steps=1,
logging_steps=1,
)
# Initialize Trainer
trainer = Trainer(
model=classification_model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Train the model
trainer.train()<jupyter_output><empty_output><jupyter_text>5. Evaluation<jupyter_code># Generate predictions
predictions = trainer.predict(test_dataset)
logits = predictions.predictions
predicted_labels = logits.argmax(axis=-1)
print(predicted_labels)<jupyter_output><empty_output><jupyter_text>Then, we create a function to calculate the accuracy from the test and predicted labels.<jupyter_code>def calculate_accuracy(true_labels, predicted_labels):
assert len(true_labels) == len(predicted_labels), "Arrays must have the same length"
correct_predictions = np.sum(true_labels == predicted_labels)
accuracy = correct_predictions / len(true_labels)
return accuracy
accuracy = calculate_accuracy(test_labels, predicted_labels)
print(f"Accuracy: {accuracy:.2f}")<jupyter_output>Accuracy: 0.53<jupyter_text>The results aren't that good, which we can attribute to the small dataset size. 7. Parameter Efficient Fine-Tuning Techniques In this section, we demonstrate how to employ parameter-efficient fine-tuning (PEFT) techniques to adapt a pre-trained model for specific genomics tasks using the PEFT library. The LoraConfig object is instantiated to configure the PEFT parameters:- task_type: Specifies the type of task, in this case, sequence classification (SEQ_CLS).- r: The rank of the LoRA matrices.- lora_alpha: Scaling factor for adaptive re-parameterization.- target_modules: Modules within the model to apply PEFT re-parameterization (query, key, value in this example).- lora_dropout: Dropout rate used during PEFT fine-tuning.<jupyter_code># Number of classes for your classification task
num_labels = 2
classification_model = DNA_LM(lm, num_labels)
classification_model.to('cuda');
from peft import LoraConfig, TaskType
peft_config = LoraConfig(
r=8,
lora_alpha=32,
target_modules=["query", "key", "value"],
lora_dropout=0.01,
)
from peft import get_peft_model
peft_model = get_peft_model(classification_model, peft_config)
peft_model.print_trainable_parameters()
peft_model
# Define training arguments
training_args = TrainingArguments(
output_dir='./results',
eval_strategy="epoch",
learning_rate=2e-5,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
num_train_epochs=5,
weight_decay=0.01,
eval_steps=1,
logging_steps=1,
)
# Initialize Trainer
trainer = Trainer(
model=peft_model.model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Train the model
trainer.train()<jupyter_output><empty_output><jupyter_text>7. Evaluate PEFT Model<jupyter_code># Generate predictions
predictions = trainer.predict(test_dataset)
logits = predictions.predictions
predicted_labels = logits.argmax(axis=-1)
print(predicted_labels)
def calculate_accuracy(true_labels, predicted_labels):
assert len(true_labels) == len(predicted_labels), "Arrays must have the same length"
correct_predictions = np.sum(true_labels == predicted_labels)
accuracy = correct_predictions / len(true_labels)
return accuracy
accuracy = calculate_accuracy(test_labels, predicted_labels)
print(f"Accuracy: {accuracy:.2f}")<jupyter_output>Accuracy: 0.52
|
peft/examples/dna_language_models/dna_lm.ipynb/0
|
{
"file_path": "peft/examples/dna_language_models/dna_lm.ipynb",
"repo_id": "peft",
"token_count": 3782
}
| 192
|
<jupyter_start><jupyter_code>!git clone https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth
%cd "peft-lora-sd-dreambooth"
!pip install -r requirements.txt
!python colab.py<jupyter_output><empty_output>
|
peft/examples/lora_dreambooth/colab_notebook.ipynb/0
|
{
"file_path": "peft/examples/lora_dreambooth/colab_notebook.ipynb",
"repo_id": "peft",
"token_count": 91
}
| 193
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from typing import List, Optional
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments
from trl import SFTTrainer
from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
@dataclass
class TrainingArguments(TrainingArguments):
# model configs
base_model_name_or_path: Optional[str] = field(
default=None, metadata={"help": "The name or path of the fp32/16 base model."}
)
residual_model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The name or path of the fp32/16 residual model. (`['fxmeng/pissa-llama-2-7b-r16-alpha-16']`)"
},
)
bits: str = field(default="fp32", metadata={"help": "(`['fp4', 'nf4', 'int8', 'bf16', 'fp16', fp32]`)"})
init_lora_weights: str = field(default="pissa", metadata={"help": "(`['gaussian', 'pissa', 'pissa_niter_4']`)"})
lora_r: int = field(default=16)
lora_alpha: int = field(default=16)
lora_dropout: float = field(default=0)
convert_pissa_to_lora: bool = field(default=False)
merge_and_save: bool = field(default=False)
# dataset configs
data_path: str = field(default="imdb", metadata={"help": "Path to the training data."})
dataset_split: str = field(default="train[:1%]", metadata={"help": "(`['train', 'test', 'eval']`):"})
dataset_field: List[str] = field(default=None, metadata={"help": "Fields of dataset input and output."})
max_seq_length: int = field(
default=512,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
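# Illustrative invocation (hypothetical model/dataset names; the flags map to the dataclass fields above):
#   python pissa_finetuning.py \
#       --base_model_name_or_path meta-llama/Llama-2-7b-hf \
#       --data_path <instruction_dataset> --dataset_field instruction output \
#       --init_lora_weights pissa --bits bf16 --output_dir ./pissa_out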
parser = HfArgumentParser(TrainingArguments)
script_args = parser.parse_args_into_dataclasses()[0]
print(script_args)
print(f"Load pre-processed residual model in {script_args.bits} bits.")
if script_args.bits in ["nf4", "fp4", "int8"]:
quantization_config = BitsAndBytesConfig(
load_in_4bit=(script_args.bits == "nf4" or script_args.bits == "fp4"),
load_in_8bit=script_args.bits == "int8",
bnb_4bit_quant_type=script_args.bits,
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=torch.bfloat16,
)
res_model = AutoModelForCausalLM.from_pretrained(
script_args.residual_model_name_or_path, quantization_config=quantization_config, low_cpu_mem_usage=True
)
res_model = prepare_model_for_kbit_training(res_model)
print("Wrapping the residual model with PiSSA.")
peft_model = PeftModel.from_pretrained(
res_model, script_args.residual_model_name_or_path, subfolder="pissa_init", is_trainable=True
)
tokenizer = AutoTokenizer.from_pretrained(script_args.residual_model_name_or_path)
elif script_args.residual_model_name_or_path is not None:
res_model = AutoModelForCausalLM.from_pretrained(
script_args.residual_model_name_or_path,
torch_dtype=(
torch.float16
if script_args.bits == "fp16"
else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
),
device_map="auto",
)
print("Wrapping the residual model with PiSSA.")
peft_model = PeftModel.from_pretrained(
res_model, script_args.residual_model_name_or_path, subfolder="pissa_init", is_trainable=True
)
tokenizer = AutoTokenizer.from_pretrained(script_args.residual_model_name_or_path)
elif script_args.base_model_name_or_path is not None:
print(
f"No available pre-processed model, manually initialize a PiSSA using {script_args.base_model_name_or_path}."
)
model = AutoModelForCausalLM.from_pretrained(
script_args.base_model_name_or_path,
torch_dtype=(
torch.float16
if script_args.bits == "fp16"
else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
),
device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
tokenizer.pad_token_id = tokenizer.eos_token_id
lora_config = LoraConfig(
r=script_args.lora_r,
lora_alpha=script_args.lora_alpha,
init_lora_weights=script_args.init_lora_weights,
lora_dropout=script_args.lora_dropout,
target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
bias="none",
task_type="CAUSAL_LM",
)
peft_model = get_peft_model(model, lora_config)
print(peft_model)
peft_model.print_trainable_parameters()
print(f"Training PiSSA with trl on the {script_args.data_path}[{script_args.dataset_split}] dataset.")
dataset = load_dataset(script_args.data_path, split=script_args.dataset_split)
dataset = dataset.map(
lambda example: {
"text": f"### USER: {example[script_args.dataset_field[0]]}\n### ASSISTANT: {example[script_args.dataset_field[1]]}"
}
)
trainer = SFTTrainer(
model=peft_model,
args=script_args,
train_dataset=dataset,
dataset_text_field="text",
max_seq_length=script_args.max_seq_length,
tokenizer=tokenizer,
)
trainer.train()
trainer.save_state()
############################## Upon training completion, convert and save PiSSA in LoRA format ##############################
if script_args.convert_pissa_to_lora:
peft_model.save_pretrained(
os.path.join(script_args.output_dir, "pissa_lora"),
convert_pissa_to_lora=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
)
else:
peft_model.save_pretrained(
os.path.join(script_args.output_dir, "pissa_ft"),
)
if script_args.merge_and_save:
model = peft_model.merge_and_unload()
model.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged"))
tokenizer.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged"))
|
peft/examples/pissa_finetuning/pissa_finetuning.py/0
|
{
"file_path": "peft/examples/pissa_finetuning/pissa_finetuning.py",
"repo_id": "peft",
"token_count": 2619
}
| 194
|
<jupyter_start><jupyter_text>IntroductionIn this notebook, we are going to fine-tune the LayoutLM model by Microsoft Research on the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset, which is a collection of annotated form documents. The goal of our model is to learn the annotations of a number of labels ("question", "answer", "header" and "other") on those forms, such that it can be used to annotate unseen forms in the future.* Original LayoutLM paper: https://arxiv.org/abs/1912.13318* Original FUNSD paper: https://arxiv.org/abs/1905.13538 Install librariesCurrently you have to first install the `unilm` package, and then the `transformers` package (which updates the outdated `transformers` package that is included in the `unilm` package). The reason we also install the `unilm` package is because we need its preprocessing files. I've forked it, and removed some statements which introduced some issues.<jupyter_code># ! rm -r unilm
# ! pip install unilm<jupyter_output><empty_output><jupyter_text>Getting the dataHere we download the data of the [FUNSD dataset](https://guillaumejaume.github.io/FUNSD/) from the web. This results in a directory called "data" being created, which has 2 subdirectories, one for training and one for testing. Each of those has 2 subdirectories in turn, one containing the images as png files and one containing the annotations in json format.<jupyter_code># ! wget https://guillaumejaume.github.io/FUNSD/dataset.zip
# ! unzip dataset.zip && mv dataset data && rm -rf dataset.zip __MACOSX<jupyter_output><empty_output><jupyter_text>Let's take a look at a training example. For this, we are going to use PIL (Python Image Library).<jupyter_code>from PIL import Image, ImageDraw, ImageFont
import os
base_path = "/home/sourab/temp/data/dataset"
image = Image.open(os.path.join(base_path, "training_data/images/0000971160.png"))
image = image.convert("RGB")
image<jupyter_output><empty_output><jupyter_text>Now let's plot its corresponding annotations. Basically, if you type `data['form']`, you get a list of all general annotations. Each general annotation has a label, a bounding box, and one or more words, which also have their own bounding box. The bounding boxes are in [xleft, ytop, xright, ybottom] format.<jupyter_code>import json
with open(os.path.join(base_path, "training_data/annotations/0000971160.json")) as f:
data = json.load(f)
for annotation in data["form"]:
    print(annotation)<jupyter_output><empty_output><jupyter_text>The PIL library has a handy ImageDraw module, which (you guessed it) allows you to draw things (such as rectangles) on an image:<jupyter_code>draw = ImageDraw.Draw(image, "RGBA")
font = ImageFont.load_default()
label2color = {"question": "blue", "answer": "green", "header": "orange", "other": "violet"}
for annotation in data["form"]:
label = annotation["label"]
general_box = annotation["box"]
draw.rectangle(general_box, outline=label2color[label], width=2)
draw.text((general_box[0] + 10, general_box[1] - 10), label, fill=label2color[label], font=font)
words = annotation["words"]
for word in words:
box = word["box"]
draw.rectangle(box, outline=label2color[label], width=1)
image<jupyter_output><empty_output><jupyter_text>Preprocessing the dataNext, we need to turn the document images into individual tokens and corresponding labels (BIOES format, see further). We do this both for the training and test datasets. Make sure to run this from the `/content` directory:<jupyter_code># ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/training_data/annotations \
# --data_split train \
# --output_dir data \
# --model_name_or_path microsoft/layoutlm-base-uncased \
# --max_len 510
# ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/testing_data/annotations \
# --data_split test \
# --output_dir data \
# --model_name_or_path microsoft/layoutlm-base-uncased \
# --max_len 510<jupyter_output><empty_output><jupyter_text>Next, we create a labels.txt file that contains the unique labels of the FUNSD dataset:<jupyter_code># ! cat data/train.txt | cut -d$'\t' -f 2 | grep -v "^$"| sort | uniq > data/labels.txt<jupyter_output><empty_output><jupyter_text>Define a PyTorch datasetFirst, we create a list containing the unique labels based on `data/labels.txt` (run this from the content directory):<jupyter_code>from torch.nn import CrossEntropyLoss
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
labels = get_labels("data/labels.txt")
num_labels = len(labels)
label_map = {i: label for i, label in enumerate(labels)}
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index<jupyter_output><empty_output><jupyter_text>We can see that the dataset uses the so-called BIOES annotation scheme to annotate the tokens. This means that a given token can be either at the beginning (B), inside (I), outside (O), at the end (E) or start (S) of a given entity. Entities include ANSWER, QUESTION, HEADER and OTHER:<jupyter_code>print(labels)<jupyter_output>['B-ANSWER', 'B-HEADER', 'B-QUESTION', 'E-ANSWER', 'E-HEADER', 'E-QUESTION', 'I-ANSWER', 'I-HEADER', 'I-QUESTION', 'O', 'S-ANSWER', 'S-HEADER', 'S-QUESTION']<jupyter_text>Next, we can create a PyTorch dataset and corresponding dataloader (both for training and evaluation):<jupyter_code>import logging
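# Illustrative BIOES example (not part of the original notebook): a two-token question such as
# "Phone number:" would be tagged ["B-QUESTION", "E-QUESTION"], a single-token answer would be
# tagged ["S-ANSWER"], and tokens outside any entity get "O".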
import os
import torch
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
class FunsdDataset(Dataset):
def __init__(self, args, tokenizer, labels, pad_token_label_id, mode):
if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
)
# if args.local_rank in [-1, 0]:
# logger.info("Saving features into cached file %s", cached_features_file)
# torch.save(features, cached_features_file)
if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
self.features = features
# Convert to Tensors and build dataset
self.all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
self.all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
self.all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
self.all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long)
def __len__(self):
return len(self.features)
def __getitem__(self, index):
return (
self.all_input_ids[index],
self.all_input_mask[index],
self.all_segment_ids[index],
self.all_label_ids[index],
self.all_bboxes[index],
)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
input_ids,
input_mask,
segment_ids,
label_ids,
boxes,
actual_bboxes,
file_name,
page_size,
):
        # Check every coordinate of every box, not just the truthiness of the list.
        assert all(
            0 <= coord <= 1000 for box in boxes for coord in box
        ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(boxes)
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f, open(box_file_path, encoding="utf-8") as fb, open(
image_file_path, encoding="utf-8"
) as fi:
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
for line, bline, iline in zip(f, fb, fi):
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(
InputExample(
guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
guid_index += 1
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
else:
splits = line.split("\t")
bsplits = bline.split("\t")
isplits = iline.split("\t")
assert len(splits) == 2
assert len(bsplits) == 2
assert len(isplits) == 4
assert splits[0] == bsplits[0]
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
box = bsplits[-1].replace("\n", "")
box = [int(b) for b in box.split()]
boxes.append(box)
actual_bbox = [int(b) for b in isplits[1].split()]
actual_bboxes.append(actual_bbox)
page_size = [int(i) for i in isplits[2].split()]
file_name = isplits[3].strip()
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(
InputExample(
guid="%s-%d".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return examples
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
"""Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
file_name = example.file_name
page_size = example.page_size
width, height = page_size
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
token_boxes = []
actual_bboxes = []
label_ids = []
for word, label, box, actual_bbox in zip(example.words, example.labels, example.boxes, example.actual_bboxes):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
actual_bboxes.extend([actual_bbox] * len(word_tokens))
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
token_boxes += [cls_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
token_boxes = [cls_token_box] + token_boxes
actual_bboxes = [[0, 0, width, height]] + actual_bboxes
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
token_boxes = ([pad_token_box] * padding_length) + token_boxes
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
token_boxes += [pad_token_box] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(token_boxes) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
logger.info("boxes: %s", " ".join([str(x) for x in token_boxes]))
logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes]))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
boxes=token_boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return features
from transformers import LayoutLMTokenizer
# from .unilm.layoutlm.data.funsd import FunsdDataset, InputFeatures
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
batch_size = 16
args = {
"local_rank": -1,
"overwrite_cache": True,
"data_dir": "/home/sourab/temp/data/",
"model_name_or_path": "microsoft/layoutlm-base-uncased",
"max_seq_length": 512,
"model_type": "layoutlm",
}
# class to turn the keys of a dict into attributes (thanks Stackoverflow)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
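# Wrapping with AttrDict makes the keys accessible as attributes, e.g. args.data_dir instead of args["data_dir"].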
args = AttrDict(args)
tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
# the LayoutLM authors already defined a specific FunsdDataset, so we are going to use this here
train_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="train")
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size)
eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="test")
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size)
len(train_dataloader)
len(eval_dataloader)
batch = next(iter(train_dataloader))
input_ids = batch[0][0]
tokenizer.decode(input_ids)<jupyter_output><empty_output><jupyter_text>Define and fine-tune the modelAs this is a sequence labeling task, we are going to load `LayoutLMForTokenClassification` (the base sized model) from the hub. We are going to fine-tune it on a downstream task, namely FUNSD.<jupyter_code>from peft import get_peft_config, PeftModel, get_peft_model, LoraConfig, TaskType
peft_config = LoraConfig(
task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
)
peft_config
from transformers import LayoutLMForTokenClassification
import torch
from transformers import set_seed
seed = 100
set_seed(seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=num_labels)
model = get_peft_model(model, peft_config)
model.to(device)
print(model.model.layoutlm.encoder.layer[0].attention.self.query.weight)
print(model.model.layoutlm.encoder.layer[0].attention.self.query.lora_A.weight)
print(model.model.classifier.weight)<jupyter_output><empty_output><jupyter_text>Now we can start training:<jupyter_code>from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
num_train_epochs = 100
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-3)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0.06 * (len(train_dataloader) * num_train_epochs),
num_training_steps=(len(train_dataloader) * num_train_epochs),
)
global_step = 0
t_total = len(train_dataloader) * num_train_epochs # total number of training steps
# put the model in training mode
model.train()
for epoch in range(num_train_epochs):
for batch in tqdm(train_dataloader, desc="Training"):
input_ids = batch[0].to(device)
bbox = batch[4].to(device)
attention_mask = batch[1].to(device)
token_type_ids = batch[2].to(device)
labels = batch[3].to(device)
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
loss = outputs.loss
        # print loss every 10 steps
if global_step % 10 == 0:
print(f"Loss after {global_step} steps: {loss.item()}")
# backward pass to get the gradients
loss.backward()
# print("Gradients on classification head:")
# print(model.classifier.weight.grad[6,:].sum())
# update
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
global_step += 1
import numpy as np
from seqeval.metrics import (
classification_report,
f1_score,
precision_score,
recall_score,
)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
# put model in evaluation mode
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
with torch.no_grad():
input_ids = batch[0].to(device)
bbox = batch[4].to(device)
attention_mask = batch[1].to(device)
token_type_ids = batch[2].to(device)
labels = batch[3].to(device)
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
# get the loss and logits
tmp_eval_loss = outputs.loss
logits = outputs.logits
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
# compute the predictions
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)
# compute average evaluation loss
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
print(results)
model.print_trainable_parameters()
model.save_pretrained("peft_layoutlm")
!du -h "peft_layoutlm/adapter_model.bin"<jupyter_output>2,8M layoutlm_funsd.pt
|
peft/examples/token_classification/peft_lora_token_cls.ipynb/0
|
{
"file_path": "peft/examples/token_classification/peft_lora_token_cls.ipynb",
"repo_id": "peft",
"token_count": 11949
}
| 195
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
from contextlib import contextmanager
from typing import Any, Optional, Union
import torch
from accelerate.hooks import remove_hook_from_submodules
from torch import nn
from transformers.utils import PushToHubMixin
from peft.utils.constants import DUMMY_MODEL_CONFIG
from .config import PeftConfig
from .peft_model import PeftModel
from .tuners import (
AdaLoraModel,
IA3Model,
LoHaModel,
LoKrModel,
LoraModel,
MixedModel,
OFTModel,
)
from .tuners.mixed import COMPATIBLE_TUNER_TYPES
from .utils import PeftType, _set_adapter, _set_trainable
PEFT_TYPE_TO_MODEL_MAPPING = {
PeftType.LORA: LoraModel,
PeftType.LOHA: LoHaModel,
PeftType.LOKR: LoKrModel,
PeftType.ADALORA: AdaLoraModel,
PeftType.IA3: IA3Model,
PeftType.OFT: OFTModel,
}
def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None:
r"""
Prepares the model for gradient checkpointing if necessary
"""
# Note: same as PeftModel._prepare_model_for_gradient_checkpointing
if not getattr(model, "is_gradient_checkpointing", True):
return model
if not (
getattr(model, "is_loaded_in_8bit", False)
or getattr(model, "is_loaded_in_4bit", False)
or getattr(model, "is_quantized", False)
):
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
elif hasattr(model, "get_input_embeddings"):
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
def _check_config_compatible(peft_config: PeftConfig) -> None:
if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES:
raise ValueError(
f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. "
f"Compatible types are: {COMPATIBLE_TUNER_TYPES}"
)
class PeftMixedModel(PushToHubMixin, torch.nn.Module):
"""
PeftMixedModel for loading mixing different types of adapters for inference.
This class does not support loading/saving, and it shouldn't usually be initialized directly. Instead, use
`get_peft_model` with the argument `mixed=True`.
<Tip>
Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn
more about using different adapter types.
</Tip>
Example:
```py
>>> base_model = ... # load the base model, e.g. from transformers
>>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval()
>>> peft_model.load_adapter(path_to_adapter2, "adapter2")
>>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters
>>> peft_model(data) # forward pass using both adapters
```
Args:
model (`torch.nn.Module`):
The model to be tuned.
config (`PeftConfig`):
The config of the model to be tuned. The adapter type must be compatible.
adapter_name (`str`, `optional`, defaults to `"default"`):
The name of the first adapter.
"""
def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
super().__init__()
_check_config_compatible(peft_config)
_prepare_model_for_gradient_checkpointing(model)
self.modules_to_save = None
self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name)
self.set_modules_to_save(peft_config, adapter_name)
self.config = getattr(model, "config", DUMMY_MODEL_CONFIG)
# the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
# numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
# behavior we disable that in this line.
if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"):
self.base_model.config.pretraining_tp = 1
@property
def peft_config(self) -> dict[str, PeftConfig]:
return self.base_model.peft_config
@property
def active_adapter(self) -> str:
return self.base_model.active_adapter
@property
def active_adapters(self) -> list[str]:
return self.base_model.active_adapters
def get_nb_trainable_parameters(self):
r"""
Returns the number of trainable parameters and number of all parameters in the model.
"""
# note: same as PeftModel.get_nb_trainable_parameters
trainable_params = 0
all_param = 0
for _, param in self.named_parameters():
num_params = param.numel()
# if using DS Zero 3 and the weights are initialized empty
if num_params == 0 and hasattr(param, "ds_numel"):
num_params = param.ds_numel
# Due to the design of 4bit linear layers from bitsandbytes
# one needs to multiply the number of parameters by 2 to get
# the correct number of parameters
if param.__class__.__name__ == "Params4bit":
num_params = num_params * 2
all_param += num_params
if param.requires_grad:
trainable_params += num_params
return trainable_params, all_param
def print_trainable_parameters(self):
"""
Prints the number of trainable parameters in the model.
Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from
num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns
(trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model.
For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for
prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number
of trainable parameters of the backbone transformer model which can be different.
"""
# note: same as PeftModel.print_trainable_parameters
trainable_params, all_param = self.get_nb_trainable_parameters()
print(
f"trainable params: {trainable_params:,d} || "
f"all params: {all_param:,d} || "
f"trainable%: {100 * trainable_params / all_param:.4f}"
)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "base_model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.base_model, name)
def forward(self, *args: Any, **kwargs: Any):
"""
Forward pass of the model.
"""
return self.base_model(*args, **kwargs)
def generate(self, *args: Any, **kwargs: Any):
"""
Generate output.
"""
return self.base_model.generate(*args, **kwargs)
@contextmanager
def disable_adapter(self):
"""
Disables the adapter module.
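
        Example:

        ```py
        >>> with model.disable_adapter():
        ...     model(inputs)  # forward pass with all adapters disabled
        ```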
"""
try:
self.base_model.disable_adapter_layers()
yield
finally:
self.base_model.enable_adapter_layers()
def add_adapter(self, adapter_name: str, peft_config: PeftConfig):
_check_config_compatible(peft_config)
try:
self.peft_config[adapter_name] = peft_config
self.base_model.inject_adapter(self, adapter_name)
except Exception: # something went wrong, roll back
if adapter_name in self.peft_config:
del self.peft_config[adapter_name]
raise
self.set_modules_to_save(peft_config, adapter_name)
def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None:
if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None:
return
if self.modules_to_save is None:
self.modules_to_save = set(modules_to_save)
else:
self.modules_to_save.update(modules_to_save)
_set_trainable(self, adapter_name)
def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
"""
Sets the active adapter(s) for the model.
Note that the order in which the adapters are applied during the forward pass may not be the same as the order
in which they are passed to this function. Instead, the order during the forward pass is determined by the
order in which the adapters were loaded into the model. The active adapters only determine which adapters are
active during the forward pass, but not the order in which they are applied.
Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
not desired, use the following code.
```py
>>> for name, param in model_peft.named_parameters():
... if ...: # some check on name (ex. if 'lora' in name)
... param.requires_grad = False
```
Args:
adapter_name (`str` or `List[str]`):
The name of the adapter(s) to be activated.
"""
if isinstance(adapter_name, str):
adapter_name = [adapter_name]
mismatched = set(adapter_name) - set(self.peft_config.keys())
if mismatched:
raise ValueError(
f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
)
self.base_model.set_adapter(adapter_name)
_set_adapter(self, adapter_name)
def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
if isinstance(adapter_name, str):
adapter_name = [adapter_name]
mismatched = set(adapter_name) - set(self.peft_config.keys())
if mismatched:
raise ValueError(
f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
)
self.base_model.delete_adapter(adapter_name)
def merge_and_unload(self, *args: Any, **kwargs: Any):
r"""
This method merges the adapter layers into the base model. This is needed if someone wants to use the base
model as a standalone model.
Args:
progressbar (`bool`):
whether to show a progressbar indicating the unload and merge process
safe_merge (`bool`):
whether to activate the safe merging check to check if there is any potential Nan in the adapter
weights
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
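
        Example:

        ```py
        >>> merged_model = mixed_model.merge_and_unload()  # base model with adapter weights merged in
        ```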
"""
return self.base_model.merge_and_unload(*args, **kwargs)
def unload(self, *args: Any, **kwargs: Any):
"""
Gets back the base model by removing all the adapter modules without merging. This gives back the original base
model.
"""
return self.base_model.unload(*args, **kwargs)
def get_layer_status(self):
raise TypeError(f"get_layer_status is not supported for {self.__class__.__name__}.")
def get_model_status(self):
raise TypeError(f"get_model_status is not supported for {self.__class__.__name__}.")
@classmethod
def _split_kwargs(cls, kwargs: dict[str, Any]):
return PeftModel._split_kwargs(kwargs)
def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any):
output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs)
# TODO: not quite clear why this is necessary but tests fail without it
self.set_adapter(self.active_adapters)
return output
def create_or_update_model_card(self, output_dir: str):
raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).")
def save_pretrained(
self,
save_directory: str,
safe_serialization: bool = False,
selected_adapters: Optional[list[str]] = None,
**kwargs: Any,
):
raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).")
@classmethod
def from_pretrained(
cls,
model: nn.Module,
model_id: str | os.PathLike,
adapter_name: str = "default",
is_trainable: bool = False,
config: Optional[PeftConfig] = None,
**kwargs: Any,
):
r"""
Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights.
Note that the passed `model` may be modified inplace.
Args:
model (`nn.Module`):
The model to be adapted.
model_id (`str` or `os.PathLike`):
The name of the PEFT configuration to use. Can be either:
- A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face
Hub.
- A path to a directory containing a PEFT configuration file saved using the `save_pretrained`
method (`./my_peft_config_directory/`).
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter to be loaded. This is useful for loading multiple adapters.
is_trainable (`bool`, *optional*, defaults to `False`):
Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and use for
inference
config ([`~peft.PeftConfig`], *optional*):
The configuration object to use instead of an automatically loaded configuration. This configuration
object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
loaded before calling `from_pretrained`.
kwargs: (`optional`):
Additional keyword arguments passed along to the specific PEFT configuration class.
"""
# note: adapted from PeftModel.from_pretrained
from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
# load the config
if config is None:
config = PEFT_TYPE_TO_CONFIG_MAPPING[
PeftConfig._get_peft_type(
model_id,
subfolder=kwargs.get("subfolder", None),
revision=kwargs.get("revision", None),
cache_dir=kwargs.get("cache_dir", None),
use_auth_token=kwargs.get("use_auth_token", None),
)
].from_pretrained(model_id, **kwargs)
elif isinstance(config, PeftConfig):
config.inference_mode = not is_trainable
else:
raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}")
# note: this is different from PeftModel.from_pretrained
if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING:
raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.")
if (getattr(model, "hf_device_map", None) is not None) and len(
set(model.hf_device_map.values()).intersection({"cpu", "disk"})
) > 0:
remove_hook_from_submodules(model)
if config.is_prompt_learning and is_trainable:
# note: should not be possible to reach, but just in case
raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.")
else:
config.inference_mode = not is_trainable
# note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel
model = cls(model, config, adapter_name)
model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
return model
|
peft/src/peft/mixed_model.py/0
|
{
"file_path": "peft/src/peft/mixed_model.py",
"repo_id": "peft",
"token_count": 6949
}
| 196
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import torch.nn as nn
from peft.utils import _freeze_adapter, _get_submodules
from .config import AdaptionPromptConfig, prepare_config
from .layer import AdaptedAttention
from .utils import is_adaption_prompt_trainable
class AdaptionPromptModel(nn.Module):
"""
Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.
The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert
trainable prompts with gates (for zero init).
Notes on the multi-adapter pattern:
- We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter
name.
- Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them
in the dictionary, and replace them with the modules of the new adapter.
- To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the
dictionary.
- Disabling the adapter would also result in the modules being removed from the model.
"""
def __init__(self, model, configs: Dict, adapter_name: str):
super().__init__()
self.model = model
# Store adapter configs by name.
self.peft_config: Dict[str, AdaptionPromptConfig] = {}
# Store lists of the parents of the affected attention modules by adapter name.
# We keep references to the parents so we can swap the adapters in-and-out of the model.
self._parents: Dict[str, List[nn.Module]] = {}
# Store lists of cached AdaptedAttention modules by name.
self._cached_adapters: Dict[str, List] = {}
# The name of the currently active adapter.
self._active_adapter = None
# Whether the adapter is enabled.
self._enabled = True
self.forward = self.model.forward
self.add_adapter(adapter_name, configs[adapter_name])
self._mark_only_adaption_prompts_as_trainable(self.model)
def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:
"""Add an adapter with the given name and config."""
config = prepare_config(config, self.model)
if adapter_name in self.peft_config:
raise ValueError(f"Adapter with name '{adapter_name}' already exists.")
parents = []
for name, _ in self.model.named_modules():
if name.endswith(config.target_modules):
par, _, _ = _get_submodules(self.model, name)
parents.append(par)
if len(parents) < config.adapter_layers:
raise ValueError(
f"Config specifies more adapter layers '{config.adapter_layers}'"
f" than the model has '{len(parents)}'."
)
# Note that if the target modules are not in Sequential, ModuleList, or
# some other PyTorch ordered container, the behavior is undefined as we
# assume here that the order of the modules is the same as the order of
# the transformer decoder layers.
parents = parents[-config.adapter_layers :]
self._parents[adapter_name] = parents
# It is only None during initialization.
# If it is disabled, we don't have to remove the modules.
if self._active_adapter is not None and self._enabled:
self._remove_adapted_attentions(self._active_adapter)
self._active_adapter = adapter_name
self.peft_config[adapter_name] = config
self._create_adapted_attentions(config, parents)
if not self._enabled:
self._remove_adapted_attentions(self._active_adapter)
if config.inference_mode:
_freeze_adapter(self.model, adapter_name)
def set_adapter(self, adapter_name: str) -> None:
"""Set the model to use the adapter with the given name."""
if self._active_adapter == adapter_name:
return
if adapter_name not in self.peft_config:
raise ValueError(f"Adapter with name '{adapter_name}' does not exist.")
if self._enabled:
self._remove_adapted_attentions(self._active_adapter)
self._set_adapted_attentions(adapter_name)
self._active_adapter = adapter_name
def enable_adapter_layers(self):
"""Enable adapter layers by swapping in cached AdaptedAttention modules."""
self._enabled = True
self._set_adapted_attentions(self._active_adapter)
def disable_adapter_layers(self):
"""Disable adapter layers by swapping out AdaptedAttention modules."""
self._enabled = False
self._remove_adapted_attentions(self._active_adapter)
def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:
"""Wrap LlamaAttention modules with newly created AdaptedAttention modules."""
for par in parents:
attn = AdaptedAttention(
model_type=self.model.config.model_type,
adapter_len=config.adapter_len,
model=getattr(par, config.target_modules),
)
setattr(par, config.target_modules, attn)
def _set_adapted_attentions(self, adapter_name: str) -> None:
"""Replace LlamaAttention modules with cached AdaptedAttention modules."""
cached = self._cached_adapters[adapter_name]
del self._cached_adapters[adapter_name]
config = self.peft_config[adapter_name]
for i, par in enumerate(self._parents[adapter_name]):
setattr(par, config.target_modules, cached[i])
def _remove_adapted_attentions(self, adapter_name: str) -> None:
"""Remove AdaptedAttention modules from the model and store them in the cache."""
config = self.peft_config[adapter_name]
adapted_attentions = []
for par in self._parents[adapter_name]:
attn = getattr(par, config.target_modules)
adapted_attentions.append(attn)
setattr(par, config.target_modules, attn.model)
self._cached_adapters[adapter_name] = adapted_attentions
def _mark_only_adaption_prompts_as_trainable(self, model: nn.Module) -> None:
"""Freeze all parameters of the model except the adaption prompts."""
for n, p in model.named_parameters():
if not is_adaption_prompt_trainable(n):
p.requires_grad = False
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
# This is necessary as e.g. causal models have various methods that we
# don't want to re-implement here.
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.model, name)
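def _example_swap_adaption_prompts():  # pragma: no cover - illustrative sketch, not part of the library
    """Hedged sketch of the multi-adapter pattern described in the class docstring.

    "meta-llama/Llama-2-7b-hf" is only a placeholder model id; any causal LM whose
    attention modules match `AdaptionPromptConfig.target_modules` would work.
    """
    from transformers import AutoModelForCausalLM

    from peft import AdaptionPromptConfig, get_peft_model

    base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
    config_1 = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
    config_2 = AdaptionPromptConfig(adapter_layers=2, adapter_len=8, task_type="CAUSAL_LM")
    peft_model = get_peft_model(base, config_1, adapter_name="first")
    # add_adapter caches the AdaptedAttention modules of "first" and swaps in
    # fresh ones for "second"; set_adapter swaps them back again.
    peft_model.add_adapter("second", config_2)
    peft_model.set_adapter("first")
    return peft_model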
|
peft/src/peft/tuners/adaption_prompt/model.py/0
|
{
"file_path": "peft/src/peft/tuners/adaption_prompt/model.py",
"repo_id": "peft",
"token_count": 2864
}
| 197
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import asdict
from enum import Enum
from typing import List, Optional
import torch
from torch import nn
from tqdm import tqdm
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import (
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
ModulesToSaveWrapper,
_get_submodules,
)
from .config import HRAConfig
from .layer import HRAConv2d, HRALayer, HRALinear
class HRAModel(BaseTuner):
"""
Creates Householder reflection adaptation (HRA) model from a pretrained model. The method is described in
https://arxiv.org/abs/2405.17484
Args:
model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
config ([`HRAConfig`]): The configuration of the HRA model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
Returns:
`torch.nn.Module`: The HRA model.
Example:
```py
>>> from diffusers import StableDiffusionPipeline
>>> from peft import HRAModel, HRAConfig
>>> config_te = HRAConfig(
... r=8,
... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
... init_weights=True,
... )
>>> config_unet = HRAConfig(
... r=8,
... target_modules=[
... "proj_in",
... "proj_out",
... "to_k",
... "to_q",
... "to_v",
... "to_out.0",
... "ff.net.0.proj",
... "ff.net.2",
... ],
... init_weights=True,
... )
>>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
>>> model.text_encoder = HRAModel(model.text_encoder, config_te, "default")
>>> model.unet = HRAModel(model.unet, config_unet, "default")
```
**Attributes**:
- **model** ([`~torch.nn.Module`]) -- The model to be adapted.
- **peft_config** ([`HRAConfig`]): The configuration of the HRA model.
"""
prefix: str = "hra_"
def _check_new_adapter_config(self, config: HRAConfig) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
# TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check
# does not fully correspond to the error message.
if (len(self.peft_config) > 1) and (config.bias != "none"):
raise ValueError(
f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
"set bias to 'none' for all adapters."
)
@staticmethod
def _check_target_module_exists(hra_config, key):
return check_target_module_exists(hra_config, key)
def _create_and_replace(
self,
hra_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
bias = hasattr(target, "bias") and target.bias is not None
kwargs = {
"r": hra_config.r,
"apply_GS": hra_config.apply_GS,
"init_weights": hra_config.init_weights,
}
kwargs["bias"] = bias
# If it is not a HRALayer, create a new module, else update it with new adapters
if not isinstance(target, HRALayer):
new_module = self._create_new_module(hra_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
else:
target.update_layer(
adapter_name,
r=hra_config.r,
apply_GS=hra_config.apply_GS,
init_weights=hra_config.init_weights,
)
def _replace_module(self, parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
# It's not necessary to set requires_grad here, as that is handled by
# _mark_only_adapters_as_trainable
# child layer wraps the original module, unpack it
if hasattr(child, "base_layer"):
child = child.base_layer
if not hasattr(new_module, "base_layer"):
new_module.weight = child.weight
if hasattr(child, "bias"):
new_module.bias = child.bias
if getattr(child, "state", None) is not None:
if hasattr(new_module, "base_layer"):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
# dispatch to correct device
for name, module in new_module.named_modules():
if self.prefix in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for n, p in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == "none":
continue
if bias == "all":
for n, p in model.named_parameters():
if "bias" in n:
p.requires_grad = True
elif bias == "hra_only":
for name, m in model.named_modules():
if isinstance(m, HRALayer) and hasattr(m, "bias") and m.bias is not None:
m.bias.requires_grad = True
else:
raise NotImplementedError(f"Requested bias: {bias}, is not implemented.")
@staticmethod
def _create_new_module(hra_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
new_module = HRALinear(target, adapter_name, **kwargs)
elif isinstance(target_base_layer, torch.nn.Conv2d):
new_module = HRAConv2d(target, adapter_name, **kwargs)
else:
raise ValueError(
f"Target module {target} is not supported. "
"Currently, only `torch.nn.Linear` and `torch.nn.Conv2d` are supported."
)
return new_module
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "base_model":
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool = False):
config_dict = {}
for key, value in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
if inference:
config["inference_mode"] = True
config_dict[key] = config
return config_dict
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != "none":
msg = (
f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same "
"output as the the base model would without adaption."
)
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name):
for module in self.model.modules():
if isinstance(module, HRALayer):
if module.merged:
warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = set(
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
)
return peft_config
def _unload_and_optionally_merge(
self,
merge=True,
progressbar: bool = False,
safe_merge: bool = False,
adapter_names: Optional[List[str]] = None,
):
self._unloading_checks(adapter_names)
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
desc = "Unloading " + ("and merging " if merge else "") + "model"
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
parent, target, target_name = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, "base_layer"):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
# save any additional trainable modules part of `modules_to_save`
setattr(parent, target_name, target.modules_to_save[target.active_adapter])
return self.model
def delete_adapter(self, adapter_name: str) -> None:
"""
Deletes an existing adapter.
Args:
adapter_name (str): Name of the adapter to be deleted.
"""
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f"Adapter {adapter_name} does not exist")
del self.peft_config[adapter_name]
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
_, target, _ = _get_submodules(self.model, key)
if isinstance(target, HRALayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def merge_and_unload(
self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None
) -> torch.nn.Module:
r"""
This method merges the HRA layers into the base model. This is needed if someone wants to use the base model as
a standalone model.
Args:
progressbar (`bool`):
whether to show a progressbar indicating the unload and merge process
safe_merge (`bool`):
whether to activate the safe merging check, which verifies that there are no NaNs in the adapter
weights
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
return self._unload_and_optionally_merge(
progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
)
def unload(self) -> torch.nn.Module:
"""
Gets back the base model by removing all the hra modules without merging. This gives back the original base
model.
"""
return self._unload_and_optionally_merge(merge=False)
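def _example_merge_hra_into_base():  # pragma: no cover - illustrative sketch, not part of the library
    """Hedged sketch of `merge_and_unload` on a tiny custom model.

    The `nn.Sequential` below is a stand-in model; the target module names "0"
    and "2" are simply the names of its Linear submodules.
    """
    from peft import HRAConfig, get_peft_model

    base = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
    config = HRAConfig(r=4, target_modules=["0", "2"])
    peft_model = get_peft_model(base, config)
    # Folds the Householder reflections into the Linear weights and returns
    # the plain nn.Sequential again.
    merged = peft_model.merge_and_unload()
    return merged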
|
peft/src/peft/tuners/hra/model.py/0
|
{
"file_path": "peft/src/peft/tuners/hra/model.py",
"repo_id": "peft",
"token_count": 5924
}
| 198
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.lycoris_utils import LycorisLayer
class LoKrLayer(nn.Module, LycorisLayer):
# All names of layers that may contain adapter weights
adapter_layer_names = (
"lokr_w1",
"lokr_w1_a",
"lokr_w1_b",
"lokr_w2",
"lokr_w2_a",
"lokr_w2_b",
"lokr_t2",
)
# other_param_names is defined on parent class
def __init__(self, base_layer: nn.Module) -> None:
super().__init__()
LycorisLayer.__init__(self, base_layer)
# LoKr info
self.lokr_w1 = nn.ParameterDict({})
self.lokr_w1_a = nn.ParameterDict({})
self.lokr_w1_b = nn.ParameterDict({})
self.lokr_w2 = nn.ParameterDict({})
self.lokr_w2_a = nn.ParameterDict({})
self.lokr_w2_b = nn.ParameterDict({})
self.lokr_t2 = nn.ParameterDict({})
@property
def _available_adapters(self) -> Set[str]:
return {
*self.lokr_w1,
*self.lokr_w1_a,
*self.lokr_w1_b,
*self.lokr_w2,
*self.lokr_w2_a,
*self.lokr_w2_b,
*self.lokr_t2,
}
def create_adapter_parameters(
self,
adapter_name: str,
r: int,
shape,
use_w1: bool,
use_w2: bool,
use_effective_conv2d: bool,
):
if use_w1:
self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0]))
else:
self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r))
self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0]))
if len(shape) == 4:
# Conv2d
if use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:]))
elif use_effective_conv2d:
self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3]))
else:
# Linear
if use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1]))
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))
def reset_adapter_parameters(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.zeros_(self.lokr_w1[adapter_name])
else:
nn.init.zeros_(self.lokr_w1_a[adapter_name])
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def reset_adapter_parameters_random(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def update_layer(
self,
adapter_name: str,
r: int,
alpha: float,
rank_dropout: float,
module_dropout: float,
init_weights: bool,
use_effective_conv2d: bool,
decompose_both: bool,
decompose_factor: int,
**kwargs,
) -> None:
"""Internal function to create lokr adapter
Args:
adapter_name (`str`): Name for the adapter to add.
r (`int`): Rank for the added adapter.
alpha (`float`): Alpha for the added adapter.
rank_dropout (`float`): The dropout probability for rank dimension during training
module_dropout (`float`): The dropout probability for disabling adapter during training.
init_weights (`bool`): Whether to initialize adapter weights.
use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1.
decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix.
decompose_factor (`int`): Kronecker product decomposition factor.
"""
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.alpha[adapter_name] = alpha
self.scaling[adapter_name] = alpha / r
self.rank_dropout[adapter_name] = rank_dropout
self.module_dropout[adapter_name] = module_dropout
base_layer = self.get_base_layer()
# Determine shape of LoKr weights
if isinstance(base_layer, nn.Linear):
in_dim, out_dim = base_layer.in_features, base_layer.out_features
in_m, in_n = factorization(in_dim, decompose_factor)
out_l, out_k = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n))  # ((a, b), (c, d)), out_dim = a*b, in_dim = c*d
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2)
use_effective_conv2d = False
elif isinstance(base_layer, nn.Conv2d):
in_dim, out_dim = base_layer.in_channels, base_layer.out_channels
k_size = base_layer.kernel_size
in_m, in_n = factorization(in_dim, decompose_factor)
out_l, out_k = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size)
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = r >= max(shape[0][1], shape[1][1]) / 2
use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
else:
raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}")
# Create weights with provided shape
self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d)
# Initialize weights
if init_weights:
self.reset_adapter_parameters(adapter_name)
else:
self.reset_adapter_parameters_random(adapter_name)
# Move new weights to device
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
# https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224
if adapter_name in self.lokr_w1:
w1 = self.lokr_w1[adapter_name]
else:
w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name]
if adapter_name in self.lokr_w2:
w2 = self.lokr_w2[adapter_name]
elif adapter_name in self.lokr_t2:
w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name])
else:
w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name]
# Make weights with Kronecker product
weight = make_kron(w1, w2)
weight = weight.reshape(self.get_base_layer().weight.shape)
# Perform rank dropout during training - drop rows of addition weights
rank_dropout = self.rank_dropout[adapter_name]
if self.training and rank_dropout:
drop = (torch.rand(weight.size(0)) > rank_dropout).float()
drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
drop /= drop.mean()
weight *= drop
return weight
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
# Execute all the adapters
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
module_dropout = self.module_dropout[active_adapter]
# Modify current execution weights
if (not self.training) or (self.training and torch.rand(1) > module_dropout):
result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
result = result.to(previous_dtype)
return result
class Linear(LoKrLayer):
"""LoKr implemented in Linear layer"""
def __init__(
self,
base_layer: nn.Module,
device: Optional[Union[str, torch.device]] = None,
dtype: Optional[torch.dtype] = None,
adapter_name: str = "default",
r: int = 0,
alpha: float = 0.0,
rank_dropout: float = 0.0,
module_dropout: float = 0.0,
init_weights: bool = True,
**kwargs,
):
super().__init__(base_layer)
# Create adapter and set it active
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
def _get_delta_activations(
self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
# don't add bias here, because the bias is already included in the output of the base_layer
return F.linear(input, delta_weight)
def __repr__(self) -> str:
rep = super().__repr__()
return "lokr." + rep
class Conv2d(LoKrLayer):
"""LoKr implemented in Conv2d layer"""
def __init__(
self,
base_layer: nn.Module,
device: Optional[Union[str, torch.device]] = None,
dtype: Optional[torch.dtype] = None,
adapter_name: str = "default",
r: int = 0,
alpha: float = 0.0,
rank_dropout: float = 0.0,
module_dropout: float = 0.0,
use_effective_conv2d: bool = False,
init_weights: bool = True,
**kwargs,
):
super().__init__(base_layer)
# Create adapter and set it active
self._active_adapter = adapter_name
self.update_layer(
adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
)
def _get_delta_activations(
self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
# don't add bias here, because the bias is already included in the output of the base_layer
base_layer = self.get_base_layer()
return F.conv2d(
input,
delta_weight,
stride=base_layer.stride,
padding=base_layer.padding,
dilation=base_layer.dilation,
groups=base_layer.groups,
)
def __repr__(self) -> str:
rep = super().__repr__()
return "lokr." + rep
# Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11
def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]:
"""Factorizes the provided number into the product of two numbers
Args:
dimension (`int`): The number that needs to be factorized.
factor (`int`, optional):
Factorization divider. The algorithm will try to output two numbers, one of which will be as close to the
factor as possible. If -1 is provided, the algorithm searches for divisors near the square root of the
dimension. Defaults to -1.
Returns:
Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is
always less than or equal to the second.
Example:
```py
>>> factorization(256, factor=-1)
(16, 16)
>>> factorization(128, factor=-1)
(8, 16)
>>> factorization(127, factor=-1)
(1, 127)
>>> factorization(128, factor=4)
(4, 32)
```
"""
if factor > 0 and (dimension % factor) == 0:
m = factor
n = dimension // factor
return m, n
if factor == -1:
factor = dimension
m, n = 1, dimension
length = m + n
while m < n:
new_m = m + 1
while dimension % new_m != 0:
new_m += 1
new_n = dimension // new_m
if new_m + new_n > length or new_m > factor:
break
else:
m, n = new_m, new_n
if m > n:
n, m = m, n
return m, n
def make_weight_cp(t, wa, wb):
rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2]
return rebuild2
def make_kron(w1, w2, scale=1.0):
if len(w2.shape) == 4:
w1 = w1.unsqueeze(2).unsqueeze(2)
w2 = w2.contiguous()
rebuild = torch.kron(w1, w2)
return rebuild * scale
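def _example_lokr_delta_weight():  # pragma: no cover - illustrative sketch, not part of the library
    """Hedged sketch of how the helpers above compose a LoKr delta weight.

    The dimensions and rank below are arbitrary illustration values.
    """
    in_dim, out_dim, r = 64, 128, 4
    in_m, in_n = factorization(in_dim)  # (8, 8)
    out_l, out_k = factorization(out_dim)  # (8, 16)
    # w1 holds the "outer" Kronecker factor, w2 the low-rank "inner" one;
    # their Kronecker product reconstructs a full (out_dim, in_dim) matrix.
    w1 = torch.randn(out_l, in_m)
    w2 = torch.randn(out_k, r) @ torch.randn(r, in_n)
    delta = make_kron(w1, w2)
    assert delta.shape == (out_dim, in_dim)
    return delta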
|
peft/src/peft/tuners/lokr/layer.py/0
|
{
"file_path": "peft/src/peft/tuners/lokr/layer.py",
"repo_id": "peft",
"token_count": 7398
}
| 199
|
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Any, Optional, Union
from torch import nn
from tqdm import tqdm
from peft.tuners import adalora, loha, lokr, lora, oft
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import (
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
ModulesToSaveWrapper,
PeftType,
_get_submodules,
get_auto_gptq_quant_linear,
)
# Collection of constants used for all tuners
COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT)
PREFIXES = [lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix]
Configs = Union[lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig]
Layers = (lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer)
class MixedModel(BaseTuner):
"""
A class that allows mixing different types of adapters in a single model.
Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument
`mixed=True`.
Args:
model (:obj:`nn.Module`):
The model to be tuned.
config (:obj:`PeftConfig`):
The config of the model to be tuned. The adapter type must be compatible.
adapter_name (:obj:`str`):
The name of the first adapter.
"""
def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None:
super().__init__(model, config, adapter_name)
def _check_new_adapter_config(self, config: Configs) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
if not isinstance(config, Configs.__args__):
raise ValueError(
f"{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}."
)
biases = (getattr(config, "bias", None) for config in self.peft_config.values())
biases = [bias for bias in biases if bias not in (None, "none")]
if len(biases) > 1:
raise ValueError(
f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
"set bias to 'none' for all adapters."
)
@staticmethod
def _check_target_module_exists(config: Configs, key: str):
return check_target_module_exists(config, key)
def _create_and_replace(
self,
config: Configs,
*args: Any,
**kwargs: Any,
) -> None:
if isinstance(config, adalora.AdaLoraConfig):
adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, lora.LoraConfig):
lora.LoraModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, loha.LoHaConfig):
loha.LoHaModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, lokr.LoKrConfig):
lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs)
elif isinstance(config, oft.OFTConfig):
oft.OFTModel._create_and_replace(self, config, *args, **kwargs)
else:
raise ValueError(f"Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.")
def _replace_module(self, parent, child_name, new_module, child) -> None:
setattr(parent, child_name, new_module)
# It's not necessary to set requires_grad here, as that is handled by
# _mark_only_adapters_as_trainable
# child layer wraps the original module, unpack it
if hasattr(child, "base_layer"):
child = child.get_base_layer()
elif hasattr(child, "quant_linear_module"):
# TODO maybe not necessary to have special treatment?
child = child.quant_linear_module
if not hasattr(new_module, "base_layer"):
new_module.weight = child.weight
if hasattr(child, "bias"):
new_module.bias = child.bias
if getattr(child, "state", None) is not None:
if hasattr(new_module, "base_layer"):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
# dispatch to correct device
for name, module in new_module.named_modules():
if any(prefix in name for prefix in PREFIXES):
module.to(child.weight.device)
if "ranknum" in name:
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for n, p in model.named_parameters():
if not any(prefix in n for prefix in PREFIXES):
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = getattr(self.peft_config[active_adapter], "bias", "none")
if bias == "none":
continue
if bias == "all":
for n, p in model.named_parameters():
if "bias" in n:
p.requires_grad = True
elif bias == "lora_only":
# TODO: check if this is needed for other supported types
for m in model.modules():
if isinstance(m, Layers) and hasattr(m, "bias") and m.bias is not None:
m.bias.requires_grad = True
else:
raise ValueError(f"Requested bias: {bias}, is not implemented.")
@staticmethod
def _create_new_module(config, adapter_name, target, **kwargs):
gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
if (gptq_quantization_config is not None) or (AutoGPTQQuantLinear is not None):
raise ValueError(f"GPTQ quantization not supported for {config.peft_type.value} (yet).")
loaded_in_8bit = kwargs.pop("loaded_in_8bit", False)
loaded_in_4bit = kwargs.pop("loaded_in_4bit", False)
if loaded_in_8bit or loaded_in_4bit:
raise ValueError(f"8bit and 4bit quantization not supported for {config.peft_type.value} (yet).")
if isinstance(config, adalora.AdaLoraConfig):
new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, lora.LoraConfig):
new_module = lora.LoraModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, loha.LoHaConfig):
new_module = loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, lokr.LoKrConfig):
new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs)
elif isinstance(config, oft.OFTConfig):
new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs)
else:
raise ValueError(f"Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.")
return new_module
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.model, name)
def _set_adapter_layers(self, enabled=True):
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self):
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self):
for active_adapter in self.active_adapters:
val = getattr(self.peft_config[active_adapter], "bias", "none")
if val != "none":
msg = (
f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same "
"output as the the base model would without adaption."
)
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: Union[str, list[str]]) -> None:
for module in self.model.modules():
if isinstance(module, Layers):
if module.merged:
warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = set(
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
)
return peft_config
def _unload_and_optionally_merge(
self,
merge=True,
progressbar: bool = False,
safe_merge: bool = False,
adapter_names: Optional[list[str]] = None,
):
if merge:
if getattr(self.model, "quantization_method", None) == "gptq":
raise ValueError("Cannot merge layers when the model is gptq quantized")
def merge_recursively(module):
# helper function to recursively merge the base_layer of the target
path = []
layer = module
while hasattr(layer, "base_layer"):
path.append(layer)
layer = layer.base_layer
for layer_before, layer_after in zip(path[:-1], path[1:]):
layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names)
layer_before.base_layer = layer_after.base_layer
module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)]
desc = "Unloading " + ("and merging " if merge else "") + "model"
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
parent, target, target_name = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, "base_layer"):
if merge:
merge_recursively(target)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
# save any additional trainable modules part of `modules_to_save`
new_module = target.modules_to_save[target.active_adapter]
if hasattr(new_module, "base_layer"):
# check if the module is itself a tuner layer
if merge:
new_module.merge(safe_merge=safe_merge, adapter_names=adapter_names)
new_module = new_module.get_base_layer()
setattr(parent, target_name, new_module)
return self.model
def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None:
raise NotImplementedError(f"Weighted adapters are not supported for {self.__class__.__name__} (yet).")
def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None:
"""
Deletes an existing adapter.
Args:
adapter_name (Union[str, list[str]]): Name of the adapter(s) to delete.
"""
if isinstance(adapter_name, str):
adapter_names = [adapter_name]
else:
adapter_names = adapter_name
mismatched = set(adapter_names) - set(self.peft_config.keys())
if mismatched:
raise ValueError(
f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}"
)
for adapter_name in adapter_names:
del self.peft_config[adapter_name]
key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)]
new_adapter = None
for key in key_list:
_, target, _ = _get_submodules(self.model, key)
if isinstance(target, BaseTunerLayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapters[:]
self.active_adapter = new_adapter or []
def merge_and_unload(
self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
) -> nn.Module:
r"""
This method merges the layers into the base model. This is needed if someone wants to use the base model as a
standalone model.
Args:
progressbar (`bool`):
whether to show a progressbar indicating the unload and merge process
safe_merge (`bool`):
whether to activate the safe merging check, which verifies that there are no NaNs in the adapter
weights
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
return self._unload_and_optionally_merge(
progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
)
def unload(self) -> nn.Module:
"""
Gets back the base model by removing all the adapter modules without merging. This gives back the original base
model.
"""
return self._unload_and_optionally_merge(merge=False)
def generate(self, *args: Any, **kwargs: Any):
return self.model.generate(*args, **kwargs)
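def _example_mix_lora_and_loha():  # pragma: no cover - illustrative sketch, not part of the library
    """Hedged sketch of mixing adapter types via `get_peft_model(..., mixed=True)`.

    The `nn.Sequential` is a stand-in model; the target module names "0" and
    "2" are simply the names of its Linear submodules.
    """
    from peft import LoHaConfig, LoraConfig, get_peft_model

    base = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
    lora_config = LoraConfig(target_modules=["0"])
    loha_config = LoHaConfig(target_modules=["2"])
    # `mixed=True` returns a PeftMixedModel that wraps this MixedModel tuner.
    peft_model = get_peft_model(base, lora_config, adapter_name="lora", mixed=True)
    peft_model.add_adapter("loha", loha_config)
    peft_model.set_adapter(["lora", "loha"])  # both adapters active at once
    return peft_model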
|
peft/src/peft/tuners/mixed/model.py/0
|
{
"file_path": "peft/src/peft/tuners/mixed/model.py",
"repo_id": "peft",
"token_count": 6670
}
| 200
|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from .config import PeftConfig, PeftType, PromptLearningConfig, TaskType
from .loftq_utils import replace_lora_weights_loftq
from .peft_types import PeftType, TaskType
from .other import (
TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_LNTUNING_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_VERA_TARGET_MODULES_MAPPING,
TRANSFORMERS_MODELS_TO_FOURIERFT_TARGET_MODULES_MAPPING,
CONFIG_NAME,
WEIGHTS_NAME,
SAFETENSORS_WEIGHTS_NAME,
INCLUDE_LINEAR_LAYERS_SHORTHAND,
_set_trainable,
bloom_model_postprocess_past_key_value,
prepare_model_for_kbit_training,
shift_tokens_right,
transpose,
_get_batch_size,
_get_submodules,
_set_adapter,
_freeze_adapter,
ModulesToSaveWrapper,
_prepare_prompt_learning_config,
_is_valid_match,
infer_device,
get_auto_gptq_quant_linear,
get_quantization_config,
id_tensor_storage,
cast_mixed_precision_params,
)
from .save_and_load import get_peft_model_state_dict, set_peft_model_state_dict, load_peft_weights
|
peft/src/peft/utils/__init__.py/0
|
{
"file_path": "peft/src/peft/utils/__init__.py",
"repo_id": "peft",
"token_count": 793
}
| 201
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import re
import shutil
import tempfile
import time
import unittest
from contextlib import contextmanager
from functools import partial
import pytest
import torch
from parameterized import parameterized
from safetensors.torch import load_file as safe_load_file
from torch import nn
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification
from transformers.pytorch_utils import Conv1D
from peft import (
AdaLoraConfig,
BOFTConfig,
FourierFTConfig,
HRAConfig,
IA3Config,
LNTuningConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
OFTConfig,
PeftModel,
TaskType,
VeraConfig,
get_peft_model,
)
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import infer_device
from .testing_common import PeftCommonTester
from .testing_utils import get_state_dict, require_non_cpu
# MLP is a vanilla FF network with only linear layers
# EmbConv1D has an embedding and a Conv1D layer
# Conv2D has a Conv2D layer
TEST_CASES = [
########
# LoRA #
########
("Vanilla MLP 1 LoRA", "MLP", LoraConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 LoRA", "MLP", LoraConfig, {"target_modules": ["lin1"]}),
("Vanilla MLP 4 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0", "lin1"]}),
("Vanilla MLP 5 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 LoRA",
"MLP",
LoraConfig,
{
"target_modules": ["lin0"],
"lora_alpha": 4,
"lora_dropout": 0.1,
},
),
("Vanilla MLP 7 LoRA with DoRA", "MLP", LoraConfig, {"target_modules": ["lin0"], "use_dora": True}),
("Vanilla MLP 8 LoRA with DoRA", "MLP", LoraConfig, {"target_modules": ["lin0", "lin1"], "use_dora": True}),
(
"Vanilla MLP 9 LoRA with DoRA",
"MLP",
LoraConfig,
{"target_modules": "lin1", "use_dora": True, "lora_alpha": 32},
),
("Embedding + transformers Conv1D 1 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["conv1d"]}),
("Embedding + transformers Conv1D 2 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["emb"]}),
("Embedding + transformers Conv1D 3 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["emb", "conv1d"]}),
(
"Embedding + transformers Conv1D 1 DoRA",
"EmbConv1D",
LoraConfig,
{"target_modules": ["conv1d"], "use_dora": True},
),
("Embedding + transformers Conv1D 2 DoRA", "EmbConv1D", LoraConfig, {"target_modules": ["emb"], "use_dora": True}),
(
"Embedding + transformers Conv1D 3 DoRA",
"EmbConv1D",
LoraConfig,
{"target_modules": ["emb", "conv1d"], "use_dora": True},
),
("Conv2d 1 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d"]}),
("Conv2d 2 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d", "lin0"]}),
("Conv2d 1 LoRA with DoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d"], "use_dora": True}),
("Conv2d 2 LoRA with DoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d", "lin0"], "use_dora": True}),
#######
# IA³ #
#######
("Vanilla MLP 1 IA3", "MLP", IA3Config, {"target_modules": "lin0", "feedforward_modules": []}),
("Vanilla MLP 2 IA3", "MLP", IA3Config, {"target_modules": "lin0", "feedforward_modules": "lin0"}),
("Vanilla MLP 3 IA3", "MLP", IA3Config, {"target_modules": ["lin0"], "feedforward_modules": []}),
("Vanilla MLP 4 IA3", "MLP", IA3Config, {"target_modules": ["lin0"], "feedforward_modules": ["lin0"]}),
("Vanilla MLP 5 IA3", "MLP", IA3Config, {"target_modules": ["lin1"], "feedforward_modules": []}),
("Vanilla MLP 6 IA3", "MLP", IA3Config, {"target_modules": ["lin1"], "feedforward_modules": ["lin1"]}),
(
"Vanilla MLP 7 IA3",
"MLP",
IA3Config,
{"target_modules": ["lin0", "lin1"], "feedforward_modules": []},
),
(
"Vanilla MLP 8 IA3",
"MLP",
IA3Config,
{"target_modules": ["lin0", "lin1"], "feedforward_modules": ["lin0", "lin1"]},
),
(
"Vanilla MLP 9 IA3",
"MLP",
IA3Config,
{"target_modules": ["lin0"], "modules_to_save": ["lin1"], "feedforward_modules": ["lin0"]},
),
(
"transformers Conv1D 1 IA3",
"EmbConv1D",
IA3Config,
{"target_modules": ["conv1d"], "feedforward_modules": ["conv1d"]},
),
(
"transformers Conv1D 2 IA3",
"EmbConv1D",
IA3Config,
{"target_modules": ["conv1d", "lin0"], "feedforward_modules": ["conv1d", "lin0"]},
),
(
"transformers Conv1D 1 IA3",
"EmbConv1D",
IA3Config,
{"target_modules": ["conv1d"], "feedforward_modules": ["conv1d"], "modules_to_save": ["lin1"]},
),
("Conv2d 1 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d"], "feedforward_modules": []}),
("Conv2d 2 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d"], "feedforward_modules": ["conv2d"]}),
(
"Conv2d 3 IA3",
"Conv2d",
IA3Config,
{"target_modules": ["conv2d", "lin0"], "feedforward_modules": []},
),
(
"Conv2d 4 IA3",
"Conv2d",
IA3Config,
{"target_modules": ["conv2d", "lin0"], "feedforward_modules": ["conv2d"]},
),
(
"Conv2d 5 IA3",
"Conv2d",
IA3Config,
{"target_modules": ["conv2d", "lin0"], "feedforward_modules": ["conv2d", "lin0"]},
),
########
# LoHa #
########
("Vanilla MLP 1 LOHA", "MLP", LoHaConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin1"]}),
("Vanilla MLP 4 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0", "lin1"]}),
("Vanilla MLP 5 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 LOHA",
"MLP",
LoHaConfig,
{
"target_modules": ["lin0"],
"alpha": 4,
"module_dropout": 0.1,
},
),
("Vanilla MLP 7 LOHA", "MLP", LoHaConfig, {"target_modules": "lin0", "rank_dropout": 0.5}),
("Conv2d 1 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d"]}),
("Conv2d 2 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d", "lin0"]}),
("Conv2d 3 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d"], "use_effective_conv2d": True}),
("Conv2d 4 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True}),
# LoKr
("Vanilla MLP 1 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin1"]}),
("Vanilla MLP 4 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0", "lin1"]}),
("Vanilla MLP 5 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 LOKR",
"MLP",
LoKrConfig,
{
"target_modules": ["lin0"],
"alpha": 4,
"module_dropout": 0.1,
},
),
("Vanilla MLP 7 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0", "rank_dropout": 0.5}),
("Vanilla MLP 8 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0", "decompose_both": True, "r": 1, "alpha": 1}),
("Conv2d 1 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d"]}),
("Conv2d 2 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"]}),
("Conv2d 3 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d"], "use_effective_conv2d": True}),
("Conv2d 4 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True}),
(
"Conv2d 5 LOKR",
"Conv2d",
LoKrConfig,
{"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True, "decompose_both": True},
),
(
"Conv2d 6 LOKR",
"Conv2d",
LoKrConfig,
{"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True, "decompose_factor": 4},
),
(
"Conv2d 7 LOKR",
"Conv2d",
LoKrConfig,
{
"target_modules": ["conv2d", "lin0"],
"use_effective_conv2d": True,
"decompose_both": True,
"decompose_factor": 4,
},
),
########
# OFT #
########
("Vanilla MLP 1 OFT", "MLP", OFTConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 5 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
(
"Vanilla MLP 6 OFT",
"MLP",
OFTConfig,
{
"target_modules": ["lin0"],
"module_dropout": 0.1,
},
),
("Vanilla MLP 7 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "coft": True}),
("Vanilla MLP 8 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "block_share": True}),
("Vanilla MLP 9 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "coft": True, "block_share": True}),
("Conv2d 1 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"]}),
("Conv2d 3 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "coft": True}),
("Conv2d 4 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "block_share": True}),
("Conv2d 5 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "coft": True, "block_share": True}),
########
# HRA #
########
("Vanilla MLP 1 HRA", "MLP", HRAConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 HRA", "MLP", HRAConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 HRA", "MLP", HRAConfig, {"target_modules": ["lin0", "lin1"]}),
("Vanilla MLP 5 HRA", "MLP", HRAConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}),
("Conv2d 1 HRA", "Conv2d", HRAConfig, {"target_modules": ["conv2d"]}),
#############
# LN Tuning #
#############
("LayerNorm 1 LNTuning", "MLP_LayerNorm", LNTuningConfig, {"target_modules": "layernorm0"}),
("LayerNorm 2 LNTuning", "MLP_LayerNorm", LNTuningConfig, {"target_modules": ["layernorm0"]}),
(
"LayerNorm 3 LNTuning",
"MLP_LayerNorm",
LNTuningConfig,
{"target_modules": ["layernorm0"], "modules_to_save": ["layernorm1"]},
),
("Linear 4 LNTuning", "MLP_LayerNorm", LNTuningConfig, {"target_modules": "lin0"}),
("Linear 5 LNTuning", "MLP_LayerNorm", LNTuningConfig, {"target_modules": ["lin0"]}),
########
# BOFT #
########
("Vanilla MLP 1 BOFT", "MLP", BOFTConfig, {"target_modules": ["lin1"], "boft_block_size": 2}),
(
"Vanilla MLP 2 BOFT",
"MLP",
BOFTConfig,
{"target_modules": ["lin1"], "modules_to_save": ["lin0"], "boft_block_size": 2},
),
(
"Vanilla MLP 3 BOFT",
"MLP",
BOFTConfig,
{
"target_modules": ["lin1"],
"boft_block_size": 2,
"boft_dropout": 0.1,
},
),
(
"Vanilla MLP 4 BOFT",
"MLP",
BOFTConfig,
{"target_modules": ["lin1"], "boft_block_size": 2, "boft_block_num": 0, "boft_n_butterfly_factor": 1},
),
(
"Vanilla MLP 5 BOFT",
"MLP",
BOFTConfig,
{"target_modules": ["lin1"], "boft_block_size": 0, "boft_block_num": 2, "boft_n_butterfly_factor": 1},
),
(
"Vanilla MLP 6 BOFT",
"MLP",
BOFTConfig,
{"target_modules": ["lin1"], "boft_block_size": 10, "boft_block_num": 0, "boft_n_butterfly_factor": 2},
),
(
"Conv2d 1 BOFT",
"Conv2d",
BOFTConfig,
{"target_modules": ["conv2d"], "boft_block_size": 45, "boft_block_num": 0, "boft_n_butterfly_factor": 1},
),
(
"Conv2d 2 BOFT",
"Conv2d",
BOFTConfig,
{"target_modules": ["conv2d"], "boft_block_size": 0, "boft_block_num": 1, "boft_n_butterfly_factor": 1},
),
(
"MLP2 1 BOFT",
"MLP2",
BOFTConfig,
{"target_modules": ["lin1"], "boft_block_size": 2, "boft_block_num": 0, "boft_n_butterfly_factor": 3},
),
(
"MLP2 2 BOFT",
"MLP2",
BOFTConfig,
{"target_modules": ["lin1"], "boft_block_size": 0, "boft_block_num": 8, "boft_n_butterfly_factor": 3},
),
(
"Conv2d2 1 BOFT",
"Conv2d2",
BOFTConfig,
{"target_modules": ["conv2d"], "boft_block_size": 2, "boft_block_num": 0, "boft_n_butterfly_factor": 2},
),
(
"Conv2d2 1 BOFT",
"Conv2d2",
BOFTConfig,
{"target_modules": ["conv2d"], "boft_block_size": 2, "boft_block_num": 0, "boft_n_butterfly_factor": 3},
),
########
# VeRA #
########
("Vanilla MLP 1 VeRA", "MLP", VeraConfig, {"target_modules": "lin0"}),
("Vanilla MLP 2 VeRA", "MLP", VeraConfig, {"target_modules": ["lin0"]}),
("Vanilla MLP 3 VeRA", "MLP", VeraConfig, {"target_modules": ["lin1"]}),
("Vanilla MLP 4 VeRA", "MLP", VeraConfig, {"target_modules": ["lin0", "lin1"]}),
(
"Vanilla MLP 5 VeRA",
"MLP",
VeraConfig,
{"target_modules": ["lin0"], "modules_to_save": ["lin1"]},
),
(
"Embedding + transformers Conv1D 1 VeRA",
"EmbConv1D",
VeraConfig,
{"target_modules": ["conv1d"]},
),
########
# FourierFT #
########
("Vanilla MLP 1 FourierFT", "MLP", FourierFTConfig, {"n_frequency": 10, "target_modules": "lin0"}),
("Vanilla MLP 2 FourierFT", "MLP", FourierFTConfig, {"n_frequency": 10, "target_modules": ["lin0"]}),
("Vanilla MLP 3 FourierFT", "MLP", FourierFTConfig, {"n_frequency": 10, "target_modules": ["lin1"]}),
(
"Vanilla MLP 5 FourierFT",
"MLP",
FourierFTConfig,
{"n_frequency": 10, "target_modules": ["lin0"], "modules_to_save": ["lin1"]},
),
(
"Vanilla MLP 6 FourierFT",
"MLP",
FourierFTConfig,
{"n_frequency": 10, "target_modules": ["lin0", "lin1"], "modules_to_save": ["lin1"]},
),
(
"Vanilla MLP 7 FourierFT",
"MLP",
FourierFTConfig,
{
"n_frequency_pattern": {"lin0": 5, "lin1": 10},
"target_modules": ["lin0", "lin1"],
"modules_to_save": ["lin1"],
},
),
]
# For this test matrix, each tuple consists of:
# - test name
# - tuner method
# - config_cls
# - 1st config kwargs
# - 2nd config kwargs
# The model used for this test is `MLP`, which uses linear layers `lin0` and `lin1`
MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES = [
(
"LoRA Same",
"lora",
LoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False},
{"target_modules": ["lin0"], "init_lora_weights": False},
),
(
"LoRA Different",
"lora",
LoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False},
{"target_modules": ["lin1"], "init_lora_weights": False},
),
(
"IA3 Same",
"ia3",
IA3Config,
{
"target_modules": ["lin0"],
"feedforward_modules": ["lin0"],
"init_ia3_weights": False,
},
{
"target_modules": ["lin0"],
"feedforward_modules": ["lin0"],
"init_ia3_weights": False,
},
),
(
"IA3 Different",
"ia3",
IA3Config,
{
"target_modules": ["lin0"],
"feedforward_modules": ["lin0"],
"init_ia3_weights": False,
},
{
"target_modules": ["lin1"],
"feedforward_modules": ["lin1"],
"init_ia3_weights": False,
},
),
(
"AdaLora Same",
"adalora",
AdaLoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
),
(
"AdaLora Different",
"adalora",
AdaLoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin1"], "init_lora_weights": False, "inference_mode": True},
),
(
"FourierFT Same",
"fourierft",
FourierFTConfig,
{"n_frequency": 10, "target_modules": ["lin0"]},
{"n_frequency": 10, "target_modules": ["lin0"]},
),
(
"FourierFT Different",
"fourierft",
FourierFTConfig,
{"n_frequency": 10, "target_modules": ["lin0"]},
{"n_frequency": 10, "target_modules": ["lin1"]},
),
# Note: Currently, we cannot target lin0 and lin1 with different adapters when using VeRA. The reason is that the
# first adapter being created will result in a vera_A or vera_B shape that is too small for the next adapter
# (remember that VeRA shares these parameters across all layers), which results in an error.
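    # Hence, only the "Same" case is listed for VeRA below.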
(
"VeRA Same",
"vera",
VeraConfig,
{"target_modules": ["lin0"], "init_weights": False},
{"target_modules": ["lin0"], "init_weights": False},
),
(
"HRA Same",
"hra",
HRAConfig,
{"target_modules": ["lin0"], "init_weights": False},
{"target_modules": ["lin0"], "init_weights": False},
),
(
"HRA Different",
"hra",
HRAConfig,
{"target_modules": ["lin0"], "init_weights": False},
{"target_modules": ["lin1"], "init_weights": False},
),
]
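# Maps each config class to the parameter-name prefix that its tuner adds to the model. The tests below use these
# prefixes (together with "modules_to_save") to tell adapter parameters apart from base model parameters.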
PREFIXES = {
IA3Config: "ia3_",
LoraConfig: "lora_",
LoHaConfig: "hada_",
LoKrConfig: "lokr_",
OFTConfig: "oft_",
BOFTConfig: "boft_",
LNTuningConfig: "ln_tuning_",
VeraConfig: "vera_lambda_",
FourierFTConfig: "fourierft_",
HRAConfig: "hra_",
}
class MLP(nn.Module):
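    """Minimal MLP with two linear layers (lin0, lin1); returned by MockTransformerWrapper for model_id "MLP"."""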
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.drop = nn.Dropout(0.5)
self.lin1 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.drop(X)
X = self.lin1(X)
X = self.sm(X)
return X
class MLP_LayerNorm(nn.Module):
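    """MLP variant with a LayerNorm before each linear layer; returned for model_id "MLP_LayerNorm"."""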
def __init__(self, bias=True):
super().__init__()
self.layernorm0 = nn.LayerNorm(10, 10)
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.drop = nn.Dropout(0.5)
self.layernorm1 = nn.LayerNorm(20, 20)
self.lin1 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.layernorm0(X)
X = self.lin0(X)
X = self.relu(X)
X = self.drop(X)
X = self.layernorm1(X)
X = self.lin1(X)
X = self.sm(X)
return X
class MLP2(nn.Module):
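    """MLP variant with a 32-dim hidden layer; returned for model_id "MLP2"."""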
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 32, bias=bias)
self.relu = nn.ReLU()
self.drop = nn.Dropout(0.5)
self.lin1 = nn.Linear(32, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.drop(X)
X = self.lin1(X)
X = self.sm(X)
return X
class Block(nn.Module):
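    """Single hidden block (lin0 -> relu -> dropout -> lin1) used as the repeated unit of DeepMLP."""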
def __init__(self, bias=True):
super().__init__()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.relu = nn.ReLU()
self.drop = nn.Dropout(0.5)
self.lin1 = nn.Linear(20, 10, bias=bias)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.drop(X)
X = self.lin1(X)
return X
class DeepMLP(nn.Module):
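    """MLP built from a stack of Blocks; used e.g. by TestMultiRankAdapter to exercise rank_pattern/alpha_pattern."""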
def __init__(self, bias=True, num_hidden_layers=12):
super().__init__()
self.layers = nn.ModuleList([Block(bias=bias) for _ in range(num_hidden_layers)])
self.out = nn.Linear(10, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
        X = X.float()
for layer in self.layers:
X = layer(X)
X = self.out(X)
X = self.sm(X)
return X
class ModelEmbConv1D(nn.Module):
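    """Model with an embedding layer followed by a transformers Conv1D layer; returned for model_id "EmbConv1D"."""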
def __init__(self, emb_size=100):
super().__init__()
self.emb = nn.Embedding(emb_size, 5)
self.conv1d = Conv1D(1, 5)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.emb(X)
X = self.conv1d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
class ModelEmbWithEmbeddingUtils(nn.Module):
# Adds `get_input_embeddings` and `get_output_embeddings` methods to mimic 🤗 transformers models
def __init__(self):
super().__init__()
self.embed_tokens = nn.Embedding(100, 5)
self.conv1d = Conv1D(1, 5)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.embed_tokens(X)
X = self.conv1d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return None
class ModelConv2D(nn.Module):
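    """Model with a single Conv2d layer; returned for model_id "Conv2d"."""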
def __init__(self):
super().__init__()
self.conv2d = nn.Conv2d(5, 10, 3)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin0 = nn.Linear(10, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float().reshape(-1, 5, 3, 3)
X = self.conv2d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin0(X)
X = self.sm(X)
return X
class ModelConv2D2(nn.Module):
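    """Model where a linear layer feeds into a Conv2d layer; returned for model_id "Conv2d2"."""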
def __init__(self):
super().__init__()
self.lin0 = nn.Linear(10, 40)
self.conv2d = nn.Conv2d(8, 32, 3)
self.relu = nn.ReLU()
self.flat = nn.Flatten()
self.lin1 = nn.Linear(32, 2)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = X.reshape(-1, 8, 3, 3)
X = self.conv2d(X)
X = self.relu(X)
X = self.flat(X)
X = self.lin1(X)
X = self.sm(X)
return X
class MockTransformerWrapper:
"""Mock class to behave like a transformers model.
This is needed because the tests initialize the model by calling transformers_class.from_pretrained.
"""
@classmethod
def from_pretrained(cls, model_id, torch_dtype=None):
# set the seed so that from_pretrained always returns the same model
torch.manual_seed(0)
if torch_dtype is None:
torch_dtype = torch.float32
if model_id == "MLP":
return MLP().to(torch_dtype)
if model_id == "EmbConv1D":
return ModelEmbConv1D().to(torch_dtype)
if model_id == "Conv2d":
return ModelConv2D().to(torch_dtype)
if model_id == "MLP_LayerNorm":
return MLP_LayerNorm().to(torch_dtype)
if model_id == "MLP2":
return MLP2().to(torch_dtype)
if model_id == "Conv2d2":
return ModelConv2D2().to(torch_dtype)
raise ValueError(f"model_id {model_id} not implemented")
class PeftCustomModelTester(unittest.TestCase, PeftCommonTester):
"""TODO"""
transformers_class = MockTransformerWrapper
def prepare_inputs_for_testing(self):
X = torch.arange(90).view(9, 10).to(self.torch_device)
return {"X": X}
@parameterized.expand(TEST_CASES)
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
# This test does not work with custom models because it assumes that
# there is always a method get_input_embeddings that returns a layer
# which does not need updates. Instead, a new test is added below that
# checks that LoRA works as expected.
pass
@parameterized.expand(TEST_CASES)
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(TEST_CASES)
def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
elif issubclass(config_cls, LNTuningConfig):
pass
else:
config_kwargs["init_weights"] = False
self._test_merge_layers(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs):
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
self._test_merge_layers_fp16(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_merge_layers_is_idempotent(self, test_name, model_id, config_cls, config_kwargs):
# calling merge twice with the same arguments should not change the output
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
self._test_merge_layers_is_idempotent(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_safe_merge(self, test_name, model_id, config_cls, config_kwargs):
        # check that merging with safe_merge=True works as expected
config_kwargs = config_kwargs.copy()
if issubclass(config_cls, LoraConfig):
config_kwargs["init_lora_weights"] = False
elif issubclass(config_cls, IA3Config):
config_kwargs["init_ia3_weights"] = False
elif issubclass(config_cls, LNTuningConfig):
            # LNTuning does not take init_weights
pass
else:
config_kwargs["init_weights"] = False
self._test_safe_merge(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_generate(self, test_name, model_id, config_cls, config_kwargs):
# Custom models do not (necessarily) have a generate method, so this test is not performed
pass
@parameterized.expand(TEST_CASES)
def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
# Custom models do not (necessarily) have a generate method, so this test is not performed
pass
@parameterized.expand(TEST_CASES)
def test_training_custom_models(self, test_name, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_training_custom_models_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
# At the moment, layer indexing only works when layer names conform to a specific pattern, which is not
# guaranteed here. Therefore, this test is not performed.
pass
@parameterized.expand(TEST_CASES)
def test_training_custom_models_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_forward_output_finite(self, test_name, model_id, config_cls, config_kwargs):
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.eval()
with torch.no_grad():
output = model(**X)
assert torch.isfinite(output).all()
@parameterized.expand(TEST_CASES)
def test_only_params_are_updated(self, test_name, model_id, config_cls, config_kwargs):
# An explicit test that when using an adapter on a custom model, only the adapter parameters are updated during
# training
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model_before = copy.deepcopy(model)
model.train()
# this high learning rate was found through testing to be necessary to avoid flakiness
lr = 100.0 if config_kwargs.get("use_dora") and model_id == "EmbConv1D" else 0.5
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
loss = y_pred.sum()
loss.backward()
optimizer.step()
tol = 1e-4
params_before = dict(model_before.named_parameters())
params_after = dict(model.named_parameters())
assert params_before.keys() == params_after.keys()
prefix = PREFIXES[config_cls]
for name, param_before in params_before.items():
param_after = params_after[name]
if (prefix in name) or ("modules_to_save" in name):
# target_modules and modules_to_save _are_ updated
assert not torch.allclose(param_before, param_after, atol=tol, rtol=tol)
else:
assert torch.allclose(param_before, param_after, atol=tol, rtol=tol)
@parameterized.expand(TEST_CASES)
def test_parameters_after_loading_model(self, test_name, model_id, config_cls, config_kwargs):
# An explicit test that when loading a trained model, the parameters are loaded correctly
# see issue #808
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.train()
lr = 0.5 if not config_kwargs.get("use_dora") else 0.1 # otherwise we get nan
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
loss = y_pred.sum()
loss.backward()
optimizer.step()
tol = 1e-4
params_before = get_state_dict(model)
# note: no need to sanity check if parameters were updated at all, this
# is already covered in the previous test
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
model_from_pretrained = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)
params_after = get_state_dict(model_from_pretrained)
assert params_before.keys() == params_after.keys()
for name, param_before in params_before.items():
param_after = params_after[name]
assert torch.allclose(param_before, param_after, atol=tol, rtol=tol)
@parameterized.expand(TEST_CASES)
def test_disable_adapters(self, test_name, model_id, config_cls, config_kwargs):
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device).eval()
outputs_base = model(**X)
if issubclass(config_cls, FourierFTConfig):
config_kwargs = config_kwargs.copy()
config_kwargs["init_weights"] = True
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.eval()
outputs_before = model(**X)
assert torch.allclose(outputs_base, outputs_before)
model.train()
# EmbConv1D is slow to learn for some reason
lr = 0.01 if model_id != "EmbConv1D" else 1.0
        if issubclass(config_cls, LNTuningConfig):
# LayerNorm tuning is slow to learn
lr = 1.0
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
y = torch.arange(len(y_pred)).to(self.torch_device) % 2
loss = nn.functional.nll_loss(y_pred, y)
loss.backward()
optimizer.step()
model.eval()
outputs_after = model(**X)
with model.disable_adapter():
outputs_disabled = model(**X)
# check that after leaving the disable_adapter context, everything is enabled again
outputs_enabled_after_disable = model(**X)
if self.torch_device == "cpu":
            # LayerNorm runs in float32 on CPU, so differences in outputs are smaller
rtol, atol = 1e-8, 1e-8
else:
rtol, atol = 1e-5, 1e-8
assert not torch.allclose(outputs_before, outputs_after, rtol=rtol, atol=atol)
assert torch.allclose(outputs_before, outputs_disabled)
assert torch.allclose(outputs_after, outputs_enabled_after_disable)
@parameterized.expand(TEST_CASES)
def test_disable_adapters_with_merging(self, test_name, model_id, config_cls, config_kwargs):
# same as test_disable_adapters, but with merging
X = self.prepare_inputs_for_testing()
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
if issubclass(config_cls, FourierFTConfig):
config_kwargs = config_kwargs.copy()
config_kwargs["init_weights"] = True
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
model.eval()
outputs_before = model(**X)
model.train()
        if issubclass(config_cls, LNTuningConfig):
# LayerNorm tuning is slow to learn
lr = 1.0
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
else:
# Adam optimizer since SGD isn't great for small models with IA3 + Conv1D
lr = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# train at least 3 steps for all parameters to be updated (probably this is required because of symmetry
# breaking of some LoRA layers that are initialized with constants)
for _ in range(3):
optimizer.zero_grad()
y_pred = model(**X)
y = torch.arange(len(y_pred)).to(self.torch_device) % 2
loss = nn.functional.nll_loss(y_pred, y)
loss.backward()
optimizer.step()
model.eval()
outputs_unmerged = model(**X)
model.merge_adapter()
outputs_after = model(**X)
with model.disable_adapter():
outputs_disabled = model(**X)
# check that after leaving the disable_adapter context, everything is enabled again
outputs_enabled_after_disable = model(**X)
atol, rtol = 1e-5, 1e-5 # tolerances higher than defaults since merging introduces some numerical instability
if issubclass(config_cls, IA3Config) and model_id == "Conv2d": # more instability with Conv2d + IA3
atol, rtol = 1e-3, 1e-3
if config_kwargs.get("use_dora") and model_id == "EmbConv1D":
atol, rtol = 1e-4, 1e-4
# check that there is a difference in results after training
assert not torch.allclose(outputs_before, outputs_after, atol=atol, rtol=rtol)
if self.torch_device in ["mlu"] and model_id in ["Conv2d"]:
atol, rtol = 1e-3, 1e-2 # MLU
# unmerged or merged should make no difference
assert torch.allclose(outputs_after, outputs_unmerged, atol=atol, rtol=rtol)
# check that disabling adapters gives the same results as before training
assert torch.allclose(outputs_before, outputs_disabled, atol=atol, rtol=rtol)
# check that enabling + disabling adapters does not change the results
assert torch.allclose(outputs_after, outputs_enabled_after_disable, atol=atol, rtol=rtol)
@parameterized.expand(TEST_CASES)
def test_disable_adapter_with_bias_warns(self, test_name, model_id, config_cls, config_kwargs):
# When training biases in lora, disabling adapters does not reset the biases, so the output is not what users
# might expect. Therefore, a warning should be given.
# Note: We test only with custom models since they run really fast. There is really no point in testing the same
# thing with decoder, encoder_decoder, etc.
        if config_cls != LoraConfig and config_cls != BOFTConfig:
            # skip this test for other configs, as the bias argument is specific to LoRA and BOFT
self.skipTest("Testing bias warnings only for LoraConfig or BOFTConfig")
if not issubclass(config_cls, (LoraConfig, BOFTConfig)):
self.skipTest("Bias argument is only supported for LoRA or BOFT models")
def run_with_disable(config_kwargs, bias):
config_kwargs = config_kwargs.copy()
config_kwargs["bias"] = bias
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
peft_model = get_peft_model(model, config)
with peft_model.disable_adapter():
pass # there is nothing to be done
if config_cls == LoraConfig:
# check that bias=all and bias=lora_only give a warning with the correct message
msg_start = "Careful, disabling adapter layers with bias configured to be"
with pytest.warns(UserWarning, match=msg_start):
run_with_disable(config_kwargs, bias="lora_only")
with pytest.warns(UserWarning, match=msg_start):
run_with_disable(config_kwargs, bias="all")
if config_cls == BOFTConfig:
# check that bias=all and bias=boft_only give a warning with the correct message
msg_start = "Careful, disabling adapter layers with bias configured to be"
with pytest.warns(UserWarning, match=msg_start):
run_with_disable(config_kwargs, bias="boft_only")
with pytest.warns(UserWarning, match=msg_start):
run_with_disable(config_kwargs, bias="all")
        # For bias=none, there should be no warning. Unfortunately, AFAIK unittest has no option to assert that no
        # warning is given; therefore, we check that assertWarns raises an AssertionError when we look for a warning
bias_warning_was_given = False
try:
with self.assertWarns(UserWarning) as cm:
run_with_disable(config_kwargs, bias="none")
# if we get here, it means there was no AssertionError, i.e. there are warnings -- let's check that they
# are not related to the bias setting
if any(warning.message.args[0].startswith(msg_start) for warning in cm.warnings):
bias_warning_was_given = True
except AssertionError:
# This is good, there was an AssertionError, i.e. there was no warning
pass
if bias_warning_was_given:
# This is bad, there was a warning about the bias when there should not have been any.
self.fail("There should be no warning when bias is set to 'none'")
@parameterized.expand(TEST_CASES)
def test_active_adapter(self, test_name, model_id, config_cls, config_kwargs):
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
assert model.active_adapters == ["default"]
assert model.active_adapter == "default"
# at this stage, "default" is still the activate adapter, "other" is disabled
model.add_adapter("other", config)
assert model.active_adapters == ["default"]
assert model.active_adapter == "default"
# set "other" as the active adapter
model.set_adapter("other")
assert model.active_adapters == ["other"]
assert model.active_adapter == "other"
# set both adapters as active
# Note: On the PeftModel, there cannot be multiple active adapters, so we have to go through model.base_model
# instead.
model.base_model.set_adapter(["default", "other"])
# model.active_adapters works, as it delegates to the base_model
assert model.active_adapters == ["default", "other"]
# model.active_adapter would not work, thus we have to check the base_model directly
assert model.base_model.active_adapter == ["default", "other"]
@parameterized.expand(TEST_CASES)
def test_disable_adapters_exiting_context_restores_previous_state(
self, test_name, model_id, config_cls, config_kwargs
):
# Test that when we exit the disable_adapter context, we correctly restore the enabled state of the modules as
# they were before the context.
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
tuner_modules = [module for module in model.modules() if isinstance(module, BaseTunerLayer)]
# all layers should be enabled
assert all(not module.disable_adapters for module in tuner_modules)
with model.disable_adapter():
pass
# this should not change after exiting the context
assert all(not module.disable_adapters for module in tuner_modules)
# now disable all layers
model.disable_adapter_layers()
assert all(module.disable_adapters for module in tuner_modules)
with model.disable_adapter():
pass
assert all(module.disable_adapters for module in tuner_modules)
@parameterized.expand(TEST_CASES)
def test_disable_adapters_exiting_context_irregular_state(self, test_name, model_id, config_cls, config_kwargs):
# When we have a model where some adapters are enabled and others are disabled, we should get a warning when
# entering the disable_adapter context because we cannot correctly restore the state of the adapters from
# before the context. After exiting the context, all adapters will be enabled, which is the status quo of how
# we deal with this.
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
model = get_peft_model(model, config)
tuner_modules = [module for module in model.modules() if isinstance(module, BaseTunerLayer)]
# now we mix the states, some enabled some not
if len(tuner_modules) < 2:
# next check only works with more than 1 tuner module
return
# disable a single layer
tuner_modules[0].enable_adapters(False)
# sanity check that we have both enabled and disabled layers
assert {module.disable_adapters for module in tuner_modules} == {True, False}
# check that we get a warning with irregular states
msg = "The model contains some adapter layers that are enabled and others that are disabled"
with self.assertWarnsRegex(UserWarning, expected_regex=msg):
with model.disable_adapter():
pass
# when encountering irregular adapters, we enable all adapters at the end of the context
assert all(not module.disable_adapters for module in tuner_modules)
@parameterized.expand(TEST_CASES)
def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(TEST_CASES)
def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs):
self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)
def test_weight_bias_attributes(self):
model = MLP()
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(model, config)
assert hasattr(model.base_model.model.lin0, "weight")
assert hasattr(model.base_model.model.lin0, "bias")
def test_multiple_adapters_automatic_modules_to_save(self):
# See issue 1574
# When we use certain task types, PeftModel.modules_to_save is automatically updated to include some extra
# layers not specified in the PeftConfig. This attribute should be honored for all adapters, not just for
# the default adapter.
config0 = LoraConfig(task_type=TaskType.SEQ_CLS)
config1 = LoraConfig(task_type=TaskType.SEQ_CLS)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model = get_peft_model(model, config0)
# sanity check
assert model.modules_to_save
model.add_adapter("other", config1)
assert "default" in model.base_model.classifier.modules_to_save
assert "other" in model.base_model.classifier.modules_to_save
@parameterized.expand([IA3Config, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, HRAConfig])
def test_multiple_adapters_mixed_modules_to_save(self, config_cls):
# See issue 1574
# Check that we can have a model where one adapter has modules_to_save and the other doesn't. It should be
# possible to switch between those adapters and to use them.
if hasattr(config_cls, "feedforward_modules"): # IA³
config_cls = partial(config_cls, feedforward_modules=["lin0"])
config0 = config_cls(target_modules=["lin0"], modules_to_save=["lin1"])
config1 = config_cls(target_modules=["lin0"])
model = MLP()
model = get_peft_model(model, config0).to(self.torch_device)
model.add_adapter("other", config1)
assert "default" in model.base_model.lin1.modules_to_save
assert "other" not in model.base_model.lin1.modules_to_save
# check that switching adapters and predicting does not raise
inputs = self.prepare_inputs_for_testing()
# "default" adapter is active
model(**inputs)
# switch to "other" adapter
model.set_adapter("other")
model(**inputs)
@parameterized.expand([IA3Config, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, HRAConfig])
def test_multiple_adapters_mixed_modules_to_save_order_switched(self, config_cls):
# See issue 1574
# Same test as test_multiple_adapters_mixed_modules_to_save, but this time the 2nd adapter has modules_to_save.
if hasattr(config_cls, "feedforward_modules"): # IA³
config_cls = partial(config_cls, feedforward_modules=["lin0"])
config0 = config_cls(target_modules=["lin0"])
config1 = config_cls(target_modules=["lin0"], modules_to_save=["lin1"])
model = MLP()
model = get_peft_model(model, config0).to(self.torch_device)
model.add_adapter("other", config1)
assert "default" not in model.base_model.lin1.modules_to_save
assert "other" in model.base_model.lin1.modules_to_save
# check that switching adapters and predicting does not raise
inputs = self.prepare_inputs_for_testing()
# "default" adapter is active
model(**inputs)
# switch to "other" adapter
model.set_adapter("other")
model(**inputs)
def test_multiple_adapters_mixed_modules_to_save_merging_adapters(self):
# See issue 1574
        # This test is similar to test_multiple_adapters_mixed_modules_to_save, but it also checks that merging adapter
        # weights works when one adapter has modules_to_save and the other doesn't
config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
config1 = LoraConfig(target_modules=["lin0"])
model = MLP()
model = get_peft_model(model, config0).to(self.torch_device)
model.add_adapter("other", config1)
# check that this does not raise
model.add_weighted_adapter(["default", "other"], weights=[1.0, 1.0], adapter_name="merged")
# since one of the adapters that was merged has a modules_to_save, that one should be used for the merged
# adapter
assert "default" in model.base_model.model.lin1.modules_to_save
assert "other" not in model.base_model.model.lin1.modules_to_save
assert "merged" in model.base_model.model.lin1.modules_to_save
# check that using the merged adapter does not raise
model.set_adapter("merged")
inputs = self.prepare_inputs_for_testing()
model(**inputs)
def test_multiple_adapters_same_modules_to_save_merging_adapters_raises(self):
# See issue 1574
# This test is similar to test_multiple_adapters_mixed_modules_to_save_merging_adapters but here the two
# adapters target the same module with modules_to_save. In this case, trying to merge the adapter weights
# should raise an error.
config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
config1 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
model = MLP()
model = get_peft_model(model, config0).to(self.torch_device)
model.add_adapter("other", config1)
msg = re.escape(
"Cannot add weighted adapters if they target the same module with modules_to_save, but found 1 such "
"instance(s)."
)
with pytest.raises(ValueError, match=msg):
model.add_weighted_adapter(["default", "other"], weights=[1.0, 1.0], adapter_name="merged")
def test_multiple_adapters_seq_cls_mixed_modules_to_save_merging_adapters(self):
# See issue 1574
# This test is similar to test_multiple_adapters_mixed_modules_to_save_merging_adapters but uses a SEQ_CLS
# model like in test_multiple_adapters_automatic_modules_to_save. This should raise an error because the same
# module is implicitly targeted by modules_to_save twice.
config0 = LoraConfig(task_type=TaskType.SEQ_CLS)
config1 = LoraConfig(task_type=TaskType.SEQ_CLS)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model = get_peft_model(model, config0)
model.add_adapter("other", config1)
msg = re.escape(
"Cannot add weighted adapters if they target the same module with modules_to_save, but found 1 such "
"instance(s)."
)
with pytest.raises(ValueError, match=msg):
model.add_weighted_adapter(["default", "other"], weights=[1.0, 1.0], adapter_name="merged")
def test_existing_model_card(self):
# ensure that if there is already a model card, it is not overwritten
model = MLP()
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
# create a model card
text = "---\nmeta: hello\n---\nThis is a model card\n"
with open(os.path.join(tmp_dirname, "README.md"), "w") as f:
f.write(text)
model.save_pretrained(tmp_dirname)
with open(os.path.join(tmp_dirname, "README.md")) as f:
model_card = f.read()
assert "library_name: peft" in model_card
assert "meta: hello" in model_card
assert "This is a model card" in model_card
def test_non_existing_model_card(self):
        # ensure that if there is no model card yet, a new pre-filled one is created
model = MLP()
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
model.save_pretrained(tmp_dirname)
with open(os.path.join(tmp_dirname, "README.md")) as f:
model_card = f.read()
assert "library_name: peft" in model_card
# rough check that the model card is pre-filled
assert len(model_card) > 1000
@parameterized.expand(["auto", True, False])
def test_targeting_lora_to_embedding_layer(self, save_embedding_layers):
model = ModelEmbWithEmbeddingUtils()
config = LoraConfig(target_modules=["embed_tokens", "lin0"], init_lora_weights=False)
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
if save_embedding_layers == "auto":
# assert warning
msg_start = "Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`."
with pytest.warns(UserWarning, match=msg_start):
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
else:
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
from safetensors.torch import load_file as safe_load_file
state_dict = safe_load_file(os.path.join(tmp_dirname, "adapter_model.safetensors"))
if save_embedding_layers in ["auto", True]:
assert "base_model.model.embed_tokens.base_layer.weight" in state_dict
assert torch.allclose(
model.base_model.model.embed_tokens.base_layer.weight,
state_dict["base_model.model.embed_tokens.base_layer.weight"],
)
else:
assert "base_model.model.embed_tokens.base_layer.weight" not in state_dict
del state_dict
@parameterized.expand(["auto", True, False])
def test_targeting_lora_to_embedding_layer_non_transformers(self, save_embedding_layers):
model = ModelEmbConv1D()
config = LoraConfig(target_modules=["emb", "lin0"], init_lora_weights=False)
model = get_peft_model(model, config)
with tempfile.TemporaryDirectory() as tmp_dirname:
if save_embedding_layers is True:
with pytest.warns(
UserWarning,
match=r"Could not identify embedding layer\(s\) because the model is not a 🤗 transformers model\.",
):
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
else:
model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers)
from safetensors.torch import load_file as safe_load_file
state_dict = safe_load_file(os.path.join(tmp_dirname, "adapter_model.safetensors"))
assert "base_model.model.emb.base_layer.weight" not in state_dict
del state_dict
def test_load_resized_embedding_ignore_mismatched_sizes(self):
# issue #1605
# Make it possible to load a LoRA layer that targets an embedding layer even if the sizes mismatch by passing
# ignore_mismatched_sizes=True
model = ModelEmbConv1D(emb_size=100)
config = LoraConfig(target_modules=["emb", "lin0"], init_lora_weights=False)
model = get_peft_model(model, config)
# note: not using the context manager here because it fails on Windows CI for some reason
tmp_dirname = tempfile.mkdtemp()
try:
model.save_pretrained(tmp_dirname)
model = ModelEmbConv1D(emb_size=105)
# first check that this raises
with pytest.raises(RuntimeError) as exc:
PeftModel.from_pretrained(model, tmp_dirname)
msg = exc.value.args[0]
assert "size mismatch" in msg and "100" in msg and "105" in msg
# does not raise
PeftModel.from_pretrained(model, tmp_dirname, ignore_mismatched_sizes=True)
finally:
try:
shutil.rmtree(tmp_dirname)
except PermissionError:
# windows error
pass
@parameterized.expand(
[
LoraConfig(target_modules=["lin0"], init_lora_weights=False),
LoKrConfig(target_modules=["lin0"], init_weights=False),
LoHaConfig(target_modules=["lin0"], init_weights=False),
AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False),
IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"], init_ia3_weights=False),
OFTConfig(target_modules=["lin0"], init_weights=False),
BOFTConfig(target_modules=["lin0"], init_weights=False, boft_block_size=2),
HRAConfig(target_modules=["lin0"], init_weights=False),
]
)
def test_adapter_name_makes_no_difference(self, config0):
# It should not matter whether we use the default adapter name or a custom one
model_cls = MLP
input = torch.arange(90).reshape(9, 10).to(self.torch_device)
# base model
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
output_base = base_model(input)
# default name
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model_default = get_peft_model(base_model, config0, adapter_name="default").eval().to(self.torch_device)
output_default = peft_model_default(input)
sd_default = peft_model_default.state_dict()
# custom name 1
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model_custom1 = get_peft_model(base_model, config0, adapter_name="adapter").eval().to(self.torch_device)
output_custom1 = peft_model_custom1(input)
sd_custom1 = peft_model_custom1.state_dict()
# custom name 2
torch.manual_seed(0)
base_model = model_cls().eval().to(self.torch_device)
torch.manual_seed(0)
peft_model_custom2 = (
get_peft_model(base_model, config0, adapter_name="other-name").eval().to(self.torch_device)
)
output_custom2 = peft_model_custom2(input)
sd_custom2 = peft_model_custom2.state_dict()
assert len(sd_default) == len(sd_custom1) == len(sd_custom2)
for key in sd_default:
key1 = key.replace("default", "adapter")
key2 = key.replace("default", "other-name")
assert key1 in sd_custom1
assert key2 in sd_custom2
for k0, k1, k2 in zip(sd_default, sd_custom1, sd_custom2):
assert torch.allclose(sd_default[k0], sd_custom1[k1])
assert torch.allclose(sd_default[k0], sd_custom2[k2])
assert not torch.allclose(output_base, output_default)
assert not torch.allclose(output_base, output_custom1)
assert not torch.allclose(output_base, output_custom2)
assert torch.allclose(output_custom1, output_custom2)
assert torch.allclose(output_default, output_custom1)
def test_gpt2_dora_merge_and_unload(self):
# see https://github.com/huggingface/peft/pull/1588#discussion_r1537914207
model = AutoModelForCausalLM.from_pretrained("gpt2")
config = LoraConfig(task_type="CAUSAL_LM", use_dora=True)
model = get_peft_model(model, config)
# should not raise an error
model.merge_and_unload()
def test_gpt2_dora_merge_and_unload_safe_merge(self):
# see https://github.com/huggingface/peft/pull/1588#discussion_r1537914207
model = AutoModelForCausalLM.from_pretrained("gpt2")
config = LoraConfig(task_type="CAUSAL_LM", use_dora=True)
model = get_peft_model(model, config)
# should not raise an error
model.merge_and_unload(safe_merge=True)
def test_dora_save_and_load_remapping(self):
# Here we test the refactor of DoRA which changed lora_magnitude_vector from a ParameterDict to a ModuleDict
# with a DoraLayer instance. The old parameter is now the "weight" attribute of that layer. Since we want the
# state_dict format not to change, we ensure that the ".weight" part of the key is removed.
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = LoraConfig(task_type="CAUSAL_LM", use_dora=True)
model = get_peft_model(model, config)
state_dict = model.state_dict()
# sanity check: state dict contains "lora_magnitude_vector.default.weight" keys
assert any("lora_magnitude_vector.default.weight" in k for k in state_dict)
# save the model, check the state dict
# note: not using the context manager here because it fails on Windows CI for some reason
tmp_dirname = tempfile.mkdtemp()
try:
model.save_pretrained(tmp_dirname)
state_dict_adapter = safe_load_file(os.path.join(tmp_dirname, "adapter_model.safetensors"))
# note that in the state dict, the "default" part of the key is removed
assert not any("lora_magnitude_vector.weight" in k for k in state_dict_adapter)
del model
loaded = PeftModel.from_pretrained(AutoModelForCausalLM.from_pretrained("facebook/opt-125m"), tmp_dirname)
finally:
try:
shutil.rmtree(tmp_dirname)
except PermissionError:
# windows error
pass
state_dict_loaded = loaded.state_dict()
assert state_dict.keys() == state_dict_loaded.keys()
for k in state_dict:
assert torch.allclose(state_dict[k], state_dict_loaded[k])
class TestMultiRankAdapter(unittest.TestCase):
"""Tests related to multirank LoRA adapters"""
def test_multirank(self):
config_1 = LoraConfig(
r=8,
lora_alpha=8,
init_lora_weights=False,
target_modules=["lin0", "lin1"],
)
config_2 = LoraConfig(
r=8,
lora_alpha=8,
init_lora_weights=False,
target_modules=["lin0", "lin1"],
rank_pattern={"lin0": 4},
alpha_pattern={"lin0": 4},
)
# Add first adapter
model = get_peft_model(MLP(), config_1, adapter_name="first")
# Add second adapter
model.add_adapter("second", config_2)
# Extract current and expected ranks
rank_current = model.lin0.lora_A["second"].weight.shape[0]
rank_expected = config_2.rank_pattern["lin0"]
assert rank_current == rank_expected, f"Rank {rank_current} is not equal to expected {rank_expected}"
def test_multirank_2(self):
rank_pattern = {}
alpha_pattern = {}
r = 4
lora_alpha = 8
for i in range(10):
rank = 64 // (i + 1)
for j in range(2):
rank_pattern[f"layers.{i}.lin{j}"] = rank
alpha_pattern[f"layers.{i}.lin{j}"] = 2 * rank
config = LoraConfig(
r=r,
lora_alpha=lora_alpha,
init_lora_weights=False,
target_modules=["lin0", "lin1"],
rank_pattern=rank_pattern,
alpha_pattern=alpha_pattern,
)
# Add first adapter
model = get_peft_model(DeepMLP(), config, adapter_name="first")
# Add second adapter
model.add_adapter("second", config)
for adapter in ["first", "second"]:
for key, module in model.base_model.model.named_modules():
if isinstance(module, BaseTunerLayer):
rank_expected = rank_pattern.get(key, r)
rank_current = module.lora_A[adapter].weight.shape[0]
assert (
rank_current == rank_expected
), f"Rank {rank_current} is not equal to expected {rank_expected}"
class TestRepr(unittest.TestCase):
"""Tests related to the repr of adapted models"""
def test_repr_lora_linear(self):
config = LoraConfig(target_modules=["lin0"])
model = get_peft_model(MLP(), config)
print_output = repr(model.model.lin0)
assert print_output.startswith("lora.Linear")
assert "in_features=10" in print_output
assert "out_features=20" in print_output
assert "lora_A" in print_output
assert "lora_B" in print_output
assert "default" in print_output
def test_repr_lora_embedding(self):
config = LoraConfig(target_modules=["emb"])
model = get_peft_model(ModelEmbConv1D(), config)
print_output = repr(model.model.emb)
assert print_output.startswith("lora.Embedding")
assert "100, 5" in print_output
assert "lora_embedding_A" in print_output
assert "lora_embedding_B" in print_output
assert "default" in print_output
def test_repr_lora_conv1d(self):
config = LoraConfig(target_modules=["conv1d"])
model = get_peft_model(ModelEmbConv1D(), config)
print_output = repr(model.model.conv1d)
assert print_output.startswith("lora.Linear")
assert "in_features=5" in print_output
assert "out_features=1" in print_output
assert "lora_A" in print_output
assert "lora_B" in print_output
assert "default" in print_output
def test_repr_lora_conv2d(self):
config = LoraConfig(target_modules=["conv2d"])
model = get_peft_model(ModelConv2D(), config)
print_output = repr(model.model.conv2d)
assert print_output.startswith("lora.Conv2d")
assert "5, 10" in print_output
assert "kernel_size=(3, 3)" in print_output
assert "stride=(1, 1)" in print_output
assert "lora_A" in print_output
assert "lora_B" in print_output
assert "default" in print_output
class MultipleActiveAdaptersTester(unittest.TestCase):
"""
A test class to test the functionality of multiple active adapters.
This is not specifically tied to custom models, it's just easy to test here and testing it on all types of models
would be overkill.
"""
def prepare_inputs_for_testing(self):
X = torch.arange(90).view(9, 10)
return {"X": X}
def set_multiple_active_adapters(self, model, adapter_names):
for module in model.modules():
if isinstance(module, BaseTunerLayer):
module.set_adapter(adapter_names)
@parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES)
def test_multiple_active_adapters_forward(
self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2
):
torch.manual_seed(0)
model = MLP(bias=tuner_method != "ia3")
model.eval()
X = self.prepare_inputs_for_testing()
config_1 = config_cls(**config_kwargs_1)
config_2 = config_cls(**config_kwargs_2)
peft_model = get_peft_model(model, config_1, adapter_name="adapter_1")
peft_model.add_adapter("adapter_2", config_2)
# set adapter_1
peft_model.set_adapter("adapter_1")
adapter_1_output = peft_model(**X)
# set adapter_2
peft_model.set_adapter("adapter_2")
adapter_2_output = peft_model(**X)
# set ["adapter_1", "adapter_2"]
self.set_multiple_active_adapters(peft_model, ["adapter_1", "adapter_2"])
combined_output = peft_model(**X)
assert not torch.allclose(adapter_1_output, adapter_2_output, atol=1e-5)
assert not torch.allclose(adapter_1_output, combined_output, atol=1e-5)
assert not torch.allclose(adapter_2_output, combined_output, atol=1e-5)
if tuner_method == "lora":
# create a weighted adapter combining both adapters and check that
# its output is same as setting multiple active adapters
peft_model.add_weighted_adapter(
["adapter_1", "adapter_2"], [1.0, 1.0], "new_combined_adapter", combination_type="cat"
)
peft_model.set_adapter("new_combined_adapter")
new_combined_output = peft_model(**X)
assert torch.allclose(new_combined_output, combined_output, atol=1e-5)
@parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES)
def test_multiple_active_adapters_merge_and_unmerge(
self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2
):
torch.manual_seed(0)
model = MLP(bias=tuner_method != "ia3")
model.eval()
X = self.prepare_inputs_for_testing()
base_output = model(**X)
config_1 = config_cls(**config_kwargs_1)
config_2 = config_cls(**config_kwargs_2)
peft_model = get_peft_model(model, config_1, adapter_name="adapter_1")
peft_model.add_adapter("adapter_2", config_2)
# set ["adapter_1", "adapter_2"]
self.set_multiple_active_adapters(peft_model, ["adapter_1", "adapter_2"])
combined_output = peft_model(**X)
peft_model.merge_adapter()
merged_combined_output = peft_model(**X)
assert torch.allclose(merged_combined_output, combined_output, atol=1e-5)
peft_model.unmerge_adapter()
with peft_model.disable_adapter():
disabled_adapter_output = peft_model(**X)
assert torch.allclose(disabled_adapter_output, base_output, atol=1e-4)
@parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES)
def test_merge_layers_multi(self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2):
torch.manual_seed(0)
model = MLP(bias=tuner_method != "ia3")
model.eval()
config_1 = config_cls(**config_kwargs_1)
config_2 = config_cls(**config_kwargs_2)
model = get_peft_model(model, config_1)
dummy_input = self.prepare_inputs_for_testing()
model.eval()
with torch.inference_mode():
logits_adapter_1 = model(**dummy_input)[0]
model.add_adapter("adapter-2", config_2)
model.set_adapter("adapter-2")
model.eval()
with torch.inference_mode():
logits_adapter_2 = model(**dummy_input)[0]
assert not torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3)
model.set_adapter("default")
with torch.inference_mode():
logits_adapter_1_after_set = model(**dummy_input)[0]
assert torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3)
model_copy = copy.deepcopy(model)
model_copy_2 = copy.deepcopy(model)
model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"])
with torch.inference_mode():
logits_merged_all = model_merged_all(**dummy_input)[0]
assert not torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3)
assert not torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3)
model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"])
with torch.inference_mode():
logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0]
assert torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3)
model_merged_adapter_default = model_copy_2.merge_and_unload(adapter_names=["default"])
with torch.inference_mode():
logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0]
assert torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3)
class RequiresGradTester(unittest.TestCase):
"""Test that requires_grad is set correctly in specific circumstances
    See issue #899.
This is not specifically tied to custom models, it's just easy to test here and testing it on all types of models
would be overkill.
"""
def check_requires_grad(self, model, *params_expected: str):
# Check that only the given parameters have requires_grad=True, and all others have requires_grad=False.
# Calling without arguments besides the model means that all parameters should have requires_grad=False.
params_with_requires_grad = [name for name, param in model.named_parameters() if param.requires_grad]
diff = set(params_expected).symmetric_difference(set(params_with_requires_grad))
msg = f"Expected {params_expected} to require gradients, got {params_with_requires_grad}"
assert len(diff) == 0, msg
def test_requires_grad_modules_to_save_default(self):
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(MLP(), config)
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
def test_requires_grad_modules_to_save_disabling(self):
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(MLP(), config)
# when disabling the adapter, the original module's grad should be enabled and vice versa
peft_model.disable_adapter_layers()
self.check_requires_grad(
peft_model,
"base_model.model.lin1.original_module.weight",
"base_model.model.lin1.original_module.bias",
)
# when re-enabling the adapter, the original module's grad should be disabled and vice versa
peft_model.enable_adapter_layers()
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# when using the disable_adapter context, the original module's grad should be enabled and vice versa
with peft_model.disable_adapter():
self.check_requires_grad(
peft_model,
"base_model.model.lin1.original_module.weight",
"base_model.model.lin1.original_module.bias",
)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
def test_requires_grad_modules_to_save_multiple_adapters(self):
config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.default.weight",
"base_model.model.lin1.modules_to_save.default.bias",
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config1 as active, should lead to adapter1 requiring grad
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.modules_to_save.adapter1.weight",
"base_model.model.lin1.modules_to_save.adapter1.bias",
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
def test_requires_grad_lora_different_targets(self):
# test two different LoRA adapters that target different modules
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoraConfig(target_modules=["lin1"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1.weight",
"base_model.model.lin1.lora_B.adapter1.weight",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1.weight",
"base_model.model.lin1.lora_B.adapter1.weight",
)
def test_requires_grad_lora_same_targets(self):
# same as previous test, except that LoRA adapters target the same layer
config0 = LoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoraConfig(target_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default.weight",
"base_model.model.lin0.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
def test_requires_grad_ia3_different_targets(self):
# test two different IA3 adapters that target different modules
config0 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = IA3Config(target_modules=["lin1"], feedforward_modules=["lin1"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.ia3_l.adapter1",
)
def test_requires_grad_ia3_same_targets(self):
# same as previous test, except that IA3 adapters target the same layer
config0 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
def test_requires_grad_adalora_different_targets(self):
# test two different AdaLora adapters that target different modules
config0 = AdaLoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = AdaLoraConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1",
"base_model.model.lin1.lora_B.adapter1",
"base_model.model.lin1.lora_E.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lora_A.adapter1",
"base_model.model.lin1.lora_B.adapter1",
"base_model.model.lin1.lora_E.adapter1",
)
def test_requires_grad_adalora_same_targets(self):
# same as previous test, except that AdaLora adapters target the same layer
config0 = AdaLoraConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = AdaLoraConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.default",
"base_model.model.lin0.lora_B.default",
"base_model.model.lin0.lora_E.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1",
"base_model.model.lin0.lora_B.adapter1",
"base_model.model.lin0.lora_E.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1",
"base_model.model.lin0.lora_B.adapter1",
"base_model.model.lin0.lora_E.adapter1",
)
def test_requires_grad_lora_conv2d(self):
# test two different LoRA adapters that target different modules
config0 = LoraConfig(target_modules=["conv2d"])
peft_model = get_peft_model(ModelConv2D(), config0)
config1 = LoraConfig(target_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.lora_A.default.weight",
"base_model.model.conv2d.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.lora_A.default.weight",
"base_model.model.conv2d.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lora_A.adapter1.weight",
"base_model.model.lin0.lora_B.adapter1.weight",
)
def test_requires_grad_lora_emb_conv1d(self):
# test two different LoRA adapters that target different modules
config0 = LoraConfig(target_modules=["conv1d"])
peft_model = get_peft_model(ModelEmbConv1D(), config0)
config1 = LoraConfig(target_modules=["emb"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.lora_A.default.weight",
"base_model.model.conv1d.lora_B.default.weight",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.lora_A.default.weight",
"base_model.model.conv1d.lora_B.default.weight",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.emb.lora_embedding_A.adapter1",
"base_model.model.emb.lora_embedding_B.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.emb.lora_embedding_A.adapter1",
"base_model.model.emb.lora_embedding_B.adapter1",
)
def test_requires_grad_ia3_conv1d(self):
        # test two different IA3 adapters that target different modules
config0 = IA3Config(target_modules=["conv1d"], feedforward_modules=[])
peft_model = get_peft_model(ModelEmbConv1D(), config0)
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv1d.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
def test_requires_grad_ia3_conv2d(self):
        # test two different IA3 adapters that target different modules
config0 = IA3Config(target_modules=["conv2d"], feedforward_modules=["conv2d"])
peft_model = get_peft_model(ModelConv2D(), config0)
config1 = IA3Config(target_modules=["lin0"], feedforward_modules=[])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.ia3_l.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.conv2d.ia3_l.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.ia3_l.adapter1",
)
def test_requires_grad_loha_different_targets(self):
# test two different LoHa adapters that target different modules
config0 = LoHaConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoHaConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.hada_w1_a.adapter1",
"base_model.model.lin1.hada_w1_b.adapter1",
"base_model.model.lin1.hada_w2_a.adapter1",
"base_model.model.lin1.hada_w2_b.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.hada_w1_a.adapter1",
"base_model.model.lin1.hada_w1_b.adapter1",
"base_model.model.lin1.hada_w2_a.adapter1",
"base_model.model.lin1.hada_w2_b.adapter1",
)
def test_requires_grad_loha_same_targets(self):
# same as previous test, except that LoHa adapters target the same layer
config0 = LoHaConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoHaConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.default",
"base_model.model.lin0.hada_w1_b.default",
"base_model.model.lin0.hada_w2_a.default",
"base_model.model.lin0.hada_w2_b.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.adapter1",
"base_model.model.lin0.hada_w1_b.adapter1",
"base_model.model.lin0.hada_w2_a.adapter1",
"base_model.model.lin0.hada_w2_b.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hada_w1_a.adapter1",
"base_model.model.lin0.hada_w1_b.adapter1",
"base_model.model.lin0.hada_w2_a.adapter1",
"base_model.model.lin0.hada_w2_b.adapter1",
)
def test_requires_grad_lokr_different_targets(self):
# test two different LoKr adapters that target different modules
config0 = LoKrConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoKrConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lokr_w1.adapter1",
"base_model.model.lin1.lokr_w2.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.lokr_w1.adapter1",
"base_model.model.lin1.lokr_w2.adapter1",
)
def test_requires_grad_lokr_same_targets(self):
# same as previous test, except that LoKr adapters target the same layer
config0 = LoKrConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = LoKrConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.default",
"base_model.model.lin0.lokr_w2.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.adapter1",
"base_model.model.lin0.lokr_w2.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.lokr_w1.adapter1",
"base_model.model.lin0.lokr_w2.adapter1",
)
def test_requires_grad_oft_different_targets(self):
# test two different OFT adapters that target different modules
config0 = OFTConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = OFTConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.oft_r.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.oft_r.adapter1",
)
def test_requires_grad_oft_same_targets(self):
# same as previous test, except that OFT adapters target the same layer
config0 = OFTConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = OFTConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.oft_r.adapter1",
)
def test_requires_grad_hra_different_targets(self):
# test two different HRA adapters that target different modules
config0 = HRAConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = HRAConfig(target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hra_u.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hra_u.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.hra_u.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.hra_u.adapter1",
)
def test_requires_grad_hra_same_targets(self):
# same as previous test, except that HRA adapters target the same layer
config0 = HRAConfig(target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = HRAConfig(target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hra_u.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hra_u.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hra_u.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.hra_u.adapter1",
)
def test_requires_grad_boft_different_targets(self):
        # test two different BOFT adapters that target different modules
config0 = BOFTConfig(target_modules=["lin0"], boft_block_size=2)
peft_model = get_peft_model(MLP2(), config0)
config1 = BOFTConfig(target_modules=["lin1"], boft_block_size=2, inference_mode=True)
peft_model.add_adapter("adapter1", config1)
        # active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.boft_R.default",
"base_model.model.lin0.boft_s.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.boft_R.default",
"base_model.model.lin0.boft_s.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.boft_R.adapter1",
"base_model.model.lin1.boft_s.adapter1",
)
        # disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.boft_R.adapter1",
"base_model.model.lin1.boft_s.adapter1",
)
def test_requires_grad_boft_same_targets(self):
# same as previous test, except that BOFT adapters target the same layer
config0 = BOFTConfig(target_modules=["lin1"], boft_block_size=2)
peft_model = get_peft_model(MLP(), config0)
config1 = BOFTConfig(target_modules=["lin1"], boft_block_size=2, inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin1.boft_R.default",
"base_model.model.lin1.boft_s.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.boft_R.default",
"base_model.model.lin1.boft_s.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.boft_R.adapter1",
"base_model.model.lin1.boft_s.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.boft_R.adapter1",
"base_model.model.lin1.boft_s.adapter1",
)
def test_requires_grad_lntuning_different_targets(self):
config0 = LNTuningConfig(
target_modules=["layernorm0"],
)
peft_model = get_peft_model(MLP_LayerNorm(), config0)
config1 = LNTuningConfig(
target_modules=["layernorm1"],
inference_mode=True,
)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.layernorm0.ln_tuning_layers.default.weight",
"base_model.model.layernorm0.ln_tuning_layers.default.bias",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.layernorm0.ln_tuning_layers.default.weight",
"base_model.model.layernorm0.ln_tuning_layers.default.bias",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.layernorm1.ln_tuning_layers.adapter1.weight",
"base_model.model.layernorm1.ln_tuning_layers.adapter1.bias",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.layernorm1.ln_tuning_layers.adapter1.weight",
"base_model.model.layernorm1.ln_tuning_layers.adapter1.bias",
)
def test_requires_grad_lntuning_same_targets(self):
config0 = LNTuningConfig(
target_modules=["layernorm0"],
)
peft_model = get_peft_model(MLP_LayerNorm(), config0)
config1 = LNTuningConfig(target_modules=["layernorm0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.layernorm0.ln_tuning_layers.default.weight",
"base_model.model.layernorm0.ln_tuning_layers.default.bias",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.layernorm0.ln_tuning_layers.default.weight",
"base_model.model.layernorm0.ln_tuning_layers.default.bias",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.layernorm0.ln_tuning_layers.adapter1.weight",
"base_model.model.layernorm0.ln_tuning_layers.adapter1.bias",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.layernorm0.ln_tuning_layers.adapter1.weight",
"base_model.model.layernorm0.ln_tuning_layers.adapter1.bias",
)
def test_requires_grad_vera_different_targets(self):
# Test two different VeRA adapters that target different modules. Most notably, ensure that vera_A and vera_B
# don't require grads.
# requires a model with at least 2 layers with the same shapes
class MLP2(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.relu = nn.ReLU()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.lin1 = nn.Linear(20, 20, bias=bias) # lin1 and lin2 have same shape
self.lin2 = nn.Linear(20, 20, bias=bias)
self.lin3 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
X = self.relu(X)
X = self.lin2(X)
X = self.relu(X)
X = self.lin3(X)
X = self.sm(X)
return X
config0 = VeraConfig(target_modules=["lin1"])
peft_model = get_peft_model(MLP2(), config0)
config1 = VeraConfig(target_modules=["lin2"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin1.vera_lambda_b.default",
"base_model.model.lin1.vera_lambda_d.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.vera_lambda_b.default",
"base_model.model.lin1.vera_lambda_d.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin2.vera_lambda_b.adapter1",
"base_model.model.lin2.vera_lambda_d.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin2.vera_lambda_b.adapter1",
"base_model.model.lin2.vera_lambda_d.adapter1",
)
def test_requires_grad_vera_same_targets(self):
# Test two different VeRA adapters that target the same module. Most notably, ensure that vera_A and vera_B
# don't require grads.
# requires a model with at least 2 layers with the same shapes
class MLP2(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.relu = nn.ReLU()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.lin1 = nn.Linear(20, 20, bias=bias) # lin1 and lin2 have same shape
self.lin2 = nn.Linear(20, 20, bias=bias)
self.lin3 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = X.float()
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
X = self.relu(X)
X = self.lin2(X)
X = self.relu(X)
X = self.lin3(X)
X = self.sm(X)
return X
config0 = VeraConfig(target_modules=["lin1", "lin2"])
peft_model = get_peft_model(MLP2(), config0)
config1 = VeraConfig(target_modules=["lin1", "lin2"])
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin1.vera_lambda_b.default",
"base_model.model.lin1.vera_lambda_d.default",
"base_model.model.lin2.vera_lambda_b.default",
"base_model.model.lin2.vera_lambda_d.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.vera_lambda_b.default",
"base_model.model.lin1.vera_lambda_d.default",
"base_model.model.lin2.vera_lambda_b.default",
"base_model.model.lin2.vera_lambda_d.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.vera_lambda_b.adapter1",
"base_model.model.lin1.vera_lambda_d.adapter1",
"base_model.model.lin2.vera_lambda_b.adapter1",
"base_model.model.lin2.vera_lambda_d.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.vera_lambda_b.adapter1",
"base_model.model.lin1.vera_lambda_d.adapter1",
"base_model.model.lin2.vera_lambda_b.adapter1",
"base_model.model.lin2.vera_lambda_d.adapter1",
)
def test_requires_grad_fourierft_different_targets(self):
# test two different fourierft adapters that target different modules
config0 = FourierFTConfig(n_frequency=10, target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = FourierFTConfig(n_frequency=10, target_modules=["lin1"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.fourierft_spectrum.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.fourierft_spectrum.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin1.fourierft_spectrum.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
self.check_requires_grad(
peft_model,
"base_model.model.lin1.fourierft_spectrum.adapter1",
)
def test_requires_grad_fourierft_same_targets(self):
        # same as previous test, except that FourierFT adapters target the same layer
config0 = FourierFTConfig(n_frequency=10, target_modules=["lin0"])
peft_model = get_peft_model(MLP(), config0)
config1 = FourierFTConfig(n_frequency=10, target_modules=["lin0"], inference_mode=True)
peft_model.add_adapter("adapter1", config1)
# active adapter is still "default"
self.check_requires_grad(
peft_model,
"base_model.model.lin0.fourierft_spectrum.default",
)
# set config0 as active, should not change anything
peft_model.set_adapter("default")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.fourierft_spectrum.default",
)
        # change active adapter to adapter1
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.fourierft_spectrum.adapter1",
)
# disable all adapters
with peft_model.disable_adapter():
self.check_requires_grad(peft_model)
# after context is exited, return to the previous state
peft_model.set_adapter("adapter1")
self.check_requires_grad(
peft_model,
"base_model.model.lin0.fourierft_spectrum.adapter1",
)
class TestMixedAdapterBatches:
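    # These tests check that different adapters can be applied to different samples within the same batch by passing
    # `adapter_names` to the forward call, with "__base__" selecting the plain base model for a sample.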
torch_device = infer_device()
@pytest.fixture
def mlp_lora(self):
"""A simple MLP with 2 LoRA adapters"""
torch.manual_seed(0)
base_model = MLP().to(self.torch_device).eval()
config0 = LoraConfig(target_modules=["lin0"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["lin0"], r=16, init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
return peft_model
def run_checks(self, model, inputs):
# This checks that we can have mixed adapters in a single batch. The test works by creating the outputs for the
# base model, adapter 0, and adapter 1 separately. Then, we create an output with mixed adapters, where the
        # samples [0, 3, 6] are for the base model, [1, 4, 7] for adapter 0, and [2, 5, 8] for adapter 1. Finally, we
# check that the outputs of the mixed batch are correct for the corresponding indices.
adapter_name0, adapter_name1 = model.peft_config.keys()
with model.disable_adapter():
output_base = model(**inputs)
model.set_adapter(adapter_name0)
output0 = model(**inputs)
# sanity check, outputs are not the same
assert not torch.allclose(output_base, output0)
model.set_adapter(adapter_name1)
output1 = model(**inputs)
# sanity check, outputs have the right shape and are not the same
assert len(output_base) >= 3
assert len(output_base) == len(output0) == len(output1)
assert not torch.allclose(output_base, output0)
assert not torch.allclose(output_base, output1)
# set adapter_indices so that it alternates between base, adapter 0, and adapter 1
adapters = ["__base__", adapter_name0, adapter_name1]
inputs["adapter_names"] = [adapters[i % 3] for i in (range(len(inputs["X"])))]
output_mixed = model.forward(**inputs)
assert torch.allclose(output_base[::3], output_mixed[::3])
assert torch.allclose(output0[1::3], output_mixed[1::3])
assert torch.allclose(output1[2::3], output_mixed[2::3])
def test_mixed_adapter_batches_lora_mlp(self, mlp_lora):
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(mlp_lora, inputs)
def test_mixed_adapter_batches_lora_different_target_layers(self, mlp_lora):
base_model = MLP().to(self.torch_device).eval()
# target different lora layers
config0 = LoraConfig(target_modules=["lin0"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["lin1"], init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_partly_overlapping_target_layers(self, mlp_lora):
base_model = MLP().to(self.torch_device).eval()
        # target partly overlapping lora layers
config0 = LoraConfig(target_modules=["lin0"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["lin0", "lin1"], init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_conv1d_emb(self):
base_model = ModelEmbConv1D().to(self.torch_device).eval()
config0 = LoraConfig(target_modules=["emb", "conv1d"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["emb", "conv1d"], r=16, init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_conv2d(self):
base_model = ModelConv2D().to(self.torch_device).eval()
config0 = LoraConfig(target_modules=["conv2d"], init_lora_weights=False)
config1 = LoraConfig(target_modules=["conv2d"], r=16, init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter0").eval()
peft_model.add_adapter("adapter1", config1)
inputs = {"X": torch.arange(270).view(6, 5, 3, 3).to(self.torch_device)}
self.run_checks(peft_model, inputs)
def test_mixed_adapter_batches_lora_length_mismatch_raises(self, mlp_lora):
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["__base__"] * 5, # wrong length!
}
msg = r"Length of `adapter_names` should be the same as the number of inputs, but got "
with pytest.raises(ValueError, match=msg):
mlp_lora.forward(**inputs)
def test_mixed_adapter_batches_lora_training_mode_raises(self, mlp_lora):
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["__base__"] * 9,
}
mlp_lora = mlp_lora.train()
msg = r"Cannot pass `adapter_names` when the model is in training mode."
with pytest.raises(ValueError, match=msg):
mlp_lora.forward(**inputs)
def test_mixed_adapter_batches_lora_disabled(self, mlp_lora):
# Disabling adapters should have precedence over passing adapter names
inputs = {"X": torch.arange(90).view(-1, 10).to(self.torch_device)}
with mlp_lora.disable_adapter():
output_disabled = mlp_lora(**inputs)
adapters = ["__base__", "adapter0", "adapter1"]
inputs["adapter_names"] = [adapters[i % 3] for i in (range(len(inputs["X"])))]
with mlp_lora.disable_adapter():
output_mixed = mlp_lora.forward(**inputs)
assert torch.allclose(output_disabled, output_mixed)
def test_mixed_adapter_batches_lora_merged_raises(self, mlp_lora):
# When there are merged adapters, passing adapter names should raise an error
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["default"] * 9,
}
mlp_lora.merge_adapter(["adapter0"])
msg = r"Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
with pytest.raises(ValueError, match=msg):
mlp_lora.forward(**inputs)
def test_mixed_adapter_batches_lora_with_dora_raises(self):
# When there are Dora adapters, passing adapter names should raise an error
torch.manual_seed(0)
inputs = {
"X": torch.arange(90).view(-1, 10).to(self.torch_device),
"adapter_names": ["default"] * 9,
}
base_model = MLP().to(self.torch_device).eval()
config = LoraConfig(target_modules=["lin0"], init_lora_weights=False, use_dora=True)
peft_model = get_peft_model(base_model, config).eval()
msg = r"Cannot pass `adapter_names` when DoRA is enabled."
with pytest.raises(ValueError, match=msg):
peft_model.forward(**inputs)
@require_non_cpu
def test_mixed_adapter_batches_lora_opt_timing(self):
# Use a more realistic model (opt-125m) and do a simple runtime check to ensure that mixed adapter batches
# don't add too much overhead. These types of tests are inherently flaky, so we try to add in some robustness.
logs = [] # store the time it takes to run each forward pass here
@contextmanager
def timed():
tic = time.perf_counter()
yield
toc = time.perf_counter()
logs.append(toc - tic)
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(self.torch_device).eval()
inputs = {"input_ids": torch.randint(0, 1000, (16, 64)).to(self.torch_device)}
with timed():
output_base = base_model(**inputs).logits
config0 = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False)
peft_model = get_peft_model(base_model, config0, "adapter1").eval()
with timed():
output0 = peft_model(**inputs).logits
# sanity check, outputs are not the same
assert not torch.allclose(output_base, output0)
config1 = LoraConfig(task_type="CAUSAL_LM", r=16, init_lora_weights=False)
peft_model.add_adapter("adapter2", config1)
peft_model.set_adapter("adapter2")
with timed():
output1 = peft_model(**inputs).logits
# sanity check, outputs are not the same
assert not torch.allclose(output_base, output1)
# set adapter_indices so that it alternates between 0 (base), lora 1, and lora 2
adapters = ["__base__", "adapter1", "adapter2"]
inputs["adapter_names"] = [adapters[i % 3] for i in (range(len(inputs["input_ids"])))]
with timed():
output_mixed = peft_model.forward(**inputs).logits
atol, rtol = 1e-4, 1e-4
assert torch.allclose(output_base[::3], output_mixed[::3], atol=atol, rtol=rtol)
assert torch.allclose(output0[1::3], output_mixed[1::3], atol=atol, rtol=rtol)
assert torch.allclose(output1[2::3], output_mixed[2::3], atol=atol, rtol=rtol)
# Check that the overhead in time added by mixed batches is not too high.
# To prevent flakiness, we measure mixed inference 3 times and take the lowest value, then compare it to the mean
# of the non-mixed inference times. We also grant a generous margin of 2x the mean time.
with timed():
output_mixed = peft_model.forward(**inputs).logits
with timed():
output_mixed = peft_model.forward(**inputs).logits
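        # logs now contains: the base model pass, the two single-adapter passes, and then the three mixed-batch passes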
time_base, time0, time1, *time_mixed = logs
time_non_mixed = (time_base + time0 + time1) / 3
time_mixed = min(time_mixed)
factor = 2.0
assert time_mixed < factor * time_non_mixed
# Measure timing of running base and adapter separately vs using a mixed batch. Note that on CPU, the
# differences are quite small, so this test requires GPU to avoid flakiness.
for _ in range(3):
with timed():
with peft_model.disable_adapter():
peft_model(**{k: v[::3] for k, v in inputs.items()})
peft_model.set_adapter("adapter1")
peft_model(**{k: v[1::3] for k, v in inputs.items()})
peft_model.set_adapter("adapter2")
peft_model(**{k: v[2::3] for k, v in inputs.items()})
times_separate = logs[-3:]
time_separate = sum(times_separate) / 3
assert time_separate > time_mixed
class TestDynamicDispatch:
# These are tests for the dynamic dispatch feature for LoRA. We create a custom module and a custom LoRA layer
# that targets it.
@pytest.fixture(scope="class")
def custom_module_cls(self):
class MyModule(nn.Module):
# A custom layer that just behaves like an nn.Linear layer but is not an instance of nn.Linear. Therefore,
# it would normally fail to be targeted.
def __init__(self):
super().__init__()
self.in_features = 10
self.out_features = 20
self.weight = nn.Parameter(torch.randn(20, 10))
def forward(self, x):
return nn.functional.linear(x, self.weight)
return MyModule
@pytest.fixture(scope="class")
def custom_lora_cls(self):
from peft.tuners import lora
class MyLora(lora.Linear):
# just re-use the lora.Linear code here
pass
return MyLora
@pytest.fixture(scope="class")
def model_cls(self, custom_module_cls):
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.lin0 = nn.Linear(10, 10)
self.relu = nn.ReLU()
self.my_module = custom_module_cls()
self.lin1 = nn.Linear(20, 2)
def forward(self, x):
x = self.relu(self.lin0(x))
x = self.relu(self.my_module(x))
x = self.lin1(x)
return x
return MyModel
def test_custom_lora_layer_used(self, custom_module_cls, custom_lora_cls, model_cls):
# check that when we register custom lora layers, they are indeed being used for the intended module
model = model_cls()
config = LoraConfig(target_modules=["lin0", "my_module", "lin1"])
config._register_custom_module({custom_module_cls: custom_lora_cls})
peft_model = get_peft_model(model, config)
assert isinstance(peft_model.base_model.model.my_module, custom_lora_cls)
assert isinstance(peft_model.base_model.model.my_module.base_layer, custom_module_cls)
# sanity check that the other lora layer types are still the default ones
assert not isinstance(peft_model.base_model.model.lin0.base_layer, custom_module_cls)
assert not isinstance(peft_model.base_model.model.lin1.base_layer, custom_module_cls)
def test_training_works(self, model_cls, custom_module_cls, custom_lora_cls):
# check that when we train with custom lora layers, they are indeed updated
model = model_cls()
config = LoraConfig(target_modules=["lin0", "my_module", "lin1"])
config._register_custom_module({custom_module_cls: custom_lora_cls})
peft_model = get_peft_model(model, config)
sd_before = peft_model.state_dict()
inputs = torch.randn(16, 10)
optimizer = torch.optim.SGD(peft_model.parameters(), lr=1e-1)
for _ in range(5):
optimizer.zero_grad()
output = peft_model(inputs)
loss = output.sum() ** 2
loss.backward()
optimizer.step()
sd_after = peft_model.state_dict()
assert not torch.allclose(
sd_before["base_model.model.my_module.lora_A.default.weight"],
sd_after["base_model.model.my_module.lora_A.default.weight"],
)
assert not torch.allclose(
sd_before["base_model.model.my_module.lora_B.default.weight"],
sd_after["base_model.model.my_module.lora_B.default.weight"],
)
def test_saving_and_loading(self, custom_module_cls, custom_lora_cls, model_cls, tmp_path):
# check that we can successfully save and load the custom lora cls
torch.manual_seed(0)
model = model_cls()
config = LoraConfig(target_modules=["lin0", "my_module", "lin1"])
config._register_custom_module({custom_module_cls: custom_lora_cls})
torch.manual_seed(1)
peft_model = get_peft_model(model, config)
inputs = torch.randn(5, 10)
outputs_before = peft_model(inputs) # does not raise
sd_before = peft_model.state_dict()
peft_model.save_pretrained(tmp_path / "lora-custom-module")
del model, peft_model
torch.manual_seed(0) # same seed for base model
model = model_cls()
# custom lora mapping is not persisted at the moment, so as a workaround this is needed
config = LoraConfig.from_pretrained(tmp_path / "lora-custom-module")
config._register_custom_module({custom_module_cls: custom_lora_cls})
# different seed for adapter to ensure it is not identical just because of seed
torch.manual_seed(123)
peft_model = PeftModel.from_pretrained(model, tmp_path / "lora-custom-module", config=config)
assert isinstance(peft_model.base_model.model.my_module, custom_lora_cls)
assert isinstance(peft_model.base_model.model.my_module.base_layer, custom_module_cls)
outputs_after = peft_model(inputs) # does not raise
assert torch.allclose(outputs_before, outputs_after)
sd_after = peft_model.state_dict()
assert sd_before.keys() == sd_after.keys()
for key in sd_before.keys():
assert torch.allclose(sd_before[key], sd_after[key])
def test_override_lora_linear(self, custom_lora_cls):
# in this test, we check if users can override default PEFT behavior by supplying a custom lora class that is
# being used instead of lora.Linear
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = LoraConfig(task_type=TaskType.CAUSAL_LM)
config._register_custom_module({nn.Linear: custom_lora_cls})
peft_model = get_peft_model(model, config)
layers = peft_model.base_model.model.model.decoder.layers
for layer in layers:
assert isinstance(layer.self_attn.v_proj, custom_lora_cls)
assert isinstance(layer.self_attn.q_proj, custom_lora_cls)
def test_custom_lora_layer_issues_warning(self, custom_module_cls, custom_lora_cls, model_cls, recwarn):
# users will get a warning if they target a layer type that is not officially supported
model = model_cls()
config = LoraConfig(target_modules=["lin0", "my_module", "lin1"])
config._register_custom_module({custom_module_cls: custom_lora_cls})
get_peft_model(model, config)
# check warning message
msg = (
"Unsupported layer type '<class 'tests.test_custom_models.TestDynamicDispatch.custom_module_cls."
"<locals>.MyModule'>' encountered, proceed at your own risk."
)
assert str(recwarn.list[-1].message) == msg
def test_target_layer_without_in_features_out_features(self, recwarn):
# It should be possible for users to target layers even if we cannot determine in_features and out_features.
        # Those are only needed to initialize the LoRA layer via update_layer, so as long as users take care of that
        # themselves, these attributes do not need to exist.
from peft.tuners import lora
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.lstm = nn.LSTM(10, 20)
class MyLora(nn.Module, lora.LoraLayer):
def __init__(self, base_layer, adapter_name, **kwargs):
super().__init__()
lora.LoraLayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
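                # forward/update_layer are not implemented here because this test only checks that the custom
                # layer is dispatched correctly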
model = MyModel()
# check that in_features and out_features attributes don't exist on LSTM
assert not hasattr(model.lstm, "in_features")
assert not hasattr(model.lstm, "out_features")
config = LoraConfig(target_modules=["lstm"])
config._register_custom_module({nn.LSTM: MyLora})
peft_model = get_peft_model(model, config)
# check that custom LoRA layer is correctly applied
assert isinstance(peft_model.base_model.lstm, MyLora)
assert isinstance(peft_model.base_model.lstm.base_layer, nn.LSTM)
# we should still get a warning message
msg = "Unsupported layer type '<class 'torch.nn.modules.rnn.LSTM'>' encountered, proceed at your own risk."
assert str(recwarn.list[-1].message) == msg
|
peft/tests/test_custom_models.py/0
|
{
"file_path": "peft/tests/test_custom_models.py",
"repo_id": "peft",
"token_count": 65289
}
| 202
|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The intent of the tests contained in this file is to check as many PEFT features as possible with torch.compile. This
# is thus a document on how well torch.compile is supported by PEFT. Currently, we know that certain features do not
# work with torch.compile. The corresponding tests should be marked with `@pytest.mark.xfail(strict=True)`.
#
# When adding a new test that fails with torch.compile, please make sure first that it does NOT fail without
# torch.compile.
import gc
import os
import pytest
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import (
AdaLoraConfig,
BOFTConfig,
HRAConfig,
IA3Config,
LNTuningConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
OFTConfig,
PeftModel,
TaskType,
VeraConfig,
get_peft_model,
)
from .testing_utils import require_bitsandbytes
# only run (very slow) torch.compile tests when explicitly asked to
if os.environ.get("PEFT_DEBUG_WITH_TORCH_COMPILE") != "1":
pytest.skip(allow_module_level=True)
# Mapping: name of the setting -> (Peft config instance, torch.compile kwargs)
SETTINGS = {
"adalora": (AdaLoraConfig(task_type=TaskType.CAUSAL_LM), {}),
"boft": (BOFTConfig(task_type=TaskType.CAUSAL_LM), {}),
"dora": (LoraConfig(task_type=TaskType.CAUSAL_LM, use_dora=True), {}),
"ia3": (IA3Config(task_type=TaskType.CAUSAL_LM), {}),
"ln_tuning": (LNTuningConfig(task_type=TaskType.CAUSAL_LM, target_modules=["final_layer_norm"]), {}),
"loha": (LoHaConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
"lokr": pytest.param(
(LoKrConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
marks=pytest.mark.xfail(strict=True),
),
"lora": (LoraConfig(task_type=TaskType.CAUSAL_LM), {}),
"lora-target-embeddings": pytest.param(
(LoraConfig(task_type=TaskType.CAUSAL_LM, target_modules=["embed_tokens"]), {}),
marks=pytest.mark.xfail(strict=True),
),
"lora-with-modules-to-save": (LoraConfig(task_type=TaskType.CAUSAL_LM, modules_to_save=["embed_tokens"]), {}),
"oft": (OFTConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
"vera": (VeraConfig(task_type=TaskType.CAUSAL_LM), {}),
"hra": (HRAConfig(task_type=TaskType.CAUSAL_LM, target_modules=["q_proj", "v_proj"]), {}),
}
@pytest.mark.single_gpu_tests
class TestTorchCompileCausalLM:
"""
Tests for using torch.compile with causal LM.
    Tip: When adding a new test, set `fake_compile = True` below. With this setting, torch.compile is skipped.
This is useful for two reasons:
- compile is slow, so to quickly iterate on the test, it's best to disable it and only enable it at the very end
- even if you expect the test to fail with compile, as compile does not work with every PEFT feature, it still MUST
succeed without compile, otherwise the test is incorrect.
    Before creating the PR, set `fake_compile` back to `False`.
"""
fake_compile = False
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
max_train_loss = 15.0 # generous threshold for maximum loss after training
@pytest.fixture(autouse=True)
def teardown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
@pytest.fixture(scope="class")
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_id)
@pytest.fixture(scope="class")
def data(self, tokenizer):
def tokenize(samples):
# For some reason, the max sequence length is not honored by the tokenizer, resulting in IndexErrors. Thus,
# manually ensure that sequences are not too long.
tokenized = tokenizer(samples["quote"])
tokenized["input_ids"] = [input_ids[: tokenizer.model_max_length] for input_ids in tokenized["input_ids"]]
tokenized["attention_mask"] = [
input_ids[: tokenizer.model_max_length] for input_ids in tokenized["attention_mask"]
]
return tokenized
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(tokenize, batched=True)
# We need to manually remove unused columns. This is because we cannot use remove_unused_columns=True in the
# Trainer, as this leads to errors with torch.compile. We also cannot just leave them in, as they contain
# strings. Therefore, manually remove all unused columns.
data = data.remove_columns(["quote", "author", "tags"])
return data
def compile(self, model, compile_kwargs):
compile_kwargs = compile_kwargs.copy()
# those are only for the Trainer arguments
compile_kwargs.pop("torch_compile_backend", None)
compile_kwargs.pop("torch_compile_mode", None)
if self.fake_compile:
return model
return torch.compile(model, **compile_kwargs)
@pytest.mark.parametrize("settings", SETTINGS.values(), ids=SETTINGS.keys())
def test_causal_lm_training_trainer_compile(self, settings, tokenizer, data, tmp_path):
r"""Train a PEFT model with torch.compile using Trainer"""
tmp_dir = tmp_path / "model"
config, compile_kwargs = settings
if isinstance(config, AdaLoraConfig):
pytest.skip(reason="AdaLora does not work correctly with Trainer")
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
)
model = get_peft_model(model, config)
# record outputs before training
model.eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_before = model(sample)
model.train()
train_kwargs = {
"per_device_train_batch_size": 4,
"max_steps": 5,
"learning_rate": 1e-3,
"logging_steps": 1,
"output_dir": tmp_dir,
"seed": 0,
}
training_args = TrainingArguments(
torch_compile=not self.fake_compile,
torch_compile_backend=compile_kwargs.get("torch_compile_backend", None),
torch_compile_mode=compile_kwargs.get("torch_compile_mode", None),
**train_kwargs,
)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=training_args,
data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.eval()
atol, rtol = 1e-4, 1e-4
with torch.inference_mode():
output_after = model(sample)
tokens_after = model.generate(sample)
assert torch.isfinite(output_after.logits).all()
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
assert trainer.state.log_history[-1]["train_loss"] < self.max_train_loss
# check saving the model and loading it without compile
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map="auto")
model = PeftModel.from_pretrained(model, tmp_path)
with torch.inference_mode():
output_loaded = model(sample)
tokens_loaded = model.generate(sample)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
assert (tokens_after == tokens_loaded).all()
@pytest.mark.parametrize("settings", SETTINGS.values(), ids=SETTINGS.keys())
def test_causal_lm_training_pytorch_compile(self, settings, tokenizer, data, tmp_path):
r"""Train a PEFT model with torch.compile using PyTorch training loop"""
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
)
config, compile_kwargs = settings
model = get_peft_model(model, config)
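        # AdaLoRA needs to know the total number of training steps for its rank allocation schedule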
if isinstance(config, AdaLoraConfig):
model.base_model.peft_config["default"].total_step = 5
model = self.compile(model, compile_kwargs)
# record outputs before training
model.eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_before = model(sample)
model.train()
model.config.use_cache = False
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
batch_size = 4
losses = []
max_steps = 5 * batch_size
for i in range(0, max_steps, batch_size):
batch = tokenizer.pad(data["train"][i : i + batch_size], return_tensors="pt").to(model.device)
# add targets
batch["labels"] = batch["input_ids"].clone()
optimizer.zero_grad()
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
losses.append(loss.item())
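            # AdaLoRA requires an explicit call after each step to update its rank budget allocation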
if isinstance(config, AdaLoraConfig):
model.base_model.update_and_allocate(i)
model.eval()
with torch.inference_mode():
output_after = model(sample)
tokens_after = model.generate(sample)
assert torch.isfinite(output_after.logits).all()
atol, rtol = 1e-4, 1e-4
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
assert losses[-1] < self.max_train_loss
# check saving the model and loading it without compile
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(self.model_id, device_map="auto")
model = PeftModel.from_pretrained(model, tmp_path)
with torch.inference_mode():
output_loaded = model(sample)
tokens_loaded = model.generate(sample)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
assert (tokens_after == tokens_loaded).all()
@require_bitsandbytes
@pytest.mark.xfail(strict=True)
def test_causal_lm_training_lora_bnb_compile(self, tokenizer, data, tmp_path):
r"""Train a bnb quantized LoRA model with torch.compile using PyTorch training loop"""
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
config = LoraConfig(task_type=TaskType.CAUSAL_LM)
model = get_peft_model(model, config)
model = self.compile(model, {})
# record outputs before training
model.eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_before = model(sample)
model.train()
model.config.use_cache = False
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
batch_size = 4
losses = []
max_steps = 5 * batch_size
for i in range(0, max_steps, batch_size):
batch = tokenizer.pad(data["train"][i : i + batch_size], return_tensors="pt").to(model.device)
# add targets
batch["labels"] = batch["input_ids"].clone()
optimizer.zero_grad()
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
losses.append(loss.item())
model.eval()
with torch.inference_mode():
output_after = model(sample)
assert torch.isfinite(output_after.logits).all()
atol, rtol = 1e-4, 1e-4
# sanity check: model was updated
assert not torch.allclose(output_before.logits, output_after.logits, atol=atol, rtol=rtol)
assert losses[-1] < self.max_train_loss
# check saving the model and loading it without compile
model.save_pretrained(tmp_path)
del model
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id, device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
model = PeftModel.from_pretrained(model, tmp_path)
with torch.inference_mode():
# after loading, outputs are float32 for some reason
output_loaded = model(sample)
assert torch.allclose(output_after.logits, output_loaded.logits, atol=atol, rtol=rtol)
@pytest.mark.xfail(strict=True)
@require_bitsandbytes
def test_causal_lm_multiple_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model = self.compile(model, {})
model.add_adapter("other", config)
model = self.compile(model, {})
with torch.inference_mode():
output_default_adapter = model(sample)
model.set_adapter("other")
with torch.inference_mode():
output_other_adapter = model(sample)
atol, rtol = 1e-4, 1e-4
# outputs of the base model != output of default adapter != output of other adapter
assert not torch.allclose(output_base.logits, output_default_adapter.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_other_adapter.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default_adapter.logits, output_other_adapter.logits, atol=atol, rtol=rtol)
# now delete the other adapter
model.delete_adapter("other")
model.set_adapter("default")
with torch.inference_mode():
output_after_delete = model(sample)
# outputs after delete == output of default adapter
assert torch.allclose(output_default_adapter.logits, output_after_delete.logits, atol=atol, rtol=rtol)
@pytest.mark.xfail(strict=True)
def test_causal_lm_disable_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model = self.compile(model, {})
output_lora = model(sample)
with model.disable_adapter():
with torch.inference_mode():
output_disabled = model(sample)
atol, rtol = 1e-4, 1e-4
# outputs of the base model == output disabled adapter != output of lora adapter
assert torch.allclose(output_base.logits, output_disabled.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_merging_lora_adapter_compile(self, tokenizer, data):
# merge the adapter
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
with torch.inference_mode():
output_lora = model(sample)
model.merge_adapter()
with torch.inference_mode():
output_merged = model(sample)
# merging is less precise, be more tolerant
atol, rtol = 1e-1, 1e-1
# outputs of the base model != output of lora adapter == output of merged adapter
assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol)
assert torch.allclose(output_lora.logits, output_merged.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_merging_multiple_lora_adapters_compile(self, tokenizer, data):
# merge multiple adapters at once
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model.add_adapter("other", config)
with torch.inference_mode():
output_default = model(sample)
model.set_adapter("other")
with torch.inference_mode():
output_other = model(sample)
model.base_model.merge_adapter(["default", "other"])
with torch.inference_mode():
output_merged = model(sample)
# merging is less precise, be more tolerant
atol, rtol = 1e-1, 1e-1
# outputs of the base model != output of default adapter != output of other adapter
assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_other.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol)
# outputs of merged adapter != all others
assert not torch.allclose(output_base.logits, output_merged.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_merged.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_other.logits, output_merged.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
@pytest.mark.xfail(strict=True)
def test_causal_lm_merge_and_unload_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model = self.compile(model, {})
with torch.inference_mode():
output_lora = model(sample)
unloaded = model.merge_and_unload()
with torch.inference_mode():
output_unloaded = unloaded(sample)
# merging is less precise, be more tolerant
atol, rtol = 1e-1, 1e-1
# outputs of the base model != output of lora adapter == output of unloaded adapter
assert not torch.allclose(output_base.logits, output_lora.logits, atol=atol, rtol=rtol)
assert torch.allclose(output_lora.logits, output_unloaded.logits, atol=atol, rtol=rtol)
@require_bitsandbytes
@pytest.mark.xfail(strict=True)
def test_causal_lm_mixed_batch_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
# we need at least 3 samples for this to work!
sample = {
"input_ids": torch.arange(12).reshape(3, 4).to("cuda"),
"attention_mask": torch.ones(3, 4).long().to("cuda"),
}
with torch.inference_mode():
output_base = model(**sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
with torch.inference_mode():
output_default = model(**sample)
model.add_adapter("other", config)
model.set_adapter("other")
with torch.inference_mode():
output_other = model(**sample)
model = self.compile(model, {})
# set adapter_names so that the 3 samples alternate between the base model ("__base__"), the default LoRA adapter, and the other adapter
adapter_names = ["__base__", "default", "other"]
with torch.inference_mode():
output_mixed = model(**sample, adapter_names=adapter_names)
atol, rtol = 1e-4, 1e-4
# outputs of the base model != output of lora adapter 1 != output of other adapter
assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_other.logits, output_mixed.logits, atol=atol, rtol=rtol)
# outputs of mixed adapter is mix of all 3
assert torch.allclose(output_base.logits[0], output_mixed.logits[0], atol=atol, rtol=rtol)
assert torch.allclose(output_default.logits[1], output_mixed.logits[1], atol=atol, rtol=rtol)
assert torch.allclose(output_other.logits[2], output_mixed.logits[2], atol=atol, rtol=rtol)
@require_bitsandbytes
def test_causal_lm_add_weighted_adapter_lora_adapter_compile(self, tokenizer, data):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
).eval()
sample = torch.tensor(data["train"][:1]["input_ids"]).to(model.device)
with torch.inference_mode():
output_base = model(sample)
config = LoraConfig(task_type=TaskType.CAUSAL_LM, init_lora_weights=False)
model = get_peft_model(model, config).eval()
model.add_adapter("other", config)
with torch.inference_mode():
output_default = model(sample)
model.set_adapter("other")
with torch.inference_mode():
output_other = model(sample)
model.add_weighted_adapter(["default", "other"], [0.5, 0.5], adapter_name="combined")
model.set_adapter("combined")
with torch.inference_mode():
output_combined = model(sample)
atol, rtol = 1e-4, 1e-4
# outputs of the base model != output of default adapter != output of other adapter
assert not torch.allclose(output_base.logits, output_default.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_base.logits, output_other.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_other.logits, atol=atol, rtol=rtol)
# outputs of combined adapter != all others
assert not torch.allclose(output_base.logits, output_combined.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_default.logits, output_combined.logits, atol=atol, rtol=rtol)
assert not torch.allclose(output_other.logits, output_combined.logits, atol=atol, rtol=rtol)
|
peft/tests/test_torch_compile.py/0
|
{
"file_path": "peft/tests/test_torch_compile.py",
"repo_id": "peft",
"token_count": 10843
}
| 203
|
# Upgrading from previous versions
I generally try to maintain code interface and especially model weight compatibility across many `timm` versions. Sometimes there are exceptions.
## Checkpoint remapping
Pretrained weight remapping is handled by `checkpoint_filter_fn` in a model implementation module. It remaps old pretrained checkpoints to the current format, and also remaps 3rd-party (original) checkpoints to the `timm` format if the model was modified when brought into `timm`.
The `checkpoint_filter_fn` is called automatically when loading pretrained weights via `pretrained=True`, but it can also be called manually by passing the current model instance and the old state dict to the function directly.
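A minimal sketch of a manual remap (the model name and checkpoint path below are placeholders, and depending on how the checkpoint was saved the state dict may first need to be unwrapped from a `'model'` or `'state_dict'` key):
```python
import torch
import timm
from timm.models.swin_transformer_v2 import checkpoint_filter_fn

# build the current architecture, then remap an old / 3rd-party checkpoint onto it
model = timm.create_model('swinv2_tiny_window8_256', pretrained=False)
state_dict = torch.load('old_swinv2_checkpoint.pth', map_location='cpu')
state_dict = checkpoint_filter_fn(state_dict, model)  # remap keys to the current timm format
model.load_state_dict(state_dict)
```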
## Upgrading from 0.6 and earlier
Many changes have been made since the 0.6.x stable releases. They were previewed in the 0.8.x dev releases, but not everyone transitioned.
* `timm.models.layers` moved to `timm.layers`:
* `from timm.models.layers import name` will still work via deprecation mapping (but please transition to `timm.layers`).
* `import timm.models.layers.module` or `from timm.models.layers.module import name` needs to be changed now.
* Builder, helper, and non-model modules in `timm.models` have a `_` prefix added, i.e. `timm.models.helpers` -> `timm.models._helpers`; there are temporary deprecation mapping files, but those will be removed.
* All models now support `architecture.pretrained_tag` naming (e.g. `resnet50.rsb_a1`).
* The pretrained_tag is the specific weight variant (different head) for the architecture.
* Using only `architecture` defaults to the first weights in the default_cfgs for that model architecture.
* In adding pretrained tags, many model names that existed only to differentiate weight variants were renamed to use the tag (e.g. `vit_base_patch16_224_in21k` -> `vit_base_patch16_224.augreg_in21k`). There are deprecation mappings for these.
* A number of models had their checkpoints remapped to match architecture changes needed to better support `features_only=True`; there are `checkpoint_filter_fn` methods in any model module that was remapped. These can be passed to `timm.models.load_checkpoint(..., filter_fn=timm.models.swin_transformer_v2.checkpoint_filter_fn)` to remap your existing checkpoint (a short sketch follows this list).
* The Hugging Face Hub (https://huggingface.co/timm) is now the primary source for `timm` weights. Model cards include links to papers, the original source, and the license.
* The previous 0.6.x code can be cloned from the [0.6.x](https://github.com/rwightman/pytorch-image-models/tree/0.6.x) branch or installed via pip by pinning the version.
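As noted above, the same remap can be applied while loading a checkpoint; a minimal sketch (the model name and checkpoint path are placeholders):
```python
import timm
from timm.models import load_checkpoint
from timm.models.swin_transformer_v2 import checkpoint_filter_fn

model = timm.create_model('swinv2_tiny_window8_256', pretrained=False)
load_checkpoint(model, 'my_old_swinv2_checkpoint.pth', filter_fn=checkpoint_filter_fn)
```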
|
pytorch-image-models/UPGRADING.md/0
|
{
"file_path": "pytorch-image-models/UPGRADING.md",
"repo_id": "pytorch-image-models",
"token_count": 694
}
| 204
|
# Adversarial Inception v3
**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements, including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
This particular model was trained for the study of adversarial examples (adversarial training).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('adv_inception_v3', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `adv_inception_v3`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('adv_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1804-00097,
author = {Alexey Kurakin and
Ian J. Goodfellow and
Samy Bengio and
Yinpeng Dong and
Fangzhou Liao and
Ming Liang and
Tianyu Pang and
Jun Zhu and
Xiaolin Hu and
Cihang Xie and
Jianyu Wang and
Zhishuai Zhang and
Zhou Ren and
Alan L. Yuille and
Sangxia Huang and
Yao Zhao and
Yuzhe Zhao and
Zhonglin Han and
Junjiajia Long and
Yerkebulan Berdibekov and
Takuya Akiba and
Seiya Tokui and
Motoki Abe},
title = {Adversarial Attacks and Defences Competition},
journal = {CoRR},
volume = {abs/1804.00097},
year = {2018},
url = {http://arxiv.org/abs/1804.00097},
archivePrefix = {arXiv},
eprint = {1804.00097},
timestamp = {Thu, 31 Oct 2019 16:31:22 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: Adversarial Inception v3
Paper:
Title: Adversarial Attacks and Defences Competition
URL: https://paperswithcode.com/paper/adversarial-attacks-and-defences-competition
Models:
- Name: adv_inception_v3
In Collection: Adversarial Inception v3
Metadata:
FLOPs: 7352418880
Parameters: 23830000
File Size: 95549439
Architecture:
- 1x1 Convolution
- Auxiliary Classifier
- Average Pooling
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inception-v3 Module
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: adv_inception_v3
Crop Pct: '0.875'
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L456
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.58%
Top 5 Accuracy: 93.74%
-->
|
pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/adversarial-inception-v3.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2247
}
| 205
|
# (Gluon) ResNet
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks.
The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('gluon_resnet101_v1b', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `gluon_resnet101_v1b`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('gluon_resnet101_v1b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/HeZRS15,
author = {Kaiming He and
Xiangyu Zhang and
Shaoqing Ren and
Jian Sun},
title = {Deep Residual Learning for Image Recognition},
journal = {CoRR},
volume = {abs/1512.03385},
year = {2015},
url = {http://arxiv.org/abs/1512.03385},
archivePrefix = {arXiv},
eprint = {1512.03385},
timestamp = {Wed, 17 Apr 2019 17:23:45 +0200},
biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: Gloun ResNet
Paper:
Title: Deep Residual Learning for Image Recognition
URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition
Models:
- Name: gluon_resnet101_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 10068547584
Parameters: 44550000
File Size: 178723172
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L89
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.3%
Top 5 Accuracy: 94.53%
- Name: gluon_resnet101_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 10376567296
Parameters: 44570000
File Size: 178802575
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L113
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.53%
Top 5 Accuracy: 94.59%
- Name: gluon_resnet101_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 10377018880
Parameters: 44570000
File Size: 178802755
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L138
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.4%
Top 5 Accuracy: 95.02%
- Name: gluon_resnet101_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 11805511680
Parameters: 44670000
File Size: 179221777
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet101_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L166
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.29%
Top 5 Accuracy: 95.16%
- Name: gluon_resnet152_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 14857660416
Parameters: 60190000
File Size: 241534001
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L97
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.69%
Top 5 Accuracy: 94.73%
- Name: gluon_resnet152_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 15165680128
Parameters: 60210000
File Size: 241613404
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L121
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.91%
Top 5 Accuracy: 94.85%
- Name: gluon_resnet152_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 15166131712
Parameters: 60210000
File Size: 241613584
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L147
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.48%
Top 5 Accuracy: 95.2%
- Name: gluon_resnet152_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 16594624512
Parameters: 60320000
File Size: 242032606
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet152_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L175
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.02%
Top 5 Accuracy: 95.42%
- Name: gluon_resnet18_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 2337073152
Parameters: 11690000
File Size: 46816736
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet18_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L65
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 70.84%
Top 5 Accuracy: 89.76%
- Name: gluon_resnet34_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 4718469120
Parameters: 21800000
File Size: 87295112
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet34_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L73
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.59%
Top 5 Accuracy: 92.0%
- Name: gluon_resnet50_v1b
In Collection: Gloun ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102493763
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L81
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.58%
Top 5 Accuracy: 93.72%
- Name: gluon_resnet50_v1c
In Collection: Gloun ResNet
Metadata:
FLOPs: 5590551040
Parameters: 25580000
File Size: 102573166
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1c
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L105
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.01%
Top 5 Accuracy: 93.99%
- Name: gluon_resnet50_v1d
In Collection: Gloun ResNet
Metadata:
FLOPs: 5591002624
Parameters: 25580000
File Size: 102573346
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L129
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.06%
Top 5 Accuracy: 94.46%
- Name: gluon_resnet50_v1s
In Collection: Gloun ResNet
Metadata:
FLOPs: 7019495424
Parameters: 25680000
File Size: 102992368
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnet50_v1s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L156
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.7%
Top 5 Accuracy: 94.25%
-->
|
pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/gloun-resnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 7210
}
| 206
|
# SK-ResNet
**SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('skresnet18', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `skresnet18`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('skresnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{li2019selective,
title={Selective Kernel Networks},
author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang},
year={2019},
eprint={1903.06586},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: SKResNet
Paper:
Title: Selective Kernel Networks
URL: https://paperswithcode.com/paper/selective-kernel-networks
Models:
- Name: skresnet18
In Collection: SKResNet
Metadata:
FLOPs: 2333467136
Parameters: 11960000
File Size: 47923238
Architecture:
- Convolution
- Dense Connections
- Global Average Pooling
- Max Pooling
- Residual Connection
- Selective Kernel
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: skresnet18
LR: 0.1
Epochs: 100
Layers: 18
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L148
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 73.03%
Top 5 Accuracy: 91.17%
- Name: skresnet34
In Collection: SKResNet
Metadata:
FLOPs: 4711849952
Parameters: 22280000
File Size: 89299314
Architecture:
- Convolution
- Dense Connections
- Global Average Pooling
- Max Pooling
- Residual Connection
- Selective Kernel
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: skresnet34
LR: 0.1
Epochs: 100
Layers: 34
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L165
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.93%
Top 5 Accuracy: 93.32%
-->
|
pytorch-image-models/hfdocs/source/models/skresnet.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/models/skresnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2082
}
| 207
|
# Data
[[autodoc]] timm.data.create_dataset
[[autodoc]] timm.data.create_loader
[[autodoc]] timm.data.create_transform
[[autodoc]] timm.data.resolve_data_config
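A minimal sketch tying these factories together (the dataset root, split, and loader settings below are placeholder assumptions, not fixed requirements):
```py
>>> import timm
>>> from timm.data import create_dataset, create_loader, create_transform, resolve_data_config
>>> model = timm.create_model('resnet50', pretrained=True)
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)  # eval transform matching the model's pretrained config
>>> dataset = create_dataset('', root='path/to/images', split='validation')
>>> loader = create_loader(dataset, input_size=config['input_size'], batch_size=32, is_training=False)
```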
|
pytorch-image-models/hfdocs/source/reference/data.mdx/0
|
{
"file_path": "pytorch-image-models/hfdocs/source/reference/data.mdx",
"repo_id": "pytorch-image-models",
"token_count": 67
}
| 208
|
import os
from typing import Optional
from .reader_image_folder import ReaderImageFolder
from .reader_image_in_tar import ReaderImageInTar
def create_reader(
name: str,
root: Optional[str] = None,
split: str = 'train',
**kwargs,
):
kwargs = {k: v for k, v in kwargs.items() if v is not None}
name = name.lower()
name = name.split('/', 1)
prefix = ''
if len(name) > 1:
prefix = name[0]
name = name[-1]
# FIXME improve the selection right now just tfds prefix or fallback path, will need options to
# explicitly select other options shortly
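# Illustrative name formats (examples only, not an exhaustive or guaranteed list):
#   'hfds/imagenet-1k'   -> Hugging Face datasets reader
#   'tfds/imagenet2012'  -> TensorFlow datasets reader
#   'wds/my_webdataset'  -> webdataset reader
#   no prefix            -> fallback to an image folder or .tar file located at `root`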
if prefix == 'hfds':
from .reader_hfds import ReaderHfds # defer Hf datasets import
reader = ReaderHfds(name=name, root=root, split=split, **kwargs)
elif prefix == 'hfids':
from .reader_hfids import ReaderHfids # defer HF datasets import
reader = ReaderHfids(name=name, root=root, split=split, **kwargs)
elif prefix == 'tfds':
from .reader_tfds import ReaderTfds # defer tensorflow import
reader = ReaderTfds(name=name, root=root, split=split, **kwargs)
elif prefix == 'wds':
from .reader_wds import ReaderWds
kwargs.pop('download', False)
reader = ReaderWds(root=root, name=name, split=split, **kwargs)
else:
assert os.path.exists(root)
# default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder
# FIXME support split here or in reader?
if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar':
reader = ReaderImageInTar(root, **kwargs)
else:
reader = ReaderImageFolder(root, **kwargs)
return reader
|
pytorch-image-models/timm/data/readers/reader_factory.py/0
|
{
"file_path": "pytorch-image-models/timm/data/readers/reader_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 694
}
| 209
|
""" PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
* 'avg' - Average pooling
* 'max' - Max pooling
* 'avgmax' - Sum of average and max pooling re-scaled by 0.5
* 'catavgmax' - Concatenation of average and max pooling along feature dim, doubles feature dim
Both a functional and a nn.Module version of the pooling is provided.
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from .format import get_spatial_dim, get_channel_dim
_int_tuple_2_t = Union[int, Tuple[int, int]]
def adaptive_pool_feat_mult(pool_type='avg'):
if pool_type.endswith('catavgmax'):
return 2
else:
return 1
def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1):
"""Selectable global pooling function with dynamic input kernel size
"""
if pool_type == 'avg':
x = F.adaptive_avg_pool2d(x, output_size)
elif pool_type == 'avgmax':
x = adaptive_avgmax_pool2d(x, output_size)
elif pool_type == 'catavgmax':
x = adaptive_catavgmax_pool2d(x, output_size)
elif pool_type == 'max':
x = F.adaptive_max_pool2d(x, output_size)
else:
assert False, 'Invalid pool type: %s' % pool_type
return x
class FastAdaptiveAvgPool(nn.Module):
def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveAvgPool, self).__init__()
self.flatten = flatten
self.dim = get_spatial_dim(input_fmt)
def forward(self, x):
return x.mean(self.dim, keepdim=not self.flatten)
class FastAdaptiveMaxPool(nn.Module):
def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveMaxPool, self).__init__()
self.flatten = flatten
self.dim = get_spatial_dim(input_fmt)
def forward(self, x):
return x.amax(self.dim, keepdim=not self.flatten)
class FastAdaptiveAvgMaxPool(nn.Module):
def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveAvgMaxPool, self).__init__()
self.flatten = flatten
self.dim = get_spatial_dim(input_fmt)
def forward(self, x):
x_avg = x.mean(self.dim, keepdim=not self.flatten)
x_max = x.amax(self.dim, keepdim=not self.flatten)
return 0.5 * x_avg + 0.5 * x_max
class FastAdaptiveCatAvgMaxPool(nn.Module):
def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveCatAvgMaxPool, self).__init__()
self.flatten = flatten
self.dim_reduce = get_spatial_dim(input_fmt)
if flatten:
self.dim_cat = 1
else:
self.dim_cat = get_channel_dim(input_fmt)
def forward(self, x):
x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten)
x_max = x.amax(self.dim_reduce, keepdim=not self.flatten)
return torch.cat((x_avg, x_max), self.dim_cat)
class AdaptiveAvgMaxPool2d(nn.Module):
def __init__(self, output_size: _int_tuple_2_t = 1):
super(AdaptiveAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_avgmax_pool2d(x, self.output_size)
class AdaptiveCatAvgMaxPool2d(nn.Module):
def __init__(self, output_size: _int_tuple_2_t = 1):
super(AdaptiveCatAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_catavgmax_pool2d(x, self.output_size)
class SelectAdaptivePool2d(nn.Module):
"""Selectable global pooling layer with dynamic input kernel size
"""
def __init__(
self,
output_size: _int_tuple_2_t = 1,
pool_type: str = 'fast',
flatten: bool = False,
input_fmt: str = 'NCHW',
):
super(SelectAdaptivePool2d, self).__init__()
assert input_fmt in ('NCHW', 'NHWC')
self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing
pool_type = pool_type.lower()
if not pool_type:
self.pool = nn.Identity() # pass through
self.flatten = nn.Flatten(1) if flatten else nn.Identity()
elif pool_type.startswith('fast') or input_fmt != 'NCHW':
assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.'
if pool_type.endswith('catavgmax'):
self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt)
elif pool_type.endswith('avgmax'):
self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt)
elif pool_type.endswith('max'):
self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt)
elif pool_type == 'fast' or pool_type.endswith('avg'):
self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt)
else:
assert False, 'Invalid pool type: %s' % pool_type
self.flatten = nn.Identity()
else:
assert input_fmt == 'NCHW'
if pool_type == 'avgmax':
self.pool = AdaptiveAvgMaxPool2d(output_size)
elif pool_type == 'catavgmax':
self.pool = AdaptiveCatAvgMaxPool2d(output_size)
elif pool_type == 'max':
self.pool = nn.AdaptiveMaxPool2d(output_size)
elif pool_type == 'avg':
self.pool = nn.AdaptiveAvgPool2d(output_size)
else:
assert False, 'Invalid pool type: %s' % pool_type
self.flatten = nn.Flatten(1) if flatten else nn.Identity()
def is_identity(self):
return not self.pool_type
def forward(self, x):
x = self.pool(x)
x = self.flatten(x)
return x
def feat_mult(self):
return adaptive_pool_feat_mult(self.pool_type)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'pool_type=' + self.pool_type \
+ ', flatten=' + str(self.flatten) + ')'
|
pytorch-image-models/timm/layers/adaptive_avgmax_pool.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/adaptive_avgmax_pool.py",
"repo_id": "pytorch-image-models",
"token_count": 3039
}
| 210
|
""" NormAct (Normalizaiton + Activation Layer) Factory
Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
isntances in models. Where these are used it will be possible to swap separate BN + act layers with
combined modules like IABN or EvoNorms.
Hacked together by / Copyright 2020 Ross Wightman
"""
import types
import functools
from .evo_norm import *
from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d
from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d
from .inplace_abn import InplaceAbn
_NORM_ACT_MAP = dict(
batchnorm=BatchNormAct2d,
batchnorm2d=BatchNormAct2d,
groupnorm=GroupNormAct,
groupnorm1=functools.partial(GroupNormAct, num_groups=1),
layernorm=LayerNormAct,
layernorm2d=LayerNormAct2d,
evonormb0=EvoNorm2dB0,
evonormb1=EvoNorm2dB1,
evonormb2=EvoNorm2dB2,
evonorms0=EvoNorm2dS0,
evonorms0a=EvoNorm2dS0a,
evonorms1=EvoNorm2dS1,
evonorms1a=EvoNorm2dS1a,
evonorms2=EvoNorm2dS2,
evonorms2a=EvoNorm2dS2a,
frn=FilterResponseNormAct2d,
frntlu=FilterResponseNormTlu2d,
inplaceabn=InplaceAbn,
iabn=InplaceAbn,
)
_NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()}
# has act_layer arg to define act type
_NORM_ACT_REQUIRES_ARG = {
BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn}
def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs):
layer = get_norm_act_layer(layer_name, act_layer=act_layer)
layer_instance = layer(num_features, apply_act=apply_act, **kwargs)
if jit:
layer_instance = torch.jit.script(layer_instance)
return layer_instance
def get_norm_act_layer(norm_layer, act_layer=None):
if norm_layer is None:
return None
assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial))
norm_act_kwargs = {}
# unbind partial fn, so args can be rebound later
if isinstance(norm_layer, functools.partial):
norm_act_kwargs.update(norm_layer.keywords)
norm_layer = norm_layer.func
if isinstance(norm_layer, str):
if not norm_layer:
return None
layer_name = norm_layer.replace('_', '').lower().split('-')[0]
norm_act_layer = _NORM_ACT_MAP[layer_name]
elif norm_layer in _NORM_ACT_TYPES:
norm_act_layer = norm_layer
elif isinstance(norm_layer, types.FunctionType):
# if function type, must be a lambda/fn that creates a norm_act layer
norm_act_layer = norm_layer
else:
type_name = norm_layer.__name__.lower()
if type_name.startswith('batchnorm'):
norm_act_layer = BatchNormAct2d
# NOTE: check 'groupnorm1' before the generic 'groupnorm' prefix so the single-group variant isn't shadowed
elif type_name.startswith('groupnorm1'):
norm_act_layer = functools.partial(GroupNormAct, num_groups=1)
elif type_name.startswith('groupnorm'):
norm_act_layer = GroupNormAct
elif type_name.startswith('layernorm2d'):
norm_act_layer = LayerNormAct2d
elif type_name.startswith('layernorm'):
norm_act_layer = LayerNormAct
else:
assert False, f"No equivalent norm_act layer for {type_name}"
if norm_act_layer in _NORM_ACT_REQUIRES_ARG:
# pass `act_layer` through for backwards compat where `act_layer=None` implies no activation.
# In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types
norm_act_kwargs.setdefault('act_layer', act_layer)
if norm_act_kwargs:
norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args
return norm_act_layer
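# A minimal usage sketch (illustrative, not part of the original module):
#   import torch.nn as nn
#   norm_act_cls = get_norm_act_layer('batchnorm2d', act_layer=nn.ReLU)
#   layer = norm_act_cls(64)                                           # BatchNormAct2d over 64 channels, ReLU after norm
#   layer = create_norm_act_layer('groupnorm', 64, act_layer=nn.SiLU)  # GroupNormAct with SiLU activation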
|
pytorch-image-models/timm/layers/create_norm_act.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/create_norm_act.py",
"repo_id": "pytorch-image-models",
"token_count": 1594
}
| 211
|
""" Lambda Layer
Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention`
- https://arxiv.org/abs/2102.08602
@misc{2102.08602,
Author = {Irwan Bello},
Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention},
Year = {2021},
}
Status:
This impl is a WIP. Code snippets in the paper were used as reference but
good chance some details are missing/wrong.
I've only implemented local lambda conv based pos embeddings.
For a PyTorch impl that includes other embedding options checkout
https://github.com/lucidrains/lambda-networks
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from torch import nn
import torch.nn.functional as F
from .grid import ndgrid
from .helpers import to_2tuple, make_divisible
from .weight_init import trunc_normal_
def rel_pos_indices(size):
size = to_2tuple(size)
pos = torch.stack(ndgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1)
rel_pos = pos[:, None, :] - pos[:, :, None]
rel_pos[0] += size[0] - 1
rel_pos[1] += size[1] - 1
return rel_pos # 2, H * W, H * W
class LambdaLayer(nn.Module):
"""Lambda Layer
Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention`
- https://arxiv.org/abs/2102.08602
NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add.
The internal dimensions of the lambda module are controlled via the interaction of several arguments.
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set
* the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
* the query (q) and key (k) dimension are determined by
* dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None
* q = num_heads * dim_head, k = dim_head
* as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set
Args:
dim (int): input dimension to the module
dim_out (int): output dimension of the module, same as dim if not set
feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W
stride (int): output stride of the module, avg pool used if stride == 2
num_heads (int): parallel attention heads.
dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9)
qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
qkv_bias (bool): add bias to q, k, and v projections
"""
def __init__(
self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9,
qk_ratio=1.0, qkv_bias=False):
super().__init__()
dim_out = dim_out or dim
assert dim_out % num_heads == 0, 'dim_out must be divisible by num_heads'
self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
self.num_heads = num_heads
self.dim_v = dim_out // num_heads
self.qkv = nn.Conv2d(
dim,
num_heads * self.dim_qk + self.dim_qk + self.dim_v,
kernel_size=1, bias=qkv_bias)
self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk)
self.norm_v = nn.BatchNorm2d(self.dim_v)
if r is not None:
# local lambda convolution for pos
self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0))
self.pos_emb = None
self.rel_pos_indices = None
else:
# relative pos embedding
assert feat_size is not None
feat_size = to_2tuple(feat_size)
rel_size = [2 * s - 1 for s in feat_size]
self.conv_lambda = None
self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk))
self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False)
self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity()
self.reset_parameters()
def reset_parameters(self):
trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in
if self.conv_lambda is not None:
trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5)
if self.pos_emb is not None:
trunc_normal_(self.pos_emb, std=.02)
def forward(self, x):
B, C, H, W = x.shape
M = H * W
qkv = self.qkv(x)
q, k, v = torch.split(qkv, [
self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1)
q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K
v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V
k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M
content_lam = k @ v # B, K, V
content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V
if self.pos_emb is None:
position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K
position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V
else:
# FIXME relative pos embedding path not fully verified
pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1)
position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V
position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V
out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W
out = self.pool(out)
return out
|
pytorch-image-models/timm/layers/lambda_layer.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/lambda_layer.py",
"repo_id": "pytorch-image-models",
"token_count": 2611
}
| 212
|
""" Sin-cos, fourier, rotary position embedding modules and functions
Hacked together by / Copyright 2022 Ross Wightman
"""
import math
from typing import List, Tuple, Optional, Union
import torch
from torch import nn as nn
from .grid import ndgrid
from .trace_utils import _assert
def pixel_freq_bands(
num_bands: int,
max_freq: float = 224.,
linear_bands: bool = True,
device: Optional[torch.device] = None,
):
if linear_bands:
bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=torch.float32, device=device)
else:
bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=torch.float32, device=device)
return bands * torch.pi
def freq_bands(
num_bands: int,
temperature: float = 10000.,
step: int = 2,
device: Optional[torch.device] = None,
) -> torch.Tensor:
exp = torch.arange(0, num_bands, step, dtype=torch.int64, device=device).to(torch.float32) / num_bands
bands = 1. / (temperature ** exp)
return bands
def build_sincos2d_pos_embed(
feat_shape: List[int],
dim: int = 64,
temperature: float = 10000.,
reverse_coord: bool = False,
interleave_sin_cos: bool = False,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None
) -> torch.Tensor:
"""
Args:
feat_shape:
dim:
temperature:
reverse_coord: stack grid order W, H instead of H, W
interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos
dtype:
device:
Returns:
"""
assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding'
pos_dim = dim // 4
bands = freq_bands(pos_dim, temperature=temperature, step=1, device=device)
if reverse_coord:
feat_shape = feat_shape[::-1] # stack W, H instead of H, W
grid = torch.stack(ndgrid([
torch.arange(s, device=device, dtype=torch.int64).to(torch.float32)
for s in feat_shape
])).flatten(1).transpose(0, 1)
pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0)
# FIXME add support for unflattened spatial dim?
stack_dim = 2 if interleave_sin_cos else 1 # stack sin, cos, sin, cos instead of sin sin cos cos
pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1)
return pos_emb.to(dtype=dtype)
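# Shape sketch (illustrative): build_sincos2d_pos_embed([7, 7], dim=64) returns a (49, 64) tensor;
# each of the 7 * 7 grid positions gets dim // 4 = 16 frequency bands, with sin and cos taken over
# both the H and W coordinates (16 * 2 * 2 = 64 embedding channels).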
def build_fourier_pos_embed(
feat_shape: List[int],
bands: Optional[torch.Tensor] = None,
num_bands: int = 64,
max_res: int = 224,
temperature: float = 10000.,
linear_bands: bool = False,
include_grid: bool = False,
in_pixels: bool = True,
ref_feat_shape: Optional[List[int]] = None,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
"""
Args:
feat_shape: Feature shape for embedding.
bands: Pre-calculated frequency bands.
num_bands: Number of frequency bands (determines output dim).
max_res: Maximum resolution for pixel based freq.
temperature: Temperature for non-pixel freq.
linear_bands: Linear band spacing for pixel based freq.
include_grid: Include the spatial grid in output.
in_pixels: Output in pixel freq.
ref_feat_shape: Reference feature shape for resize / fine-tune.
dtype: Output dtype.
device: Output device.
    Returns:
        List of [sin, cos] embedding tensors, preceded by the coordinate grid if include_grid is True.
    """
if bands is None:
if in_pixels:
bands = pixel_freq_bands(
num_bands,
float(max_res),
linear_bands=linear_bands,
device=device,
)
else:
bands = freq_bands(
num_bands,
temperature=temperature,
step=1,
device=device,
)
else:
if device is None:
device = bands.device
if dtype is None:
dtype = bands.dtype
if in_pixels:
t = [torch.linspace(-1., 1., steps=s, device=device, dtype=torch.float32) for s in feat_shape]
else:
t = [torch.arange(s, device=device, dtype=torch.int64).to(torch.float32) for s in feat_shape]
if ref_feat_shape is not None:
# eva's scheme for resizing rope embeddings (ref shape = pretrain)
t = [x / f * r for x, f, r in zip(t, feat_shape, ref_feat_shape)]
grid = torch.stack(ndgrid(t), dim=-1)
grid = grid.unsqueeze(-1)
pos = grid * bands
pos_sin, pos_cos = pos.sin().to(dtype=dtype), pos.cos().to(dtype)
out = [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos]
return out
class FourierEmbed(nn.Module):
def __init__(
self,
max_res: int = 224,
num_bands: int = 64,
concat_grid=True,
keep_spatial=False,
):
super().__init__()
self.max_res = max_res
self.num_bands = num_bands
self.concat_grid = concat_grid
self.keep_spatial = keep_spatial
self.register_buffer(
'bands',
pixel_freq_bands(max_res, num_bands),
persistent=False,
)
def forward(self, x):
B, C = x.shape[:2]
feat_shape = x.shape[2:]
emb = build_fourier_pos_embed(
feat_shape,
self.bands,
include_grid=self.concat_grid,
dtype=x.dtype,
device=x.device,
)
emb = torch.cat(emb, dim=-1)
emb = emb.transpose(-1, -2).flatten(len(feat_shape))
batch_expand = (B,) + (-1,) * (x.ndim - 1)
# FIXME support nD
if self.keep_spatial:
x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1)
else:
x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1)
x = x.reshape(B, feat_shape.numel(), -1)
return x
def rot(x):
return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape)
def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb):
if sin_emb.ndim == 3:
return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x)
return x * cos_emb + rot(x) * sin_emb
def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb):
if isinstance(x, torch.Tensor):
x = [x]
return [t * cos_emb + rot(t) * sin_emb for t in x]
def apply_rot_embed_cat(x: torch.Tensor, emb):
sin_emb, cos_emb = emb.tensor_split(2, -1)
if sin_emb.ndim == 3:
return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x)
return x * cos_emb + rot(x) * sin_emb
def apply_keep_indices_nlc(x, pos_embed, keep_indices):
pos_embed = pos_embed.unsqueeze(0).expand(x.shape[0], -1, -1)
pos_embed = pos_embed.gather(1, keep_indices.unsqueeze(-1).expand(-1, -1, pos_embed.shape[-1]))
return pos_embed
def build_rotary_pos_embed(
feat_shape: List[int],
bands: Optional[torch.Tensor] = None,
dim: int = 64,
max_res: int = 224,
temperature: float = 10000.,
linear_bands: bool = False,
in_pixels: bool = True,
ref_feat_shape: Optional[List[int]] = None,
dtype: torch.dtype = torch.float32,
device: Optional[torch.device] = None,
):
"""
Args:
feat_shape: Spatial shape of the target tensor for embedding.
bands: Optional pre-generated frequency bands
dim: Output dimension of embedding tensor.
max_res: Maximum resolution for pixel mode.
temperature: Temperature (inv freq) for non-pixel mode
linear_bands: Linearly (instead of log) spaced bands for pixel mode
        in_pixels: Pixel vs language (inv freq) mode.
        ref_feat_shape: Reference feature shape for resize / fine-tune.
        dtype: Output dtype.
        device: Output device.
    Returns:
        Tuple of (sin_emb, cos_emb) tensors, each flattened to (num_positions, dim).
    """
sin_emb, cos_emb = build_fourier_pos_embed(
feat_shape,
bands=bands,
num_bands=dim // 4,
max_res=max_res,
temperature=temperature,
linear_bands=linear_bands,
in_pixels=in_pixels,
ref_feat_shape=ref_feat_shape,
device=device,
dtype=dtype,
)
num_spatial_dim = 1
# this would be much nicer as a .numel() call to torch.Size(), but torchscript sucks
for x in feat_shape:
num_spatial_dim *= x
sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
return sin_emb, cos_emb
class RotaryEmbedding(nn.Module):
""" Rotary position embedding
NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not
been well tested, and will likely change. It will be moved to its own file.
The following impl/resources were referenced for this impl:
* https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
* https://blog.eleuther.ai/rotary-embeddings/
"""
def __init__(
self,
dim,
max_res=224,
temperature=10000,
in_pixels=True,
linear_bands: bool = False,
feat_shape: Optional[List[int]] = None,
ref_feat_shape: Optional[List[int]] = None,
):
super().__init__()
self.dim = dim
self.max_res = max_res
self.temperature = temperature
self.in_pixels = in_pixels
self.feat_shape = feat_shape
self.ref_feat_shape = ref_feat_shape
if feat_shape is None:
# only cache bands
if in_pixels:
bands = pixel_freq_bands(
dim // 4,
float(max_res),
linear_bands=linear_bands,
)
else:
bands = freq_bands(
dim // 4,
temperature=temperature,
step=1,
)
self.register_buffer(
'bands',
bands,
persistent=False,
)
self.pos_embed_sin = None
self.pos_embed_cos = None
else:
# cache full sin/cos embeddings if shape provided up front
emb_sin, emb_cos = build_rotary_pos_embed(
feat_shape=feat_shape,
dim=dim,
max_res=max_res,
linear_bands=linear_bands,
in_pixels=in_pixels,
ref_feat_shape=self.ref_feat_shape,
)
self.bands = None
self.register_buffer(
'pos_embed_sin',
emb_sin,
persistent=False,
)
self.register_buffer(
'pos_embed_cos',
emb_cos,
persistent=False,
)
def get_embed(self, shape: Optional[List[int]] = None):
if self.bands is not None:
# rebuild embeddings every call, use if target shape changes
assert shape is not None
return build_rotary_pos_embed(
shape,
self.bands,
in_pixels=self.in_pixels,
)
else:
return self.pos_embed_sin, self.pos_embed_cos
def forward(self, x):
# assuming channel-first tensor where spatial dim are >= 2
sin_emb, cos_emb = self.get_embed(x.shape[2:])
return apply_rot_embed(x, sin_emb, cos_emb)
class RotaryEmbeddingCat(nn.Module):
""" Rotary position embedding w/ concatenatd sin & cos
The following impl/resources were referenced for this impl:
* https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
* https://blog.eleuther.ai/rotary-embeddings/
"""
def __init__(
self,
dim,
max_res=224,
temperature=10000,
in_pixels=True,
linear_bands: bool = False,
feat_shape: Optional[List[int]] = None,
ref_feat_shape: Optional[List[int]] = None,
):
super().__init__()
self.dim = dim
self.max_res = max_res
self.temperature = temperature
self.in_pixels = in_pixels
self.feat_shape = feat_shape
self.ref_feat_shape = ref_feat_shape
if feat_shape is None:
# only cache bands
if in_pixels:
bands = pixel_freq_bands(
dim // 4,
float(max_res),
linear_bands=linear_bands,
)
else:
bands = freq_bands(
dim // 4,
temperature=temperature,
step=1,
)
self.register_buffer(
'bands',
bands,
persistent=False,
)
self.pos_embed = None
else:
# cache full sin/cos embeddings if shape provided up front
embeds = build_rotary_pos_embed(
feat_shape=feat_shape,
dim=dim,
max_res=max_res,
linear_bands=linear_bands,
in_pixels=in_pixels,
ref_feat_shape=self.ref_feat_shape,
)
self.bands = None
self.register_buffer(
'pos_embed',
torch.cat(embeds, -1),
persistent=False,
)
def get_embed(self, shape: Optional[List[int]] = None):
if self.bands is not None and shape is not None:
# rebuild embeddings every call, use if target shape changes
embeds = build_rotary_pos_embed(
shape,
self.bands,
in_pixels=self.in_pixels,
ref_feat_shape=self.ref_feat_shape,
)
return torch.cat(embeds, -1)
elif self.pos_embed is not None:
return self.pos_embed
else:
assert False, "get_embed() requires pre-computed pos_embed or valid shape w/ pre-computed bands"
def forward(self, x):
# assuming channel-first tensor where spatial dim are >= 2
pos_embed = self.get_embed(x.shape[2:])
return apply_rot_embed_cat(x, pos_embed)
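# Illustrative usage sketch (not part of the original file): applying the concatenated rotary
# embedding to attention queries/keys, the way an EVA-style attention block might. The module is
# assumed to be built with the per-head dim and a fixed feature shape; all sizes here are made up.
def _example_rotary_embedding_cat():
    B, num_heads, head_dim, H, W = 2, 8, 64, 14, 14
    rope = RotaryEmbeddingCat(head_dim, in_pixels=False, feat_shape=[H, W])
    pos_embed = rope.get_embed()  # (H * W, 2 * head_dim), sin and cos concatenated on last dim
    q = torch.randn(B, num_heads, H * W, head_dim)
    k = torch.randn(B, num_heads, H * W, head_dim)
    q = apply_rot_embed_cat(q, pos_embed)  # rotate queries
    k = apply_rot_embed_cat(k, pos_embed)  # rotate keys with the same embedding
    return q, k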
|
pytorch-image-models/timm/layers/pos_embed_sincos.py/0
|
{
"file_path": "pytorch-image-models/timm/layers/pos_embed_sincos.py",
"repo_id": "pytorch-image-models",
"token_count": 7180
}
| 213
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .cross_entropy import LabelSmoothingCrossEntropy
class JsdCrossEntropy(nn.Module):
""" Jensen-Shannon Divergence + Cross-Entropy Loss
Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
    From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty' -
https://arxiv.org/abs/1912.02781
Hacked together by / Copyright 2020 Ross Wightman
"""
def __init__(self, num_splits=3, alpha=12, smoothing=0.1):
super().__init__()
self.num_splits = num_splits
self.alpha = alpha
if smoothing is not None and smoothing > 0:
self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing)
else:
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def __call__(self, output, target):
split_size = output.shape[0] // self.num_splits
assert split_size * self.num_splits == output.shape[0]
logits_split = torch.split(output, split_size)
# Cross-entropy is only computed on clean images
loss = self.cross_entropy_loss(logits_split[0], target[:split_size])
probs = [F.softmax(logits, dim=1) for logits in logits_split]
# Clamp mixture distribution to avoid exploding KL divergence
logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log()
loss += self.alpha * sum([F.kl_div(
logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs)
return loss
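# Illustrative usage sketch (not part of the original file): the loss expects logits for the clean
# images and their AugMix views concatenated along the batch dimension, clean split first; labels
# are only consumed for the clean split. Sizes below are made up for demonstration.
def _example_jsd_cross_entropy():
    batch_size, num_classes = 4, 10
    criterion = JsdCrossEntropy(num_splits=3, alpha=12, smoothing=0.1)
    logits = torch.randn(batch_size * 3, num_classes)  # [clean, augmix1, augmix2] stacked
    target = torch.randint(0, num_classes, (batch_size * 3,))  # same labels repeated per split
    loss = criterion(logits, target)  # cross-entropy on clean + alpha * JSD consistency term
    return loss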
|
pytorch-image-models/timm/loss/jsd.py/0
|
{
"file_path": "pytorch-image-models/timm/loss/jsd.py",
"repo_id": "pytorch-image-models",
"token_count": 639
}
| 214
|
""" Deep Layer Aggregation and DLA w/ Res2Net
DLA original adapted from Official Pytorch impl at: https://github.com/ucbdrive/dla
DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
Res2Net additions from: https://github.com/gasvn/Res2Net/
Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
"""
import math
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['DLA']
class DlaBasic(nn.Module):
"""DLA Basic"""
def __init__(self, inplanes, planes, stride=1, dilation=1, **_):
super(DlaBasic, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=3,
stride=stride, padding=dilation, bias=False, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3,
stride=1, padding=dilation, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.stride = stride
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if shortcut is None:
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += shortcut
out = self.relu(out)
return out
class DlaBottleneck(nn.Module):
"""DLA/DLA-X Bottleneck"""
expansion = 2
def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64):
super(DlaBottleneck, self).__init__()
self.stride = stride
mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)
mid_planes = mid_planes // self.expansion
self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes)
self.conv2 = nn.Conv2d(
mid_planes, mid_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality)
self.bn2 = nn.BatchNorm2d(mid_planes)
self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(outplanes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if shortcut is None:
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += shortcut
out = self.relu(out)
return out
class DlaBottle2neck(nn.Module):
""" Res2Net/Res2NeXT DLA Bottleneck
Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py
"""
expansion = 2
def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4):
super(DlaBottle2neck, self).__init__()
self.is_first = stride > 1
self.scale = scale
mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality)
mid_planes = mid_planes // self.expansion
self.width = mid_planes
self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(mid_planes * scale)
num_scale_convs = max(1, scale - 1)
convs = []
bns = []
for _ in range(num_scale_convs):
convs.append(nn.Conv2d(
mid_planes, mid_planes, kernel_size=3,
stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False))
bns.append(nn.BatchNorm2d(mid_planes))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if self.is_first else None
self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(outplanes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if shortcut is None:
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
spo = []
sp = spx[0] # redundant, for torchscript
for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
if i == 0 or self.is_first:
sp = spx[i]
else:
sp = sp + spx[i]
sp = conv(sp)
sp = bn(sp)
sp = self.relu(sp)
spo.append(sp)
if self.scale > 1:
if self.pool is not None: # self.is_first == True, None check for torchscript
spo.append(self.pool(spx[-1]))
else:
spo.append(spx[-1])
out = torch.cat(spo, 1)
out = self.conv3(out)
out = self.bn3(out)
out += shortcut
out = self.relu(out)
return out
class DlaRoot(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, shortcut):
super(DlaRoot, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.shortcut = shortcut
def forward(self, x_children: List[torch.Tensor]):
x = self.conv(torch.cat(x_children, 1))
x = self.bn(x)
if self.shortcut:
x += x_children[0]
x = self.relu(x)
return x
class DlaTree(nn.Module):
def __init__(
self,
levels,
block,
in_channels,
out_channels,
stride=1,
dilation=1,
cardinality=1,
base_width=64,
level_root=False,
root_dim=0,
root_kernel_size=1,
root_shortcut=False,
):
super(DlaTree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity()
self.project = nn.Identity()
cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width)
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride, **cargs)
self.tree2 = block(out_channels, out_channels, 1, **cargs)
if in_channels != out_channels:
# NOTE the official impl/weights have project layers in levels > 1 case that are never
# used, I've moved the project layer here to avoid wasted params but old checkpoints will
# need strict=False while loading.
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels))
self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut)
else:
cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut))
self.tree1 = DlaTree(
levels - 1,
block,
in_channels,
out_channels,
stride,
root_dim=0,
**cargs,
)
self.tree2 = DlaTree(
levels - 1,
block,
out_channels,
out_channels,
root_dim=root_dim + out_channels,
**cargs,
)
self.root = None
self.level_root = level_root
self.root_dim = root_dim
self.levels = levels
def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None):
if children is None:
children = []
bottom = self.downsample(x)
shortcut = self.project(bottom)
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, shortcut)
if self.root is not None: # levels == 1
x2 = self.tree2(x1)
x = self.root([x2, x1] + children)
else:
children.append(x1)
x = self.tree2(x1, None, children)
return x
class DLA(nn.Module):
def __init__(
self,
levels,
channels,
output_stride=32,
num_classes=1000,
in_chans=3,
global_pool='avg',
cardinality=1,
base_width=64,
block=DlaBottle2neck,
shortcut_root=False,
drop_rate=0.0,
):
super(DLA, self).__init__()
self.channels = channels
self.num_classes = num_classes
self.cardinality = cardinality
self.base_width = base_width
assert output_stride == 32 # FIXME support dilation
self.base_layer = nn.Sequential(
nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False),
nn.BatchNorm2d(channels[0]),
nn.ReLU(inplace=True),
)
self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)
cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root)
self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs)
self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs)
self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs)
self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs)
self.feature_info = [
dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level
dict(num_chs=channels[1], reduction=2, module='level1'),
dict(num_chs=channels[2], reduction=4, module='level2'),
dict(num_chs=channels[3], reduction=8, module='level3'),
dict(num_chs=channels[4], reduction=16, module='level4'),
dict(num_chs=channels[5], reduction=32, module='level5'),
]
self.num_features = self.head_hidden_size = channels[-1]
self.global_pool, self.head_drop, self.fc = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
use_conv=True,
drop_rate=drop_rate,
)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(
inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
nn.BatchNorm2d(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^base_layer',
blocks=r'^level(\d+)' if coarse else [
# an unusual arch, this achieves somewhat more granularity without getting super messy
(r'^level(\d+)\.tree(\d+)', None),
(r'^level(\d+)\.root', (2,)),
(r'^level(\d+)', (1,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.fc
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.fc = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool, use_conv=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity()
def forward_features(self, x):
x = self.base_layer(x)
x = self.level0(x)
x = self.level1(x)
x = self.level2(x)
x = self.level3(x)
x = self.level4(x)
x = self.level5(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
if pre_logits:
return self.flatten(x)
x = self.fc(x)
return self.flatten(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_dla(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
DLA,
variant,
pretrained,
pretrained_strict=False,
feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'base_layer.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'dla34.in1k': _cfg(hf_hub_id='timm/'),
'dla46_c.in1k': _cfg(hf_hub_id='timm/'),
'dla46x_c.in1k': _cfg(hf_hub_id='timm/'),
'dla60x_c.in1k': _cfg(hf_hub_id='timm/'),
'dla60.in1k': _cfg(hf_hub_id='timm/'),
'dla60x.in1k': _cfg(hf_hub_id='timm/'),
'dla102.in1k': _cfg(hf_hub_id='timm/'),
'dla102x.in1k': _cfg(hf_hub_id='timm/'),
'dla102x2.in1k': _cfg(hf_hub_id='timm/'),
'dla169.in1k': _cfg(hf_hub_id='timm/'),
'dla60_res2net.in1k': _cfg(hf_hub_id='timm/'),
'dla60_res2next.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def dla60_res2net(pretrained=False, **kwargs) -> DLA:
model_args = dict(
levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
block=DlaBottle2neck, cardinality=1, base_width=28)
return _create_dla('dla60_res2net', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60_res2next(pretrained=False, **kwargs) -> DLA:
model_args = dict(
levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024),
block=DlaBottle2neck, cardinality=8, base_width=4)
return _create_dla('dla60_res2next', pretrained, **dict(model_args, **kwargs))
@register_model
def dla34(pretrained=False, **kwargs) -> DLA: # DLA-34
model_args = dict(
levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], block=DlaBasic)
return _create_dla('dla34', pretrained, **dict(model_args, **kwargs))
@register_model
def dla46_c(pretrained=False, **kwargs) -> DLA: # DLA-46-C
model_args = dict(
levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck)
return _create_dla('dla46_c', pretrained, **dict(model_args, **kwargs))
@register_model
def dla46x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-46-C
model_args = dict(
levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256],
block=DlaBottleneck, cardinality=32, base_width=4)
return _create_dla('dla46x_c', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-60-C
model_args = dict(
levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256],
block=DlaBottleneck, cardinality=32, base_width=4)
return _create_dla('dla60x_c', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60(pretrained=False, **kwargs) -> DLA: # DLA-60
model_args = dict(
levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck)
return _create_dla('dla60', pretrained, **dict(model_args, **kwargs))
@register_model
def dla60x(pretrained=False, **kwargs) -> DLA: # DLA-X-60
model_args = dict(
levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, cardinality=32, base_width=4)
return _create_dla('dla60x', pretrained, **dict(model_args, **kwargs))
@register_model
def dla102(pretrained=False, **kwargs) -> DLA: # DLA-102
model_args = dict(
levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, shortcut_root=True)
return _create_dla('dla102', pretrained, **dict(model_args, **kwargs))
@register_model
def dla102x(pretrained=False, **kwargs) -> DLA: # DLA-X-102
model_args = dict(
levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True)
return _create_dla('dla102x', pretrained, **dict(model_args, **kwargs))
@register_model
def dla102x2(pretrained=False, **kwargs) -> DLA: # DLA-X-102 64
model_args = dict(
levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True)
return _create_dla('dla102x2', pretrained, **dict(model_args, **kwargs))
@register_model
def dla169(pretrained=False, **kwargs) -> DLA: # DLA-169
model_args = dict(
levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024],
block=DlaBottleneck, shortcut_root=True)
return _create_dla('dla169', pretrained, **dict(model_args, **kwargs))
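# Illustrative usage sketch (not part of the original file): the registered entrypoints above build
# standard classifiers; forward_features() exposes the stride-32 output of the last DLA level.
def _example_dla34():
    model = dla34(pretrained=False)
    x = torch.randn(1, 3, 224, 224)
    logits = model(x)  # (1, 1000)
    feats = model.forward_features(x)  # (1, 512, 7, 7) for dla34 at 224x224 input
    return logits, feats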
|
pytorch-image-models/timm/models/dla.py/0
|
{
"file_path": "pytorch-image-models/timm/models/dla.py",
"repo_id": "pytorch-image-models",
"token_count": 9163
}
| 215
|
from functools import partial
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ._builder import build_model_with_cfg
from ._builder import pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
from ._registry import register_model, generate_default_cfgs
from .mobilenetv3 import MobileNetV3, MobileNetV3Features
__all__ = [] # model_registry will add each entrypoint fn to this
def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
"""Creates a hardcorenas model
Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
Paper: https://arxiv.org/abs/2102.11646
"""
num_features = 1280
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=num_features,
stem_size=32,
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=se_layer,
**kwargs,
)
features_only = False
model_cls = MobileNetV3
kwargs_filter = None
if model_kwargs.pop('features_only', False):
features_only = True
        kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias')
model_cls = MobileNetV3Features
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
pretrained_strict=not features_only,
kwargs_filter=kwargs_filter,
**model_kwargs,
)
if features_only:
model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_c.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'),
'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_A """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_B """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'],
['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'],
['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_C """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre',
'ir_r1_k5_s1_e3_c40_nre'],
['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_D """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_E """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25',
'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'],
['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs)
return model
@register_model
def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3:
""" hardcorenas_F """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
'ir_r1_k3_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs)
return model
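# Illustrative usage sketch (not part of the original file): each registered entrypoint above builds
# a MobileNetV3 with the searched HardCoRe-NAS block args; input/batch sizes here are made up.
def _example_hardcorenas_a():
    import torch
    model = hardcorenas_a(pretrained=False)
    logits = model(torch.randn(1, 3, 224, 224))  # (1, 1000)
    return logits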
|
pytorch-image-models/timm/models/hardcorenas.py/0
|
{
"file_path": "pytorch-image-models/timm/models/hardcorenas.py",
"repo_id": "pytorch-image-models",
"token_count": 4629
}
| 216
|
""" MobileNet V3
A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl.
Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244
Hacked together by / Copyright 2019, Ross Wightman
"""
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import SelectAdaptivePool2d, Linear, LayerType, PadType, create_conv2d, get_norm_act_layer
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._efficientnet_blocks import SqueezeExcite
from ._efficientnet_builder import BlockArgs, EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \
round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT
from ._features import FeatureInfo, FeatureHooks, feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['MobileNetV3', 'MobileNetV3Features']
class MobileNetV3(nn.Module):
""" MobiletNet-V3
Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific
'efficient head', where global pooling is done before the head convolution without a final batch-norm
layer before the classifier.
Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244
Other architectures utilizing MobileNet-V3 efficient head that are supported by this impl include:
* HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class)
* FBNet-V3 - https://arxiv.org/abs/2006.02049
* LCNet - https://arxiv.org/abs/2109.15099
* MobileNet-V4 - https://arxiv.org/abs/2404.10518
"""
def __init__(
self,
block_args: BlockArgs,
num_classes: int = 1000,
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
num_features: int = 1280,
head_bias: bool = True,
head_norm: bool = False,
pad_type: str = '',
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
se_from_exp: bool = True,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
layer_scale_init_value: Optional[float] = None,
global_pool: str = 'avg',
):
"""
Args:
block_args: Arguments for blocks of the network.
num_classes: Number of classes for classification head.
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
num_features: Number of output channels of the conv head layer.
            head_bias: If True, add a learnable bias to the conv head layer.
            head_norm: If True, follow the conv head layer with a norm + act layer (MobileNet-V4 style).
pad_type: Type of padding to use for convolution layers.
act_layer: Type of activation layer.
norm_layer: Type of normalization layer.
aa_layer: Type of anti-aliasing layer.
se_layer: Type of Squeeze-and-Excite layer.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
round_chs_fn: Callable to round number of filters based on depth multiplier.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
layer_scale_init_value: Enable layer scale on compatible blocks if not None.
global_pool: Type of pooling to use for global pooling features of the FC head.
"""
super(MobileNetV3, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_act_layer(stem_size, inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=32,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
self.stage_ends = [f['stage'] for f in self.feature_info]
self.num_features = builder.in_chs # features of last stage, output of forward_features()
self.head_hidden_size = num_features # features of conv_head, pre_logits output
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
num_pooled_chs = self.num_features * self.global_pool.feat_mult()
if head_norm:
# mobilenet-v4 post-pooling PW conv is followed by a norm+act layer
self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type) # never bias
self.norm_head = norm_act_layer(self.head_hidden_size)
self.act2 = nn.Identity()
else:
# mobilenet-v3 and others only have an activation after final PW conv
self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type, bias=head_bias)
self.norm_head = nn.Identity()
self.act2 = act_layer(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
efficientnet_init_weights(self)
def as_sequential(self):
layers = [self.conv_stem, self.bn1]
layers.extend(self.blocks)
layers.extend([self.global_pool, self.conv_head, self.norm_head, self.act2])
layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse: bool = False):
return dict(
stem=r'^conv_stem|bn1',
blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
# NOTE: cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
extra_blocks: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info
        Returns:
            List of intermediate tensors if intermediates_only, else a tuple of (final features, intermediates).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
if stop_early:
assert intermediates_only, 'Must use intermediates_only for early stopping.'
intermediates = []
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
# forward pass
feat_idx = 0 # stem is index 0
x = self.conv_stem(x)
x = self.bn1(x)
if feat_idx in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index]
for blk in blocks:
feat_idx += 1
x = blk(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
extra_blocks: bool = False,
):
""" Prune layers not required for specified intermediates.
"""
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
max_index = self.stage_ends[max_index]
self.blocks = self.blocks[:max_index] # truncate blocks w/ stem as idx 0
if max_index < len(self.blocks):
self.conv_head = nn.Identity()
self.norm_head = nn.Identity()
if prune_head:
self.conv_head = nn.Identity()
self.norm_head = nn.Identity()
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv_stem(x)
x = self.bn1(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
x = self.conv_head(x)
x = self.norm_head(x)
x = self.act2(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
if pre_logits:
return x
return self.classifier(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
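# Illustrative sketch (not part of the original file): the 'efficient head' described in the class
# docstring pools before conv_head, so the final pointwise conv runs on a 1x1 map. This mirrors
# forward_head() step by step for an already constructed model (dropout omitted for brevity).
def _example_efficient_head(model: MobileNetV3, x: torch.Tensor) -> torch.Tensor:
    feats = model.forward_features(x)  # (B, num_features, H/32, W/32)
    pooled = model.global_pool(feats)  # (B, num_features, 1, 1) - pooling happens first
    hidden = model.act2(model.norm_head(model.conv_head(pooled)))  # (B, head_hidden_size, 1, 1)
    return model.classifier(model.flatten(hidden))  # (B, num_classes)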
class MobileNetV3Features(nn.Module):
""" MobileNetV3 Feature Extractor
A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation
and object detection models.
"""
def __init__(
self,
block_args: BlockArgs,
out_indices: Tuple[int, ...] = (0, 1, 2, 3, 4),
feature_location: str = 'bottleneck',
in_chans: int = 3,
stem_size: int = 16,
fix_stem: bool = False,
output_stride: int = 32,
pad_type: PadType = '',
round_chs_fn: Callable = round_channels,
se_from_exp: bool = True,
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
layer_scale_init_value: Optional[float] = None,
):
"""
Args:
block_args: Arguments for blocks of the network.
out_indices: Output from stages at indices.
feature_location: Location of feature before/after each block, must be in ['bottleneck', 'expansion']
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
output_stride: Output stride of the network.
pad_type: Type of padding to use for convolution layers.
round_chs_fn: Callable to round number of filters based on depth multiplier.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
act_layer: Type of activation layer.
            norm_layer: Type of normalization layer.
            aa_layer: Type of anti-aliasing layer.
se_layer: Type of Squeeze-and-Excite layer.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
layer_scale_init_value: Enable layer scale on compatible blocks if not None.
"""
super(MobileNetV3Features, self).__init__()
act_layer = act_layer or nn.ReLU
norm_layer = norm_layer or nn.BatchNorm2d
se_layer = se_layer or SqueezeExcite
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size)
self.act1 = act_layer(inplace=True)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=output_stride,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
feature_location=feature_location,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = FeatureInfo(builder.features, out_indices)
self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()}
efficientnet_init_weights(self)
# Register feature extraction hooks with FeatureHooks helper
self.feature_hooks = None
if feature_location != 'bottleneck':
hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
self.feature_hooks = FeatureHooks(hooks, self.named_modules())
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
if self.feature_hooks is None:
features = []
if 0 in self._stage_out_idx:
features.append(x) # add stem out
for i, b in enumerate(self.blocks):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(b, x)
else:
x = b(x)
if i + 1 in self._stage_out_idx:
features.append(x)
return features
else:
self.blocks(x)
out = self.feature_hooks.get_output(x.device)
return list(out.values())
def _create_mnv3(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV3:
features_mode = ''
model_cls = MobileNetV3
kwargs_filter = None
if kwargs.pop('features_only', False):
if 'feature_cfg' in kwargs or 'feature_cls' in kwargs:
features_mode = 'cfg'
else:
kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'head_norm', 'global_pool')
model_cls = MobileNetV3Features
features_mode = 'cls'
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
features_only=features_mode == 'cfg',
pretrained_strict=features_mode != 'cls',
kwargs_filter=kwargs_filter,
**kwargs,
)
if features_mode == 'cls':
model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
return model
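# Illustrative usage sketch (not part of the original file): requesting features_only without an
# explicit feature_cfg routes through MobileNetV3Features above rather than the generic hook-based
# feature wrapper; the variant name is assumed to be a registered timm model.
def _example_mnv3_features_only():
    import timm
    model = timm.create_model('mobilenetv3_large_100', pretrained=False, features_only=True)
    feats = model(torch.randn(1, 3, 224, 224))
    return [f.shape for f in feats]  # one feature map per out_index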
def _gen_mobilenet_v3_rw(
variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs
) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
head_bias=False,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'),
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_v3(
variant: str, channel_multiplier: float = 1.0, depth_multiplier: float = 1.0,
group_size=None, pretrained: bool = False, **kwargs
) -> MobileNetV3:
"""Creates a MobileNet-V3 model.
Ref impl: ?
Paper: https://arxiv.org/abs/1905.02244
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
if 'small' in variant:
num_features = 1024
if 'minimal' in variant:
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s2_e1_c16'],
# stage 1, 56x56 in
['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'],
# stage 2, 28x28 in
['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'],
# stage 3, 14x14 in
['ir_r2_k3_s1_e3_c48'],
# stage 4, 14x14in
['ir_r3_k3_s2_e6_c96'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c576'],
]
else:
act_layer = resolve_act_layer(kwargs, 'hard_swish')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu
# stage 1, 56x56 in
['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu
# stage 2, 28x28 in
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish
# stage 3, 14x14 in
['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish
# stage 4, 14x14in
['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c576'], # hard-swish
]
else:
num_features = 1280
if 'minimal' in variant:
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16'],
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'],
# stage 2, 56x56 in
['ir_r3_k3_s2_e3_c40'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'],
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112'],
# stage 5, 14x14in
['ir_r3_k3_s2_e6_c160'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'],
]
else:
act_layer = resolve_act_layer(kwargs, 'hard_swish')
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_nre'], # relu
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu
# stage 2, 56x56 in
['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish
# stage 4, 14x14in
['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish
# stage 5, 14x14in
['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish
# stage 6, 7x7 in
['cn_r1_k1_s1_c960'], # hard-swish
]
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
model_kwargs = dict(
block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, group_size=group_size),
num_features=num_features,
stem_size=16,
fix_stem=channel_multiplier < 0.75,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
se_layer=se_layer,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_fbnetv3(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs):
""" FBNetV3
Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining`
- https://arxiv.org/abs/2006.02049
FIXME untested, this is a preliminary impl of some FBNet-V3 variants.
"""
vl = variant.split('_')[-1]
if vl in ('a', 'b'):
stem_size = 16
arch_def = [
['ds_r2_k3_s1_e1_c16'],
['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'],
['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'],
['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'],
['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'],
['cn_r1_k1_s1_c1344'],
]
elif vl == 'd':
stem_size = 24
arch_def = [
['ds_r2_k3_s1_e1_c16'],
['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'],
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'],
['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'],
['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'],
['cn_r1_k1_s1_c1440'],
]
elif vl == 'g':
stem_size = 32
arch_def = [
['ds_r3_k3_s1_e1_c24'],
['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'],
['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'],
['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'],
['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'],
['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'],
['cn_r1_k1_s1_c1728'],
]
else:
        raise NotImplementedError
round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95)
se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn)
act_layer = resolve_act_layer(kwargs, 'hard_swish')
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
num_features=1984,
head_bias=False,
stem_size=stem_size,
round_chs_fn=round_chs_fn,
se_from_exp=False,
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
se_layer=se_layer,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_lcnet(variant: str, channel_multiplier: float = 1.0, pretrained: bool = False, **kwargs):
""" LCNet
Essentially a MobileNet-V3 crossed with a MobileNet-V1
Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
arch_def = [
# stage 0, 112x112 in
['dsa_r1_k3_s1_c32'],
# stage 1, 112x112 in
['dsa_r2_k3_s2_c64'],
# stage 2, 56x56 in
['dsa_r2_k3_s2_c128'],
# stage 3, 28x28 in
['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'],
# stage 4, 14x14in
['dsa_r4_k5_s1_c256'],
# stage 5, 14x14in
['dsa_r2_k5_s2_c512_se0.25'],
# 7x7
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def),
stem_size=16,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=resolve_act_layer(kwargs, 'hard_swish'),
se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU),
num_features=1280,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _gen_mobilenet_v4(
variant: str, channel_multiplier: float = 1.0, group_size=None, pretrained: bool = False, **kwargs,
) -> MobileNetV3:
"""Creates a MobileNet-V4 model.
Ref impl: ?
    Paper: https://arxiv.org/abs/2404.10518
Args:
channel_multiplier: multiplier to number of channels per layer.
"""
num_features = 1280
if 'hybrid' in variant:
layer_scale_init_value = 1e-5
if 'medium' in variant:
stem_size = 32
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48' # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c80', # ExtraDW
'uir_r1_a3_k3_s1_e2_c80', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e6_c160', # ExtraDW
'uir_r1_a0_k0_s1_e2_c160', # FFN
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'uir_r1_a3_k5_s1_e4_c160', # ExtraDW
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'mqa_r1_k3_h4_s1_v2_d64_c160', # MQA w/ KV downsample
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r1_a5_k5_s2_e6_c256', # ExtraDW
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'uir_r2_a3_k5_s1_e4_c256', # ExtraDW
'uir_r1_a0_k0_s1_e2_c256', # FFN
'uir_r1_a3_k5_s1_e2_c256', # ExtraDW
'uir_r1_a0_k0_s1_e2_c256', # FFN
'uir_r1_a0_k0_s1_e4_c256', # FFN
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt
'mqa_r1_k3_h4_s1_d64_c256', # MQA
'uir_r1_a5_k0_s1_e4_c256', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960' # Conv
],
]
elif 'large' in variant:
stem_size = 24
act_layer = resolve_act_layer(kwargs, 'gelu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c96', # ExtraDW
'uir_r1_a3_k3_s1_e4_c96', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e4_c192', # ExtraDW
'uir_r3_a3_k3_s1_e4_c192', # ExtraDW
'uir_r1_a3_k5_s1_e4_c192', # ExtraDW
'uir_r2_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a5_k3_s1_e4_c192', # ExtraDW
'mqa_r1_k3_h8_s1_v2_d48_c192', # MQA w/ KV downsample
'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r4_a5_k5_s2_e4_c512', # ExtraDW
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r1_a5_k5_s1_e4_c512', # ExtraDW
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'mqa_r1_k3_h8_s1_d64_c512', # MQA
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
else:
assert False, f'Unknown variant {variant}.'
else:
layer_scale_init_value = None
if 'small' in variant:
stem_size = 32
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'cn_r1_k3_s2_e1_c32', # Conv
'cn_r1_k1_s1_e1_c32', # Conv
],
# stage 1, 56x56 in
[
'cn_r1_k3_s2_e1_c96', # Conv
'cn_r1_k1_s1_e1_c64', # Conv
],
# stage 2, 28x28 in
[
'uir_r1_a5_k5_s2_e3_c96', # ExtraDW
'uir_r4_a0_k3_s1_e2_c96', # IR
'uir_r1_a3_k0_s1_e4_c96', # ConvNeXt
],
# stage 3, 14x14 in
[
'uir_r1_a3_k3_s2_e6_c128', # ExtraDW
'uir_r1_a5_k5_s1_e4_c128', # ExtraDW
'uir_r1_a0_k5_s1_e4_c128', # IR
'uir_r1_a0_k5_s1_e3_c128', # IR
'uir_r2_a0_k3_s1_e4_c128', # IR
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
elif 'medium' in variant:
stem_size = 32
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c80', # ExtraDW
'uir_r1_a3_k3_s1_e2_c80', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e6_c160', # ExtraDW
'uir_r2_a3_k3_s1_e4_c160', # ExtraDW
'uir_r1_a3_k5_s1_e4_c160', # ExtraDW
'uir_r1_a3_k3_s1_e4_c160', # ExtraDW
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
                    'uir_r1_a0_k0_s1_e2_c160', # FFN
'uir_r1_a3_k0_s1_e4_c160', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r1_a5_k5_s2_e6_c256', # ExtraDW
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'uir_r2_a3_k5_s1_e4_c256', # ExtraDW
'uir_r1_a0_k0_s1_e4_c256', # FFN
'uir_r1_a3_k0_s1_e4_c256', # ConvNeXt
'uir_r1_a3_k5_s1_e2_c256', # ExtraDW
'uir_r1_a5_k5_s1_e4_c256', # ExtraDW
'uir_r2_a0_k0_s1_e4_c256', # FFN
'uir_r1_a5_k0_s1_e2_c256', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
elif 'large' in variant:
stem_size = 24
act_layer = resolve_act_layer(kwargs, 'relu')
arch_def = [
# stage 0, 112x112 in
[
'er_r1_k3_s2_e4_c48', # FusedIB (EdgeResidual)
],
# stage 1, 56x56 in
[
'uir_r1_a3_k5_s2_e4_c96', # ExtraDW
'uir_r1_a3_k3_s1_e4_c96', # ExtraDW
],
# stage 2, 28x28 in
[
'uir_r1_a3_k5_s2_e4_c192', # ExtraDW
'uir_r3_a3_k3_s1_e4_c192', # ExtraDW
'uir_r1_a3_k5_s1_e4_c192', # ExtraDW
'uir_r5_a5_k3_s1_e4_c192', # ExtraDW
'uir_r1_a3_k0_s1_e4_c192', # ConvNeXt
],
# stage 3, 14x14in
[
'uir_r4_a5_k5_s2_e4_c512', # ExtraDW
'uir_r1_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r2_a5_k0_s1_e4_c512', # ConvNeXt
'uir_r1_a5_k3_s1_e4_c512', # ExtraDW
'uir_r1_a5_k5_s1_e4_c512', # ExtraDW
'uir_r3_a5_k0_s1_e4_c512', # ConvNeXt
],
# stage 4, 7x7 in
[
'cn_r1_k1_s1_c960', # Conv
],
]
else:
assert False, f'Unknown variant {variant}.'
model_kwargs = dict(
block_args=decode_arch_def(arch_def, group_size=group_size),
head_bias=False,
head_norm=True,
num_features=num_features,
stem_size=stem_size,
fix_stem=channel_multiplier < 1.0,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
act_layer=act_layer,
layer_scale_init_value=layer_scale_init_value,
**kwargs,
)
model = _create_mnv3(variant, pretrained, **model_kwargs)
return model
def _cfg(url: str = '', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'mobilenetv3_large_075.untrained': _cfg(url=''),
'mobilenetv3_large_100.ra_in1k': _cfg(
interpolation='bicubic',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth',
hf_hub_id='timm/'),
'mobilenetv3_large_100.miil_in21k_ft_in1k': _cfg(
interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.),
origin_url='https://github.com/Alibaba-MIIL/ImageNet21K',
paper_ids='arXiv:2104.10972v4',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth',
hf_hub_id='timm/'),
'mobilenetv3_large_100.miil_in21k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth',
hf_hub_id='timm/',
origin_url='https://github.com/Alibaba-MIIL/ImageNet21K',
paper_ids='arXiv:2104.10972v4',
interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), num_classes=11221),
'mobilenetv3_large_150d.untrained': _cfg(
#hf_hub_id='timm/',
),
'mobilenetv3_small_050.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_small_075.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_small_100.lamb_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'mobilenetv3_rw.rmsp_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth',
hf_hub_id='timm/',
interpolation='bicubic'),
'tf_mobilenetv3_large_075.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_minimal_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_075.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_100.in1k': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_minimal_100.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
hf_hub_id='timm/',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'fbnetv3_b.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), crop_pct=0.95),
'fbnetv3_d.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth',
hf_hub_id='timm/',
test_input_size=(3, 256, 256), crop_pct=0.95),
'fbnetv3_g.ra2_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth',
hf_hub_id='timm/',
input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)),
"lcnet_035.untrained": _cfg(),
"lcnet_050.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_075.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_100.ra2_in1k": _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth',
hf_hub_id='timm/',
interpolation='bicubic',
),
"lcnet_150.untrained": _cfg(),
'mobilenetv4_conv_small.e2400_r224_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_small.e1200_r224_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 256, 256), test_crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_medium.e500_r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_medium.e500_r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_large.e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_large.e500_r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.e200_r256_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.ix_e550_r256_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.ix_e550_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.e500_r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium.e200_r256_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_large.ix_e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_large.e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
# experimental
'mobilenetv4_conv_aa_medium.untrained': _cfg(
# hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_conv_blur_medium.e500_r224_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 256, 256), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e230_r448_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14),
crop_pct=0.95, test_input_size=(3, 544, 544), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e230_r384_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e600_r384_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 480, 480), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_conv_aa_large.e230_r384_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
input_size=(3, 384, 384), pool_size=(12, 12),
crop_pct=0.95, test_input_size=(3, 448, 448), test_crop_pct=1.0, interpolation='bicubic'),
'mobilenetv4_hybrid_medium_075.untrained': _cfg(
# hf_hub_id='timm/',
crop_pct=0.95, interpolation='bicubic'),
'mobilenetv4_hybrid_large_075.untrained': _cfg(
# hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, interpolation='bicubic'),
})
@register_model
def mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_large_150d(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_large_150d', 1.5, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_050(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv3_rw(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_large_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def tf_mobilenetv3_small_minimal_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V3 """
kwargs.setdefault('bn_eps', BN_EPS_TF_DEFAULT)
kwargs.setdefault('pad_type', 'same')
model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_b(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-B """
model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_d(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-D """
model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs)
return model
@register_model
def fbnetv3_g(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" FBNetV3-G """
model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_035(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 0.35"""
model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_050(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 0.5"""
model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
    """ PP-LCNet 0.75"""
model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_100(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.0"""
model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def lcnet_150(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" PP-LCNet 1.5"""
model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_small(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_small', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_medium', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_large(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 """
model = _gen_mobilenet_v4('mobilenetv4_conv_large', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_hybrid_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid """
model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_hybrid_large(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid"""
model = _gen_mobilenet_v4('mobilenetv4_hybrid_large', 1.0, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_conv_aa_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 w/ AvgPool AA """
model = _gen_mobilenet_v4('mobilenetv4_conv_aa_medium', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs)
return model
@register_model
def mobilenetv4_conv_blur_medium(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Conv w/ Blur AA """
model = _gen_mobilenet_v4('mobilenetv4_conv_blur_medium', 1.0, pretrained=pretrained, aa_layer='blurpc', **kwargs)
return model
@register_model
def mobilenetv4_conv_aa_large(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 w/ AvgPool AA """
model = _gen_mobilenet_v4('mobilenetv4_conv_aa_large', 1.0, pretrained=pretrained, aa_layer='avg', **kwargs)
return model
@register_model
def mobilenetv4_hybrid_medium_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid """
model = _gen_mobilenet_v4('mobilenetv4_hybrid_medium_075', 0.75, pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv4_hybrid_large_075(pretrained: bool = False, **kwargs) -> MobileNetV3:
""" MobileNet V4 Hybrid"""
model = _gen_mobilenet_v4('mobilenetv4_hybrid_large_075', 0.75, pretrained=pretrained, **kwargs)
return model
register_model_deprecations(__name__, {
'mobilenetv3_large_100_miil': 'mobilenetv3_large_100.miil_in21k_ft_in1k',
'mobilenetv3_large_100_miil_in21k': 'mobilenetv3_large_100.miil_in21k',
})
|
pytorch-image-models/timm/models/mobilenetv3.py/0
|
{
"file_path": "pytorch-image-models/timm/models/mobilenetv3.py",
"repo_id": "pytorch-image-models",
"token_count": 29761
}
| 217
|
""" ResNeSt Models
Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang
Modified for torchscript compat, and consistency with timm by Ross Wightman
"""
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SplitAttn
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from .resnet import ResNet
class ResNestBottleneck(nn.Module):
"""ResNet Bottleneck
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
radix=1,
cardinality=1,
base_width=64,
avd=False,
avd_first=False,
is_first=False,
reduce_first=1,
dilation=1,
first_dilation=None,
act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d,
attn_layer=None,
aa_layer=None,
drop_block=None,
drop_path=None,
):
super(ResNestBottleneck, self).__init__()
assert reduce_first == 1 # not supported
assert attn_layer is None # not supported
assert aa_layer is None # TODO not yet supported
assert drop_path is None # TODO not yet supported
group_width = int(planes * (base_width / 64.)) * cardinality
first_dilation = first_dilation or dilation
if avd and (stride > 1 or is_first):
avd_stride = stride
stride = 1
else:
avd_stride = 0
self.radix = radix
self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
self.bn1 = norm_layer(group_width)
self.act1 = act_layer(inplace=True)
self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None
if self.radix >= 1:
self.conv2 = SplitAttn(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block)
self.bn2 = nn.Identity()
self.drop_block = nn.Identity()
self.act2 = nn.Identity()
else:
self.conv2 = nn.Conv2d(
group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation,
dilation=first_dilation, groups=cardinality, bias=False)
self.bn2 = norm_layer(group_width)
self.drop_block = drop_block() if drop_block is not None else nn.Identity()
self.act2 = act_layer(inplace=True)
self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None
self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_layer(planes*4)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
def zero_init_last(self):
if getattr(self.bn3, 'weight', None) is not None:
nn.init.zeros_(self.bn3.weight)
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.act1(out)
if self.avd_first is not None:
out = self.avd_first(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.drop_block(out)
out = self.act2(out)
if self.avd_last is not None:
out = self.avd_last(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.act3(out)
return out
def _create_resnest(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet,
variant,
pretrained,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1.0', 'classifier': 'fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'),
'resnest50d.in1k': _cfg(hf_hub_id='timm/'),
'resnest101e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 256, 256), pool_size=(8, 8)),
'resnest200e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'),
'resnest269e.in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'),
'resnest50d_4s2x40d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic'),
'resnest50d_1s4x24d.in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic')
})
@register_model
def resnest14d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-14d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[1, 1, 1, 1],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest26d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-26d model. Weights ported from GluonCV.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[2, 2, 2, 2],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest101e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 23, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest200e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 24, 36, 3],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest269e(pretrained=False, **kwargs) -> ResNet:
""" ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955
Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample.
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 30, 48, 8],
stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1,
block_args=dict(radix=2, avd=True, avd_first=False))
return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2,
block_args=dict(radix=4, avd=True, avd_first=True))
return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
@register_model
def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet:
"""ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md
"""
model_kwargs = dict(
block=ResNestBottleneck, layers=[3, 4, 6, 3],
stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4,
block_args=dict(radix=1, avd=True, avd_first=True))
return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
|
pytorch-image-models/timm/models/resnest.py/0
|
{
"file_path": "pytorch-image-models/timm/models/resnest.py",
"repo_id": "pytorch-image-models",
"token_count": 4439
}
| 218
|
""" Visformer
Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533
From original at https://github.com/danczs/Visformer
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['Visformer']
class SpatialMlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.,
group=8,
spatial_conv=False,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = to_2tuple(drop)
self.in_features = in_features
self.out_features = out_features
self.spatial_conv = spatial_conv
if self.spatial_conv:
if group < 2: # net setting
hidden_features = in_features * 5 // 6
else:
hidden_features = in_features * 2
self.hidden_features = hidden_features
self.group = group
self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False)
self.act1 = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
if self.spatial_conv:
self.conv2 = nn.Conv2d(
hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False)
self.act2 = act_layer()
else:
self.conv2 = None
self.act2 = None
self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False)
self.drop3 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.drop1(x)
if self.conv2 is not None:
x = self.conv2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.drop3(x)
return x
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.num_heads = num_heads
head_dim = round(dim // num_heads * head_dim_ratio)
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn(experimental=True)
self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, C, H, W = x.shape
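        # project to q, k, v in a single 1x1 conv, then reshape to (3, B, num_heads, N, head_dim) where N = H * W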
x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3)
q, k, v = x.unbind(0)
if self.fused_attn:
x = torch.nn.functional.scaled_dot_product_attention(
q.contiguous(), k.contiguous(), v.contiguous(),
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
head_dim_ratio=1.,
mlp_ratio=4.,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=LayerNorm2d,
group=8,
attn_disabled=False,
spatial_conv=False,
):
super().__init__()
self.spatial_conv = spatial_conv
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if attn_disabled:
self.norm1 = None
self.attn = None
else:
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
head_dim_ratio=head_dim_ratio,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.mlp = SpatialMlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
group=group,
spatial_conv=spatial_conv,
)
def forward(self, x):
if self.attn is not None:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class Visformer(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
init_channels=32,
embed_dim=384,
depth=12,
num_heads=6,
mlp_ratio=4.,
drop_rate=0.,
pos_drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm2d,
attn_stage='111',
use_pos_embed=True,
spatial_conv='111',
vit_stem=False,
group=8,
global_pool='avg',
conv_init=False,
embed_norm=None,
):
super().__init__()
img_size = to_2tuple(img_size)
self.num_classes = num_classes
self.embed_dim = embed_dim
self.init_channels = init_channels
self.img_size = img_size
self.vit_stem = vit_stem
self.conv_init = conv_init
if isinstance(depth, (list, tuple)):
self.stage_num1, self.stage_num2, self.stage_num3 = depth
depth = sum(depth)
else:
self.stage_num1 = self.stage_num3 = depth // 3
self.stage_num2 = depth - self.stage_num1 - self.stage_num3
self.use_pos_embed = use_pos_embed
self.grad_checkpointing = False
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# stage 1
if self.vit_stem:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // patch_size for x in img_size]
else:
if self.init_channels is None:
self.stem = None
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 2,
in_chans=in_chans,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 2) for x in img_size]
else:
self.stem = nn.Sequential(
nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(self.init_channels),
nn.ReLU(inplace=True)
)
img_size = [x // 2 for x in img_size]
self.patch_embed1 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 4,
in_chans=self.init_channels,
embed_dim=embed_dim // 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 4) for x in img_size]
if self.use_pos_embed:
if self.vit_stem:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
else:
self.pos_embed1 = None
self.stage1 = nn.Sequential(*[
Block(
dim=embed_dim//2,
num_heads=num_heads,
head_dim_ratio=0.5,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[0] == '0'),
spatial_conv=(spatial_conv[0] == '1'),
)
for i in range(self.stage_num1)
])
# stage2
if not self.vit_stem:
self.patch_embed2 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim // 2,
embed_dim=embed_dim,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size))
else:
self.pos_embed2 = None
else:
self.patch_embed2 = None
self.stage2 = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[1] == '0'),
spatial_conv=(spatial_conv[1] == '1'),
)
for i in range(self.stage_num1, self.stage_num1+self.stage_num2)
])
# stage 3
if not self.vit_stem:
self.patch_embed3 = PatchEmbed(
img_size=img_size,
patch_size=patch_size // 8,
in_chans=embed_dim,
embed_dim=embed_dim * 2,
norm_layer=embed_norm,
flatten=False,
)
img_size = [x // (patch_size // 8) for x in img_size]
if self.use_pos_embed:
self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size))
else:
self.pos_embed3 = None
else:
self.patch_embed3 = None
self.stage3 = nn.Sequential(*[
Block(
dim=embed_dim * 2,
num_heads=num_heads,
head_dim_ratio=1.0,
mlp_ratio=mlp_ratio,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
group=group,
attn_disabled=(attn_stage[2] == '0'),
spatial_conv=(spatial_conv[2] == '1'),
)
for i in range(self.stage_num1+self.stage_num2, depth)
])
self.num_features = self.head_hidden_size = embed_dim if self.vit_stem else embed_dim * 2
self.norm = norm_layer(self.num_features)
# head
global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
self.global_pool = global_pool
self.head_drop = nn.Dropout(drop_rate)
self.head = head
# weights init
if self.use_pos_embed:
trunc_normal_(self.pos_embed1, std=0.02)
if not self.vit_stem:
trunc_normal_(self.pos_embed2, std=0.02)
trunc_normal_(self.pos_embed3, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
if self.conv_init:
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
else:
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0.)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^patch_embed1|pos_embed1|stem', # stem and embed
blocks=[
(r'^stage(\d+)\.(\d+)' if coarse else r'^stage(\d+)\.(\d+)', None),
(r'^(?:patch_embed|pos_embed)(\d+)', (0,)),
(r'^norm', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
if self.stem is not None:
x = self.stem(x)
# stage 1
x = self.patch_embed1(x)
if self.pos_embed1 is not None:
x = self.pos_drop(x + self.pos_embed1)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage1, x)
else:
x = self.stage1(x)
# stage 2
if self.patch_embed2 is not None:
x = self.patch_embed2(x)
if self.pos_embed2 is not None:
x = self.pos_drop(x + self.pos_embed2)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage2, x)
else:
x = self.stage2(x)
# stage3
if self.patch_embed3 is not None:
x = self.patch_embed3(x)
if self.pos_embed3 is not None:
x = self.pos_drop(x + self.pos_embed3)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stage3, x)
else:
x = self.stage3(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'),
'visformer_small.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def visformer_tiny(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def visformer_small(pretrained=False, **kwargs) -> Visformer:
model_cfg = dict(
init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8,
attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True,
embed_norm=nn.BatchNorm2d)
model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
# @register_model
# def visformer_net1(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net2(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net3(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net4(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111',
# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net5(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# spatial_conv='111', vit_stem=False, conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net6(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
#
#
# @register_model
# def visformer_net7(pretrained=False, **kwargs):
# model = Visformer(
# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000',
# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs)
# model.default_cfg = _cfg()
# return model
|
pytorch-image-models/timm/models/visformer.py/0
|
{
"file_path": "pytorch-image-models/timm/models/visformer.py",
"repo_id": "pytorch-image-models",
"token_count": 10151
}
| 219
|
""" AdamW Optimizer
Impl copied from PyTorch master
NOTE: Builtin optim.AdamW is used by the factory; this impl only serves as a Python-based reference and will be removed someday
someday
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# Perform stepweight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
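# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original reference implementation).
# The toy model, data shapes, and hyper-parameters below are arbitrary choices
# made purely to show how this AdamW class is constructed and stepped.
if __name__ == "__main__":
    torch.manual_seed(0)
    model = torch.nn.Linear(10, 1)
    optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
    x, y = torch.randn(32, 10), torch.randn(32, 1)
    for _ in range(5):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
        print(f"loss: {loss.item():.4f}")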
|
pytorch-image-models/timm/optim/adamw.py/0
|
{
"file_path": "pytorch-image-models/timm/optim/adamw.py",
"repo_id": "pytorch-image-models",
"token_count": 2417
}
| 220
|
""" JIT scripting/tracing utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import os
import torch
def set_jit_legacy():
""" Set JIT executor to legacy w/ support for op fusion
    This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes
    in the JIT executor. These APIs are not supported, so they could change.
"""
#
assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!"
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
#torch._C._jit_set_texpr_fuser_enabled(True)
def set_jit_fuser(fuser):
if fuser == "te":
# default fuser should be == 'te'
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
try:
torch._C._jit_set_nvfuser_enabled(False)
except Exception:
pass
elif fuser == "old" or fuser == "legacy":
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
try:
torch._C._jit_set_nvfuser_enabled(False)
except Exception:
pass
elif fuser == "nvfuser" or fuser == "nvf":
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
#os.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1'
#os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_can_fuse_on_cpu()
torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_nvfuser_enabled(True)
else:
assert False, f"Invalid jit fuser ({fuser})"
|
pytorch-image-models/timm/utils/jit.py/0
|
{
"file_path": "pytorch-image-models/timm/utils/jit.py",
"repo_id": "pytorch-image-models",
"token_count": 1036
}
| 221
|
[workspace]
members = [
"benchmark",
"backends/v3",
"backends/grpc-metadata",
"backends/trtllm",
"backends/client",
"launcher"
]
default-members = [
"benchmark",
"backends/v3",
"backends/grpc-metadata",
# "backends/trtllm",
"backends/client",
"launcher"
]
resolver = "2"
[workspace.package]
version = "2.2.1-dev0"
edition = "2021"
authors = ["Olivier Dehaene"]
homepage = "https://github.com/huggingface/text-generation-inference"
[workspace.dependencies]
base64 = "0.22.0"
tokenizers = { version = "0.19.1", features = ["http"] }
hf-hub = { version = "0.3.1", features = ["tokio"] }
metrics = { version = "0.23.0" }
metrics-exporter-prometheus = { version = "0.15.1", features = [] }
minijinja = { version = "2.2.0", features = ["json"] }
minijinja-contrib = { version = "2.0.2", features = ["pycompat"] }
[profile.release]
incremental = true
[profile.release-binary]
inherits = "release"
debug = 1
incremental = true
panic = "abort"
[profile.release-opt]
inherits = "release"
debug = 0
incremental = false
lto = "fat"
opt-level = 3
codegen-units = 1
|
text-generation-inference/Cargo.toml/0
|
{
"file_path": "text-generation-inference/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 434
}
| 222
|
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod client;
mod sharded_client;
pub use client::Client;
pub use pb::generate::v2::HealthResponse;
pub use pb::generate::v2::{
Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType, InfoResponse,
NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens,
};
pub use sharded_client::ShardedClient;
|
text-generation-inference/backends/client/src/v2/mod.rs/0
|
{
"file_path": "text-generation-inference/backends/client/src/v2/mod.rs",
"repo_id": "text-generation-inference",
"token_count": 134
}
| 223
|
use std::sync::Arc;
use tokio::sync::{mpsc, oneshot};
use crate::radix::RadixAllocator;
#[derive(Debug, Clone)]
pub struct BlockAllocation {
pub allocation_id: u64,
pub blocks: Vec<u32>,
pub slots: Vec<u32>,
/// Prefix that was cached and for which the KV does not have to
/// be recomputed.
pub prefix_len: u32,
pub(crate) block_allocator: Option<BlockAllocator>,
}
impl Drop for BlockAllocation {
fn drop(&mut self) {
if let Some(block_allocator) = self.block_allocator.as_mut() {
block_allocator.free(self.blocks.clone(), self.allocation_id)
}
}
}
#[derive(Debug, Clone)]
pub struct BlockAllocator {
/// Channel to communicate with the background task
block_allocator: mpsc::UnboundedSender<BlockAllocatorCommand>,
}
impl BlockAllocator {
pub(crate) fn new(
max_batch_total_tokens: u32,
block_size: u32,
prefix_caching: bool,
window_size: Option<u32>,
) -> Self {
// Create channel
let (sender, receiver) = mpsc::unbounded_channel();
// Launch background queue task
tokio::spawn(block_allocator_task(
max_batch_total_tokens / block_size,
block_size,
prefix_caching,
window_size,
receiver,
));
Self {
block_allocator: sender,
}
}
pub(crate) async fn allocate(
&self,
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation> {
let (response_sender, response_receiver) = oneshot::channel();
self.block_allocator
.send(BlockAllocatorCommand::Allocate {
tokens,
prefill_tokens,
response_sender,
})
.unwrap();
response_receiver.await.unwrap().map(|mut allocation| {
allocation.block_allocator = Some(self.clone());
allocation
})
}
pub(crate) fn free(&self, blocks: Vec<u32>, allocation_id: u64) {
self.block_allocator
.send(BlockAllocatorCommand::Free {
allocation_id,
blocks,
})
.unwrap();
}
}
async fn block_allocator_task(
blocks: u32,
block_size: u32,
prefix_caching: bool,
window_size: Option<u32>,
mut receiver: mpsc::UnboundedReceiver<BlockAllocatorCommand>,
) {
let mut allocator: Box<dyn Allocator + Send> = if prefix_caching {
Box::new(RadixAllocator::new(block_size, blocks, window_size))
} else {
Box::new(SimpleAllocator::new(blocks, block_size, window_size))
};
while let Some(cmd) = receiver.recv().await {
match cmd {
BlockAllocatorCommand::Free {
blocks,
allocation_id,
} => allocator.free(blocks, allocation_id),
BlockAllocatorCommand::Allocate {
tokens,
prefill_tokens,
response_sender,
} => {
response_sender
.send(allocator.allocate(tokens, prefill_tokens))
.unwrap();
}
}
}
}
#[derive(Debug)]
enum BlockAllocatorCommand {
Free {
blocks: Vec<u32>,
allocation_id: u64,
},
Allocate {
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
response_sender: oneshot::Sender<Option<BlockAllocation>>,
},
}
pub trait Allocator {
fn allocate(
&mut self,
tokens: u32,
prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation>;
fn free(&mut self, blocks: Vec<u32>, allocation_id: u64);
}
pub struct SimpleAllocator {
free_blocks: Vec<u32>,
block_size: u32,
window_size: Option<u32>,
}
impl SimpleAllocator {
fn new(blocks: u32, block_size: u32, window_size: Option<u32>) -> Self {
SimpleAllocator {
block_size,
// Block 0 is reserved for health checks
free_blocks: (1..blocks).collect(),
window_size,
}
}
}
impl Allocator for SimpleAllocator {
fn allocate(
&mut self,
tokens: u32,
_prefill_tokens: Option<Arc<Vec<u32>>>,
) -> Option<BlockAllocation> {
// Apply window size
let (required_blocks, repeats) = {
let (tokens, repeats) = match self.window_size {
None => (tokens, 1),
Some(window_size) => {
let repeats = (tokens + window_size - 1) / window_size;
let tokens = core::cmp::min(tokens, window_size);
(tokens, repeats as usize)
}
};
// Pad to a multiple of block size
let required_blocks = (tokens + self.block_size - 1) / self.block_size;
(required_blocks, repeats)
};
let tokens = tokens as usize;
if required_blocks > self.free_blocks.len() as u32 {
None
} else {
let blocks = self
.free_blocks
.split_off(self.free_blocks.len() - required_blocks as usize);
let mut slots =
Vec::with_capacity((required_blocks * self.block_size * repeats as u32) as usize);
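            // Walk the allocated blocks (repeated `repeats` times when a window size is
            // applied) and collect one slot index per token, stopping early once
            // `tokens` slots have been gathered.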
'slots: for block_id in blocks.repeat(repeats).iter() {
for s in (block_id * self.block_size)..((block_id + 1) * self.block_size) {
slots.push(s);
if slots.len() == tokens {
break 'slots;
}
}
}
Some(BlockAllocation {
allocation_id: 0,
blocks,
slots,
prefix_len: 0,
block_allocator: None,
})
}
}
fn free(&mut self, blocks: Vec<u32>, _allocation_id: u64) {
self.free_blocks.extend(blocks)
}
}
|
text-generation-inference/backends/v3/src/block_allocator.rs/0
|
{
"file_path": "text-generation-inference/backends/v3/src/block_allocator.rs",
"repo_id": "text-generation-inference",
"token_count": 3020
}
| 224
|
/// MIT License
//
// Copyright (c) 2020 hatoo
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
use std::collections::BTreeMap;
pub(crate) fn histogram(values: &[f64], bins: usize) -> Vec<(f64, usize)> {
assert!(bins >= 2);
let mut bucket: Vec<usize> = vec![0; bins];
let min = values.iter().collect::<average::Min>().min();
let max = values.iter().collect::<average::Max>().max();
let step = (max - min) / (bins - 1) as f64;
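    // Assign each value to a bucket of width `step`, clamping into the last bucket.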
for &v in values {
let i = std::cmp::min(((v - min) / step).ceil() as usize, bins - 1);
bucket[i] += 1;
}
bucket
.into_iter()
.enumerate()
.map(|(i, v)| (min + step * i as f64, v))
.collect()
}
pub(crate) fn percentiles(values: &[f64], percents: &[i32]) -> BTreeMap<String, f64> {
    percents
.iter()
.map(|&p| {
let i = (f64::from(p) / 100.0 * values.len() as f64) as usize;
(format!("p{p}"), *values.get(i).unwrap_or(&f64::NAN))
})
.collect()
}
|
text-generation-inference/benchmark/src/utils.rs/0
|
{
"file_path": "text-generation-inference/benchmark/src/utils.rs",
"repo_id": "text-generation-inference",
"token_count": 598
}
| 225
|
Documentation available at: https://huggingface.co/docs/text-generation-inference
## Release
When making a release, please update the latest version in the documentation with:
```
export OLD_VERSION="2\.0\.3"
export NEW_VERSION="2\.0\.4"
find . -name '*.md' -exec sed -i -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \;
```
|
text-generation-inference/docs/README.md/0
|
{
"file_path": "text-generation-inference/docs/README.md",
"repo_id": "text-generation-inference",
"token_count": 107
}
| 226
|
# Guidance
## What is Guidance?
Guidance is a feature that allows users to constrain the generation of a large language model with a specified grammar. This feature is particularly useful when you want to generate text that follows a specific structure, uses a specific set of words, or produces output in a specific format. A prominent example is JSON grammar, where the model is forced to output valid JSON.
## How is it used?
Guidance can be implemented in many ways, and the community is always finding new ways to use it. Here are some examples of how you can use guidance.
Technically, guidance can be used to generate:
- a specific JSON object
- a function signature
- typed output like a list of integers
However, these use cases can span a wide range of applications, such as:
- extracting structured data from unstructured text
- summarizing text into a specific format
- limiting output to specific classes of words (acting as an LLM-powered classifier)
- generating the input to specific APIs or services
- providing reliable and consistent output for downstream tasks
- extracting data from multimodal inputs
## How does it work?
Diving into the details, guidance is enabled by including a grammar with a generation request; the grammar is compiled and then used to modify the chosen tokens.
This process can be broken down into the following steps:
1. A request is sent to the backend, where it is processed and placed in a batch. Processing includes compiling the grammar into a finite state machine and creating a grammar state.
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch-dark.gif"
/>
</div>
2. The model does a forward pass over the batch. This returns probabilities for each token in the vocabulary for each request in the batch.
3. The process of choosing one of those tokens is called `sampling`. The model samples from the distribution of probabilities to choose the next token. In TGI all of the steps before sampling are called `processor`. Grammars are applied as a processor that masks out tokens that are not allowed by the grammar.
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask-dark.gif"
/>
</div>
4. The grammar mask is applied and the model samples from the remaining tokens. Once a token is chosen, we update the grammar state with the new token, to prepare it for the next pass.
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits-dark.gif"
/>
</div>
## How to use Guidance?
There are two main ways to use guidance; you can either use the `/generate` endpoint with a grammar or use the `/chat/completions` endpoint with tools.
Under the hood, tools are a special case of grammars that allows the model to choose one or none of the provided tools.
Please refer to [using guidance](../basic_tutorials/using_guidance) for more examples and details on how to use guidance in Python, JavaScript, and cURL.
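As a quick sketch of what this looks like in practice, the following Python snippet sends a grammar-constrained request to the `/generate` endpoint, assuming a TGI server listening on `localhost:3000`. The schema, prompt, and field names are made up for illustration; the exact payload shape is documented in the guide linked above.

```python
import json

import requests

# Hypothetical JSON schema used to constrain the output (illustration only).
schema = {
    "type": "object",
    "properties": {
        "firstName": {"type": "string"},
        "numCats": {"type": "integer"},
    },
    "required": ["firstName", "numCats"],
}

payload = {
    # Echoing the schema in the prompt (see the tips below) tends to help the model.
    "inputs": (
        f"Please use the following JSON schema to generate the output: {json.dumps(schema)}\n"
        "info: David likes trees and has two cats."
    ),
    "parameters": {
        "max_new_tokens": 100,
        "grammar": {"type": "json", "value": schema},
    },
}

response = requests.post("http://localhost:3000/generate", json=payload)
print(response.json()["generated_text"])
```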
### Getting the most out of guidance
Depending on how you are using guidance, you may want to make use of different features. Here are some tips to get the most out of guidance:
- If you are using the `/generate` endpoint with a `grammar`, it is recommended to include the grammar in the prompt prefixed by something like `Please use the following JSON schema to generate the output:`. This will help the model understand the context of the grammar and generate the output accordingly.
- If you are getting a response with many repeated tokens, please use the `frequency_penalty` or `repetition_penalty` to reduce the number of repeated tokens in the output.
|
text-generation-inference/docs/source/conceptual/guidance.md/0
|
{
"file_path": "text-generation-inference/docs/source/conceptual/guidance.md",
"repo_id": "text-generation-inference",
"token_count": 1237
}
| 227
|
# HTTP API Reference
#### Table of Contents
- [Text Generation Inference custom API](#text-generation-inference-custom-api)
- [OpenAI Messages API](#openai-messages-api)
- [Making a Request](#making-a-request)
- [Streaming](#streaming)
- [Synchronous](#synchronous)
- [Hugging Face Inference Endpoints](#hugging-face-inference-endpoints)
- [Cloud Providers](#cloud-providers)
- [Amazon SageMaker](#amazon-sagemaker)
The HTTP API is a RESTful API that allows you to interact with the text-generation-inference component. Two endpoints are available:
* Text Generation Inference [custom API](https://huggingface.github.io/text-generation-inference/)
* OpenAI's [Messages API](#openai-messages-api)
## Text Generation Inference custom API
Check the [API documentation](https://huggingface.github.io/text-generation-inference/) for more information on how to interact with the Text Generation Inference API.
## OpenAI Messages API
Text Generation Inference (TGI) now supports the Messages API, which is fully compatible with the OpenAI Chat Completion API. This feature is available starting from version 1.4.0. You can use OpenAI's client libraries or third-party libraries expecting OpenAI schema to interact with TGI's Messages API. Below are some examples of how to utilize this compatibility.
> **Note:** The Messages API is supported from TGI version 1.4.0 and above. Ensure you are using a compatible version to access this feature.
## Making a Request
You can make a request to TGI's Messages API using `curl`. Here's an example:
```bash
curl localhost:3000/v1/chat/completions \
-X POST \
-d '{
"model": "tgi",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "What is deep learning?"
}
],
"stream": true,
"max_tokens": 20
}' \
-H 'Content-Type: application/json'
```
## Streaming
You can also use OpenAI's Python client library to make a streaming request. Here's how:
```python
from openai import OpenAI
# init the client but point it to TGI
client = OpenAI(
base_url="http://localhost:3000/v1",
api_key="-"
)
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{"role": "system", "content": "You are a helpful assistant." },
{"role": "user", "content": "What is deep learning?"}
],
stream=True
)
# iterate and print stream
for message in chat_completion:
print(message)
```
## Synchronous
If you prefer to make a synchronous request, you can do so like this:
```python
from openai import OpenAI
# init the client but point it to TGI
client = OpenAI(
base_url="http://localhost:3000/v1",
api_key="-"
)
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{"role": "system", "content": "You are a helpful assistant." },
{"role": "user", "content": "What is deep learning?"}
],
stream=False
)
print(chat_completion)
```
## Hugging Face Inference Endpoints
The Messages API is integrated with [Inference Endpoints](https://huggingface.co/inference-endpoints/dedicated).
Every endpoint that uses "Text Generation Inference" with an LLM that has a chat template can now be used. Below is an example of how to use Inference Endpoints with TGI using OpenAI's Python client library:
> **Note:** Make sure to replace `base_url` with your endpoint URL and to include `v1/` at the end of the URL. The `api_key` should be replaced with your Hugging Face API key.
```python
from openai import OpenAI
# init the client but point it to TGI
client = OpenAI(
# replace with your endpoint url, make sure to include "v1/" at the end
base_url="https://vlzz10eq3fol3429.us-east-1.aws.endpoints.huggingface.cloud/v1/",
# replace with your API key
api_key="hf_XXX"
)
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{"role": "system", "content": "You are a helpful assistant." },
{"role": "user", "content": "What is deep learning?"}
],
stream=True
)
# iterate and print stream
for message in chat_completion:
print(message.choices[0].delta.content, end="")
```
## Cloud Providers
TGI can be deployed on various cloud providers for scalable and robust text generation. One such provider is Amazon SageMaker, which has recently added support for TGI. Here's how you can deploy TGI on Amazon SageMaker:
## Amazon SageMaker
To enable the Messages API in Amazon SageMaker you need to set the environment variable `MESSAGES_API_ENABLED=true`.
This will modify the `/invocations` route to accept Messages dictionaries consisting of a role and content. See the example below for how to deploy Llama with the new Messages API.
```python
import json
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFaceModel, get_huggingface_llm_image_uri
try:
role = sagemaker.get_execution_role()
except ValueError:
iam = boto3.client('iam')
role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'HuggingFaceH4/zephyr-7b-beta',
'SM_NUM_GPUS': json.dumps(1),
    'MESSAGES_API_ENABLED': json.dumps(True)
}
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"),
env=hub,
role=role,
)
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1,
instance_type="ml.g5.2xlarge",
container_startup_health_check_timeout=300,
)
# send request
predictor.predict({
"messages": [
{"role": "system", "content": "You are a helpful assistant." },
{"role": "user", "content": "What is deep learning?"}
]
})
```
|
text-generation-inference/docs/source/reference/api_reference.md/0
|
{
"file_path": "text-generation-inference/docs/source/reference/api_reference.md",
"repo_id": "text-generation-inference",
"token_count": 1915
}
| 228
|
{
"choices": [
{
"finish_reason": "stop",
"index": 1,
"logprobs": null,
"text": " PR for more information?"
},
{
"finish_reason": "length",
"index": 3,
"logprobs": null,
"text": "hd20220811-"
},
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"text": "le Business Incubator is providing a workspace"
},
{
"finish_reason": "length",
"index": 2,
"logprobs": null,
"text": " severely flawed and often has a substandard"
}
],
"created": 1722014725,
"id": "",
"model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
"object": "text_completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 36,
"prompt_tokens": 8,
"total_tokens": 44
}
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts.json",
"repo_id": "text-generation-inference",
"token_count": 407
}
| 229
|
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2,
"logprob": null,
"text": "<bos>"
},
{
"id": 2015,
"logprob": -10.0,
"text": "Test"
},
{
"id": 3853,
"logprob": -10.875,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 1736,
"logprob": -2.09375,
"special": false,
"text": " form"
},
{
"id": 109,
"logprob": -1.9140625,
"special": false,
"text": "\n\n"
},
{
"id": 651,
"logprob": -2.453125,
"special": false,
"text": "The"
},
{
"id": 2121,
"logprob": -1.8984375,
"special": false,
"text": " test"
},
{
"id": 3853,
"logprob": -0.23535156,
"special": false,
"text": " request"
},
{
"id": 1736,
"logprob": -0.091308594,
"special": false,
"text": " form"
},
{
"id": 603,
"logprob": -0.96875,
"special": false,
"text": " is"
},
{
"id": 1671,
"logprob": -1.6484375,
"special": false,
"text": " used"
},
{
"id": 577,
"logprob": -0.43164062,
"special": false,
"text": " to"
},
{
"id": 3853,
"logprob": -1.2421875,
"special": false,
"text": " request"
}
],
"top_tokens": null
},
"generated_text": " form\n\nThe test request form is used to request"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2,
"logprob": null,
"text": "<bos>"
},
{
"id": 2015,
"logprob": -10.0,
"text": "Test"
},
{
"id": 3853,
"logprob": -10.875,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 1736,
"logprob": -2.09375,
"special": false,
"text": " form"
},
{
"id": 109,
"logprob": -1.9140625,
"special": false,
"text": "\n\n"
},
{
"id": 651,
"logprob": -2.453125,
"special": false,
"text": "The"
},
{
"id": 2121,
"logprob": -1.8984375,
"special": false,
"text": " test"
},
{
"id": 3853,
"logprob": -0.23535156,
"special": false,
"text": " request"
},
{
"id": 1736,
"logprob": -0.091308594,
"special": false,
"text": " form"
},
{
"id": 603,
"logprob": -0.96875,
"special": false,
"text": " is"
},
{
"id": 1671,
"logprob": -1.6484375,
"special": false,
"text": " used"
},
{
"id": 577,
"logprob": -0.43164062,
"special": false,
"text": " to"
},
{
"id": 3853,
"logprob": -1.2421875,
"special": false,
"text": " request"
}
],
"top_tokens": null
},
"generated_text": " form\n\nThe test request form is used to request"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2,
"logprob": null,
"text": "<bos>"
},
{
"id": 2015,
"logprob": -10.0,
"text": "Test"
},
{
"id": 3853,
"logprob": -10.875,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 1736,
"logprob": -2.09375,
"special": false,
"text": " form"
},
{
"id": 109,
"logprob": -1.9140625,
"special": false,
"text": "\n\n"
},
{
"id": 651,
"logprob": -2.453125,
"special": false,
"text": "The"
},
{
"id": 2121,
"logprob": -1.8984375,
"special": false,
"text": " test"
},
{
"id": 3853,
"logprob": -0.23535156,
"special": false,
"text": " request"
},
{
"id": 1736,
"logprob": -0.091308594,
"special": false,
"text": " form"
},
{
"id": 603,
"logprob": -0.96875,
"special": false,
"text": " is"
},
{
"id": 1671,
"logprob": -1.6484375,
"special": false,
"text": " used"
},
{
"id": 577,
"logprob": -0.43164062,
"special": false,
"text": " to"
},
{
"id": 3853,
"logprob": -1.2421875,
"special": false,
"text": " request"
}
],
"top_tokens": null
},
"generated_text": " form\n\nThe test request form is used to request"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2,
"logprob": null,
"text": "<bos>"
},
{
"id": 2015,
"logprob": -10.0,
"text": "Test"
},
{
"id": 3853,
"logprob": -10.875,
"text": " request"
}
],
"seed": null,
"tokens": [
{
"id": 1736,
"logprob": -2.09375,
"special": false,
"text": " form"
},
{
"id": 109,
"logprob": -1.9140625,
"special": false,
"text": "\n\n"
},
{
"id": 651,
"logprob": -2.453125,
"special": false,
"text": "The"
},
{
"id": 2121,
"logprob": -1.8984375,
"special": false,
"text": " test"
},
{
"id": 3853,
"logprob": -0.23535156,
"special": false,
"text": " request"
},
{
"id": 1736,
"logprob": -0.091308594,
"special": false,
"text": " form"
},
{
"id": 603,
"logprob": -0.96875,
"special": false,
"text": " is"
},
{
"id": 1671,
"logprob": -1.6484375,
"special": false,
"text": " used"
},
{
"id": 577,
"logprob": -0.43164062,
"special": false,
"text": " to"
},
{
"id": 3853,
"logprob": -1.2421875,
"special": false,
"text": " request"
}
],
"top_tokens": null
},
"generated_text": " form\n\nThe test request form is used to request"
}
]
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_load.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_load.json",
"repo_id": "text-generation-inference",
"token_count": 4916
}
| 230
|
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2753906,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.48046875,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1845703,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.5727539,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.00010967255,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.04510498,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.00020992756,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.0046539307,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025844574,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2724609,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.47729492,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1826172,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.56689453,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.000108003616,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.044433594,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.0002104044,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.004711151,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025892258,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2724609,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.47729492,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1826172,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.56689453,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.000108003616,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.044433594,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.0002104044,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.004711151,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025892258,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2724609,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.47729492,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1826172,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.56689453,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.000108003616,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.044433594,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.0002104044,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.004711151,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025892258,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
}
]
|
text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json",
"repo_id": "text-generation-inference",
"token_count": 5726
}
| 231
|
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 12,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 450,
"logprob": -0.26342773,
"special": false,
"text": " The"
},
{
"id": 21282,
"logprob": -0.01838684,
"special": false,
"text": " cow"
},
{
"id": 322,
"logprob": -0.18041992,
"special": false,
"text": " and"
},
{
"id": 521,
"logprob": -0.62841797,
"special": false,
"text": " ch"
},
{
"id": 21475,
"logprob": -0.0037956238,
"special": false,
"text": "icken"
},
{
"id": 526,
"logprob": -0.018737793,
"special": false,
"text": " are"
},
{
"id": 373,
"logprob": -1.0820312,
"special": false,
"text": " on"
},
{
"id": 263,
"logprob": -0.5083008,
"special": false,
"text": " a"
},
{
"id": 25695,
"logprob": -0.07128906,
"special": false,
"text": " beach"
},
{
"id": 29889,
"logprob": -0.12573242,
"special": false,
"text": "."
},
{
"id": 32002,
"logprob": -0.0029792786,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 2,
"logprob": -0.00024962425,
"special": true,
"text": "</s>"
}
],
"top_tokens": null
},
"generated_text": " The cow and chicken are on a beach."
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_two_images.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_two_images.json",
"repo_id": "text-generation-inference",
"token_count": 1028
}
| 232
|
{
"choices": [
{
"finish_reason": "eos_token",
"index": 0,
"logprobs": null,
"message": {
"content": null,
"name": null,
"role": "assistant",
"tool_calls": [
{
"function": {
"arguments": {
"error": "Cannot get current weather forecast from specified location and temperature unit. Please try again with different options."
},
"description": null,
"name": "notify_error"
},
"id": 0,
"type": "function"
}
]
},
"usage": null
}
],
"created": 1712852597,
"id": "",
"model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
"object": "text_completion",
"system_fingerprint": "1.4.5-native",
"usage": {
"completion_tokens": 39,
"prompt_tokens": 496,
"total_tokens": 535
}
}
|
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information.json/0
|
{
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information.json",
"repo_id": "text-generation-inference",
"token_count": 487
}
| 233
|
import pytest
@pytest.fixture(scope="module")
def flash_llama_exl2_handle(launcher):
with launcher(
"turboderp/Llama-3-8B-Instruct-exl2",
revision="2.5bpw",
# Set max input length to avoid OOM due to extremely large
# scratch buffer.
max_input_length=1024,
num_shard=1,
quantize="exl2",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_exl2(flash_llama_exl2_handle):
await flash_llama_exl2_handle.health(300)
return flash_llama_exl2_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_exl2(flash_llama_exl2, ignore_logprob_response_snapshot):
response = await flash_llama_exl2.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == ignore_logprob_response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_exl2_all_params(
flash_llama_exl2, ignore_logprob_response_snapshot
):
response = await flash_llama_exl2.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert (
response.generated_text == 'Test request. The server responds with a "200 OK"'
)
assert response == ignore_logprob_response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_exl2_load(
flash_llama_exl2, generate_load, ignore_logprob_response_snapshot
):
responses = await generate_load(
flash_llama_exl2, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == ignore_logprob_response_snapshot
|
text-generation-inference/integration-tests/models/test_flash_llama_exl2.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_exl2.py",
"repo_id": "text-generation-inference",
"token_count": 886
}
| 234
|
import pytest
import json
from text_generation.types import GrammarType
@pytest.fixture(scope="module")
def non_flash_llama_grammar_handle(launcher):
with launcher(
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
num_shard=1,
disable_grammar_support=False,
use_flash_attention=False,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def non_flash_llama_grammar(non_flash_llama_grammar_handle):
await non_flash_llama_grammar_handle.health(300)
return non_flash_llama_grammar_handle.client
@pytest.mark.release
@pytest.mark.skip
@pytest.mark.asyncio
async def test_non_flash_llama_grammar_json(non_flash_llama_grammar, response_snapshot):
response = await non_flash_llama_grammar.generate(
"info: david holtz like trees and has two cats. ",
max_new_tokens=100,
decoder_input_details=True,
seed=0,
grammar={
"type": GrammarType.Json,
"value": json.dumps(
{
"type": "object",
"$id": "https://example.com/person.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Person",
"properties": {
"firstName": {
"type": "string",
"description": "The person'''s first name.",
},
"lastName": {
"type": "string",
"description": "The person'''s last name.",
},
"hobby": {
"description": "The person'''s hobby.",
"type": "string",
},
"numCats": {
"description": "The number of cats the person has.",
"type": "integer",
"minimum": 0,
},
},
"required": ["firstName", "lastName", "hobby", "numCats"],
}
),
},
)
assert response.details.generated_tokens == 30
assert (
response.generated_text
== '{"firstName":"David","hobby":"Trees","lastName":"Holtz","numCats":2}'
)
assert response == response_snapshot
|
text-generation-inference/integration-tests/models/test_grammar_llama.py/0
|
{
"file_path": "text-generation-inference/integration-tests/models/test_grammar_llama.py",
"repo_id": "text-generation-inference",
"token_count": 1346
}
| 235
|
[pytest]
addopts = --snapshot-warn-unused
asyncio_mode = auto
markers =
private: marks tests as requiring an admin hf token (deselect with '-m "not private"')
|
text-generation-inference/integration-tests/pytest.ini/0
|
{
"file_path": "text-generation-inference/integration-tests/pytest.ini",
"repo_id": "text-generation-inference",
"token_count": 58
}
| 236
|
#!/bin/bash
if [[ -z "${HF_MODEL_ID}" ]]; then
echo "HF_MODEL_ID must be set"
exit 1
fi
export MODEL_ID="${HF_MODEL_ID}"
if [[ -n "${HF_MODEL_REVISION}" ]]; then
export REVISION="${HF_MODEL_REVISION}"
fi
if [[ -n "${SM_NUM_GPUS}" ]]; then
export NUM_SHARD="${SM_NUM_GPUS}"
fi
if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then
export QUANTIZE="${HF_MODEL_QUANTIZE}"
fi
if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then
export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}"
fi
text-generation-launcher --port 8080
|
text-generation-inference/sagemaker-entrypoint.sh/0
|
{
"file_path": "text-generation-inference/sagemaker-entrypoint.sh",
"repo_id": "text-generation-inference",
"token_count": 239
}
| 237
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
extra_compile_args = ["-std=c++17"]
setup(
name="custom_kernels",
ext_modules=[
CUDAExtension(
name="custom_kernels.fused_bloom_attention_cuda",
sources=["custom_kernels/fused_bloom_attention_cuda.cu"],
extra_compile_args=extra_compile_args,
),
CUDAExtension(
name="custom_kernels.fused_attention_cuda",
sources=["custom_kernels/fused_attention_cuda.cu"],
extra_compile_args=extra_compile_args,
),
],
cmdclass={"build_ext": BuildExtension},
)
|
text-generation-inference/server/custom_kernels/setup.py/0
|
{
"file_path": "text-generation-inference/server/custom_kernels/setup.py",
"repo_id": "text-generation-inference",
"token_count": 309
}
| 238
|
#ifndef _config_h
#define _config_h
#define MAX_Q_GEMM_ROWS 50
#define MAX_Q_GEMM_WEIGHTS 4 // must be <= MAX_Q_GEMM_ROWS
#define QMODE_2BIT 1
#define QMODE_3BIT 1
#define QMODE_4BIT 1
#define QMODE_5BIT 1
#define QMODE_6BIT 0
#define QMODE_8BIT 0
#endif
|
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/config.h/0
|
{
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/config.h",
"repo_id": "text-generation-inference",
"token_count": 119
}
| 239
|
#ifndef _qdq_util_cuh
#define _qdq_util_cuh
union half2_uint32
{
uint32_t as_uint32;
half2 as_half2;
__device__ half2_uint32(uint32_t val) : as_uint32(val) {}
__device__ half2_uint32(half2 val) : as_half2(val) {}
__device__ half2_uint32() : as_uint32(0) {}
};
union half_uint16
{
uint16_t as_uint16;
half as_half;
__device__ half_uint16(uint16_t val) : as_uint16(val) {}
__device__ half_uint16(half val) : as_half(val) {}
__device__ half_uint16() : as_uint16(0) {}
};
// Max_scale premultiplied by 1/256
__forceinline__ __device__ half dq_scale(const int qs, const half max_scale)
{
int qs_i = qs + 1;
half qs_h = __int2half_rn(qs_i * qs_i);
qs_h = __hmul(qs_h, max_scale);
return qs_h;
}
__forceinline__ __device__ half dq(const int q, const int qzero, const half scale)
{
return __hmul(__int2half_rn(q - qzero), scale);
}
__forceinline__ __device__ half dq_ns(const int q, const int qzero)
{
//return __hsub(__int2half_rn(q), __int2half_rn(qzero));
return __int2half_rn(q - qzero);
}
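// Extract the bits selected by `mask` after shifting `q` right by `shift`;
// the two-word variant reads a field that straddles a 32-bit word boundary
// using a funnel shift.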
__forceinline__ __device__ int exb(const uint32_t q, const int shift, const int mask)
{
return (int)((q >> shift) & mask);
}
__forceinline__ __device__ int exb(const uint32_t q1, const uint32_t q0, const int shift, const int mask)
{
return (int)(__funnelshift_rc(q0, q1, shift) & mask);
}
#endif
|
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh/0
|
{
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_util.cuh",
"repo_id": "text-generation-inference",
"token_count": 602
}
| 240
|
from text_generation_server.utils.hub import (
download_weights,
weight_hub_files,
weight_files,
)
from text_generation_server.utils.convert import convert_files
def test_convert_files():
model_id = "bigscience/bloom-560m"
pt_filenames = weight_hub_files(model_id, extension=".bin")
local_pt_files = download_weights(pt_filenames, model_id)
local_st_files = [
p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors" for p in local_pt_files
]
convert_files(local_pt_files, local_st_files, discard_names=[])
found_st_files = weight_files(model_id)
assert all([p in found_st_files for p in local_st_files])
|
text-generation-inference/server/tests/utils/test_convert.py/0
|
{
"file_path": "text-generation-inference/server/tests/utils/test_convert.py",
"repo_id": "text-generation-inference",
"token_count": 259
}
| 241
|
from dataclasses import dataclass
from text_generation_server.models.globals import ATTENTION
import torch
from typing import Optional
if ATTENTION in {"flashinfer", "flashdecoding"}:
@dataclass
class Seqlen:
input_lengths: torch.Tensor
prefix_lengths: torch.Tensor
cu_seqlen_q: Optional[torch.Tensor]
cu_seqlen_k: Optional[torch.Tensor]
max_q: int
max_k: int
def __init__(
self,
input_lengths,
prefix_lengths,
cu_seqlen_q=None,
max_q=None,
max_k=None,
):
self.input_lengths = input_lengths
self.prefix_lengths = prefix_lengths
device = self.input_lengths.device
shape = self.input_lengths.shape
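            # No query cumulative lengths provided means a decode batch: every
            # sequence contributes exactly one query token, so cu_seqlen_q is
            # simply 0..batch_size.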
if cu_seqlen_q is None:
cu_seqlen_q = torch.arange(
shape[0] + 1,
device=device,
dtype=torch.int32,
)
max_q = 1
else:
assert max_q is not None
assert max_k is not None
cu_seqlen_k = torch.zeros(shape[-1] + 1, device=device, dtype=torch.int32)
# cuda graphs don't like this and this is necessary to clamp within mistral
# Although FA2 might not want the clamping
# cu_seqlen_k[0] = 0
total = self.input_lengths + self.prefix_lengths
torch.cumsum(total, -1, out=cu_seqlen_k[1:])
self.cu_seqlen_q = cu_seqlen_q
self.cu_seqlen_k = cu_seqlen_k
self.max_q = max_q
self.max_k = max_k
def clamp(self, max):
# Flash decoding doesn't need to clamp
return self
else:
@dataclass
class Seqlen:
input_lengths: torch.Tensor
prefix_lengths: torch.Tensor
cu_seqlen_q: torch.Tensor
max_q: int
max_k: int
def clamp(self, max):
raise NotImplementedError("Not implemented seqlen for paged")
return Seqlen(torch.clamp(self.input_lengths, max=max))
|
text-generation-inference/server/text_generation_server/layers/attention/common.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/common.py",
"repo_id": "text-generation-inference",
"token_count": 1117
}
| 242
|
# Adapted from turboderp exllama: https://github.com/turboderp/exllamav2
from dataclasses import dataclass
from typing import Optional
import torch
import torch.nn as nn
from loguru import logger
from text_generation_server.layers.exl2 import Exl2Weight
from text_generation_server.layers.gptq import GPTQWeight
from text_generation_server.utils.log import log_master
try:
from exllamav2.ext import exllamav2_ext
make_q_matrix = exllamav2_ext.make_q_matrix
gemm_half_q_half = exllamav2_ext.gemm_half_q_half
except ImportError:
log_master(logger.warning, "exllamav2_kernels not installed.")
raise
# Dummy tensor to pass instead of g_idx since there is no way to pass "None" to a C++ extension
none_tensor = torch.empty((1, 1), device="meta")
@dataclass
class _ExtraTensors:
"""Additional generated quantizer tensors."""
q_group_map: Optional[torch.Tensor] = None
q_invperm: Optional[torch.Tensor] = None
q_perm: Optional[torch.Tensor] = None
def ext_gemm_half_q_half(x, q_handle, q4_width, force_cuda):
"""Matrix multiplication, returns x @ q4"""
output_shape = x.shape[:-1] + (q4_width,)
x = x.view(-1, x.shape[-1])
output = torch.empty((x.shape[0], q4_width), dtype=torch.half, device=x.device)
gemm_half_q_half(x, q_handle, output, force_cuda)
return output.view(output_shape)
def make_group_map(q_groups: torch.Tensor, num_qrows: int):
gr = q_groups.tolist()
group_map = []
num_groups = len(gr) // 2
for i in range(num_groups):
bits = gr[i * 2]
if i < num_groups - 1:
qrows = gr[i * 2 + 3] - gr[i * 2 + 1]
else:
qrows = num_qrows - gr[i * 2 + 1]
rows = qrows * 32 // bits
for j in range(rows):
group_map += [i]
group_map += [rows - j]
return torch.tensor(group_map, dtype=torch.short, device=q_groups.device)
# Create Q matrix
def ext_make_q_matrix(
w: Exl2Weight | GPTQWeight,
extra: _ExtraTensors,
temp_dq,
key: Optional[str] = None,
):
"""
Create Q matrix
"""
# max_dq_size = 512*(1024**2)
# max_dq_rows = max_dq_size // out_features[0]
max_dq_rows = 0
# EXL2
if isinstance(w, Exl2Weight):
extra.q_group_map = make_group_map(w.q_groups, w.q_weight.shape[0])
extra.q_perm = torch.argsort(w.q_invperm).short()
return make_q_matrix(
w.q_weight,
extra.q_perm,
w.q_invperm,
w.q_scale,
w.q_scale_max,
w.q_groups,
extra.q_group_map,
none_tensor, # zeros
none_tensor, # scales
none_tensor, # g_idx
none_tensor, # bias
temp_dq,
max_dq_rows,
)
# GPTQ
elif isinstance(w, GPTQWeight):
if w.scales.dtype == torch.float:
w.scales = w.scales.half()
# GPTQ with g_idx (act_order)
if w.g_idx is not None and not (w.g_idx == 0).all().item():
extra.q_perm = torch.empty(
(w.qweight.shape[0] * 8,),
dtype=torch.short,
device=w.qweight.device,
)
extra.q_invperm = torch.empty_like(extra.q_perm)
# make_q4 segfaults if g_idx is not on cpu in the act-order case. In the non act-order case, None needs to be passed for g_idx.
return make_q_matrix(
w.qweight,
extra.q_perm,
extra.q_invperm,
none_tensor, # q_scale
none_tensor, # q_scale_max
none_tensor, # q_groups
none_tensor, # q_group_map
w.qzeros,
w.scales,
w.g_idx.cpu(),
none_tensor, # bias
temp_dq,
max_dq_rows,
)
# GPTQ without g_idx
else:
return make_q_matrix(
w.qweight,
none_tensor, # q_perm
none_tensor, # q_invperm
none_tensor, # q_scale
none_tensor, # q_scale_max
none_tensor, # q_groups
none_tensor, # q_group_map
w.qzeros,
w.scales,
none_tensor, # g_idx
none_tensor, # bias
temp_dq,
max_dq_rows,
)
else:
        raise RuntimeError("Cannot create handle")
DEVICE = None
LAYERS = []
def set_device(device):
global DEVICE
DEVICE = device
def create_exllama_buffers(max_total_tokens: int):
global LAYERS, DEVICE
# No need to initialize scratch space if there are no layers
# that use ExLLamav2.
if len(LAYERS) == 0:
return
# Find the size of the scratch space.
scratch_bytes = max(
layer.scratch_space_fixed(max_input_len=max_total_tokens, max_batch_size=1)
for layer in LAYERS
)
temp_dq = ExLlamaV2DeviceTensors(DEVICE, scratch_bytes)
for layer in LAYERS:
layer.post_init(temp_dq)
class QuantLinear(nn.Module):
QUANT_TYPE = "exllamav2"
"""Linear layer implementation with per-group 4-bit quantization of the weights"""
def __init__(
self,
weight: Exl2Weight | GPTQWeight,
bias: torch.Tensor,
):
super().__init__()
self.q_handle = None
self.q_tensors = weight
self.extra_tensors = _ExtraTensors()
if isinstance(weight, Exl2Weight):
self.infeatures = weight.q_invperm.shape[0]
self.outfeatures = weight.q_weight.shape[1]
elif isinstance(weight, GPTQWeight):
if weight.bits != 4:
raise ValueError(
f"Exllamav2 kernel supports only bits=4, requested bits={weight.bits}. Something is wrong in the model initialization."
)
self.infeatures = weight.qweight.shape[0] // weight.bits * 32
self.outfeatures = weight.qweight.shape[1]
self.padding = -self.outfeatures % 32
self.outfeatures = self.outfeatures + self.padding
self.device = weight.device
self.bias = bias if bias is not None else None
global LAYERS
LAYERS.append(self)
def post_init(self, temp_dq):
device = self.q_tensors.device
assert device.type == "cuda"
assert device.index is not None
temp_dq = temp_dq.get_scratch_slice(self.temp_dq_size())
# We NEED to keep a pointer on Python side, otherwise the garbage collector will mess with us,
# and `Memory access fault by GPU node-2` will EAT you.
self.temp_dq = temp_dq
self.q_handle = ext_make_q_matrix(self.q_tensors, self.extra_tensors, temp_dq)
def forward(self, x, force_cuda=False):
output = ext_gemm_half_q_half(x, self.q_handle, self.outfeatures, force_cuda)
if self.bias is not None:
output.add_(self.bias)
return output
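    # Scratch sizing: the dequantization buffer holds the full fp16 weight
    # matrix (2 bytes per element), the forward buffer reserves 4 bytes per
    # output element per token, and both include a small fixed pad.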
def temp_dq_size(self):
return self.infeatures * self.outfeatures * 2 + 128
def temp_fwd_size(self, max_input_len, max_batch_size):
return self.outfeatures * max_input_len * max_batch_size * 4 + 128
def scratch_space_fixed(self, max_input_len, max_batch_size):
return self.temp_dq_size() + self.temp_fwd_size(max_input_len, max_batch_size)
class ExLlamaV2DeviceTensors:
device_idx: int
scratch_bytes: int
scratch_idx: int
    scratch: Optional[torch.Tensor] = None
def __init__(self, device, scratch_bytes):
self.device = device
self.scratch_bytes = scratch_bytes
def prepare(self):
self.scratch = torch.empty(
(self.scratch_bytes // 2,), dtype=torch.half, device=self.device
)
def get_scratch_slice(self, size_bytes):
if self.scratch is None:
self.prepare()
size_bytes = ((size_bytes + 127) // 128) * 128
size_half = size_bytes // 2
scratch_slice = self.scratch.narrow(0, 0, size_half)
return scratch_slice
|
text-generation-inference/server/text_generation_server/layers/gptq/exllamav2.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/exllamav2.py",
"repo_id": "text-generation-inference",
"token_count": 3935
}
| 243
|
import torch
from torch.nn import functional as F
from typing import Iterable, List
from text_generation_server.layers.linear import get_linear, FastLinear
from text_generation_server.utils.import_utils import SYSTEM
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
class LayerConcat(torch.nn.Module):
"""
Apply multiple layers to the input and concatenate their
outputs.
"""
def __init__(self, layers: Iterable[torch.nn.Module], dim: int = -1):
"""
`dim` is the dimension along which layer outputs are concatenated.
"""
super().__init__()
self.layers = layers
self.dim = dim
def forward(self, x: torch.Tensor):
outputs = [layer(x) for layer in self.layers]
return torch.cat(outputs, self.dim)
class SuperLayer(torch.nn.Module):
def __init__(self, linear):
super().__init__()
self.linear = linear
def forward(self, x):
return self.linear.forward(x)
class TensorParallelHead(SuperLayer):
def __init__(self, linear, process_group, should_gather: bool):
super().__init__(linear)
self.process_group = process_group
self.should_gather = should_gather
@staticmethod
def load(config, prefix: str, weights):
if config.quantize == "exl2":
try:
# If the piece and LM head embeddings are shared, we have
# non-quantized weights...
weight = weights.get_tensor(f"{prefix}.weight")
except Exception:
# ...otherwise they are quantized.
weight = weights.get_weights_col(prefix)
should_gather = weights.process_group.size() > 1
elif weights.process_group.size() > 1:
try:
weight = weights.get_sharded(f"{prefix}.weight", dim=0)
should_gather = True
except AssertionError:
# If the vocab size is not divisible by number of shards
# just load the entire thing.
weight = weights.get_tensor(f"{prefix}.weight")
should_gather = False
else:
weight = weights.get_tensor(f"{prefix}.weight")
should_gather = False
return TensorParallelHead(
get_linear(weight, bias=None),
process_group=weights.process_group,
should_gather=should_gather,
)
def forward(self, input: torch.Tensor) -> torch.Tensor:
if not self.should_gather:
return super().forward(input)
world_size = self.process_group.size()
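        # Fast path for unquantized 2D inputs: compute the local matmul into a
        # preallocated buffer and all-gather directly into the world-sized
        # output tensor, avoiding the list-of-tensors gather + concat below.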
if len(input.shape) == 2 and isinstance(self.linear, FastLinear):
out_dim = self.linear.weight.shape[0]
if input.shape[0] == 1:
world_out = input.new_empty(1, out_dim * world_size)
local_out = input.new_empty(1, out_dim)
gather_input = local_out
else:
world_out = input.new_empty(out_dim * world_size, input.shape[0])
gather_input = input.new_empty(out_dim, input.shape[0])
local_out = gather_input.T
torch.mm(input, self.linear.weight.T, out=local_out)
if SYSTEM == "ipex":
ipex.distributed.all_gather_into_tensor(
world_out, gather_input, group=self.process_group
)
else:
torch.distributed.all_gather_into_tensor(
world_out, gather_input, group=self.process_group
)
if input.shape[0] == 1:
return world_out
return world_out.T
output = super().forward(input)
world_output = [
torch.empty_like(output) for _ in range(self.process_group.size())
]
if SYSTEM == "ipex":
ipex.distributed.all_gather(world_output, output, group=self.process_group)
else:
torch.distributed.all_gather(world_output, output, group=self.process_group)
world_output = torch.cat(world_output, dim=-1)
return world_output
class TensorParallelColumnLinear(SuperLayer):
@classmethod
def load_gate_up(cls, config, prefix: str, weights, bias: bool):
"""Specific method when the QKV was joined after the fact"""
weight = weights.get_weights_col_packed_gate_up(prefix)
if bias:
raise NotImplementedError("packed_gate_up only implemented without bias")
else:
bias = None
linear = get_linear(weight, bias)
return cls(linear)
@classmethod
def load_qkv(
cls,
config,
prefix: str,
weights,
bias: bool,
num_heads: int,
num_key_value_heads: int,
):
"""Specific method when the QKV was joined after the fact"""
weight = weights.get_weights_col_packed_qkv(
prefix,
num_heads=num_heads,
num_key_value_heads=num_key_value_heads,
)
if bias:
raise NotImplementedError("packed_qkv only implemented for baichuan")
else:
bias = None
linear = get_linear(weight, bias)
return cls(linear)
@classmethod
def load(cls, config, prefix: str, weights, bias: bool):
weight = weights.get_weights_col(prefix)
if bias:
bias = weights.get_sharded(f"{prefix}.bias", dim=0)
else:
bias = None
linear = get_linear(weight, bias)
return cls(linear)
@classmethod
def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int):
if config.quantize == "exl2":
linears = []
for prefix in prefixes:
weight = weights.get_weights_col(prefix)
b = weights.get_tensor(f"{prefix}.bias") if bias else None
linears.append(get_linear(weight, b))
linear = LayerConcat(linears)
else:
weight = weights.get_multi_weights_col(prefixes, dim=dim)
if bias:
b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes]
bias = torch.cat(b, dim=dim)
else:
bias = None
linear = get_linear(weight, bias)
return cls(linear)
class TensorParallelRowLinear(SuperLayer):
def __init__(self, linear, process_group):
super().__init__(linear)
self.process_group = process_group
@classmethod
def load(cls, config, prefix: str, weights, bias: bool):
weight = weights.get_weights_row(prefix)
if bias and weights.process_group.rank() == 0:
# Rank is only on the first rank process
bias = weights.get_tensor(f"{prefix}.bias")
else:
bias = None
return cls(
get_linear(weight, bias),
process_group=weights.process_group,
)
def forward(self, input: torch.Tensor, reduce: bool = True) -> torch.Tensor:
out = super().forward(input)
if self.process_group.size() > 1 and reduce:
if SYSTEM == "ipex":
ipex.distributed.all_reduce(out, group=self.process_group)
else:
torch.distributed.all_reduce(out, group=self.process_group)
return out
class TensorParallelEmbedding(torch.nn.Module):
def __init__(self, prefix: str, weights, reduce=True):
super().__init__()
weight = weights.get_partial_sharded(f"{prefix}.weight", dim=0)
num_embeddings = weights.get_shape(f"{prefix}.weight")[0]
process_group = weights.process_group
world_size = process_group.size()
rank = process_group.rank()
block_size = (num_embeddings + world_size - 1) // world_size
self.min_id = rank * block_size
self.max_id = min(num_embeddings, (rank + 1) * block_size)
self.null_idx = weight.shape[
0
] # Usually block_size, might be less in non even vocab_size.
self.process_group = weights.process_group
self.reduce = reduce
"""Additional 0 entry used for masking"""
self.weight = torch.nn.Parameter(F.pad(weight, (0, 0, 0, 1)))
def forward(self, input: torch.Tensor) -> torch.Tensor:
# default all out of bounds values to `self.null_idx` that will then be mapped to 0
# translate for [0, self.max_id - self.min_id[
input = torch.where(
(self.min_id > input) | (input >= self.max_id),
self.null_idx,
input - self.min_id,
)
out = torch.nn.functional.embedding(input, self.weight)
if self.reduce and self.process_group.size() > 1:
if SYSTEM == "ipex":
ipex.distributed.all_reduce(out, group=self.process_group)
else:
torch.distributed.all_reduce(out, group=self.process_group)
return out
|
text-generation-inference/server/text_generation_server/layers/tensor_parallel.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/layers/tensor_parallel.py",
"repo_id": "text-generation-inference",
"token_count": 4175
}
| 244
|
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from text_generation_server.utils.import_utils import SYSTEM
if SYSTEM != "ipex":
from vllm.model_executor.layers.fused_moe import fused_moe
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple
from text_generation_server.layers.attention import (
paged_attention,
attention,
reshape_and_cache,
Seqlen,
)
from text_generation_server.layers import (
FastLinear,
TensorParallelRowLinear,
TensorParallelColumnLinear,
TensorParallelEmbedding,
SpeculativeHead,
get_linear,
)
from text_generation_server.layers.layernorm import (
FastRMSNorm,
)
from text_generation_server.layers.rotary import (
PositionRotaryEmbedding,
)
from text_generation_server.utils.weights import UnquantizedWeight
class MixtralConfig(PretrainedConfig):
model_type = "mixtral"
def __init__(
self,
vocab_size=32000,
hidden_size=4096,
intermediate_size=14336,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=8,
hidden_act="silu",
max_position_embeddings=4096 * 32,
initializer_range=0.02,
rms_norm_eps=1e-05,
use_cache=True,
pad_token_id=None,
bos_token_id=1,
eos_token_id=2,
pretraining_tp=1,
tie_word_embeddings=False,
rope_theta=10000.0,
sliding_window=None,
num_experts_per_tok=2,
num_local_experts=8,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_theta = rope_theta
self.num_experts_per_tok = num_experts_per_tok
self.num_local_experts = num_local_experts
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def promote_scalar(x: torch.Tensor) -> torch.Tensor:
return x.view(1) if len(x.size()) == 0 else x
def load_attention(config, prefix: str, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
return TensorParallelColumnLinear(get_linear(weight, bias=None))
def _load_experts(config, prefix: str, mat, weights):
if config.quantize is not None:
raise NotImplementedError("Mixtral does not support weight quantization yet.")
assert mat in ["w1", "w2", "w3"]
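    # Each expert is sharded along the intermediate dimension: w1/w3 slices are
    # taken by rows, w2 by columns (then transposed), and all experts are
    # stacked into a single (num_local_experts * block_size, hidden_size) tensor.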
world_size = weights.process_group.size()
rank = weights.process_group.rank()
assert (
config.intermediate_size % world_size == 0
), f"The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards"
block_size = config.intermediate_size // world_size
start = rank * block_size
stop = (rank + 1) * block_size
tensor = torch.empty(
(config.num_local_experts * block_size, config.hidden_size),
dtype=weights.dtype,
device=weights.device,
)
for i in range(config.num_local_experts):
slice_ = weights._get_slice(f"{prefix}.{i}.{mat}.weight")
if mat == "w2":
expert_slice = slice_[:, start:stop].t().contiguous()
else:
expert_slice = slice_[start:stop]
tensor[i * block_size : (i + 1) * block_size] = expert_slice.to(
dtype=weights.dtype
).to(device=weights.device)
return tensor
class MixtralAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.max_past = (
config.sliding_window if config.sliding_window is not None else -1
)
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
):
qkv = self.query_key_value(hidden_states)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
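        # For sliding-window models, prefill_cache_indices selects only the
        # positions that fall inside the window before writing to the KV cache.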
if prefill_cache_indices is not None:
kv_to_cache = kv[prefill_cache_indices]
else:
kv_to_cache = kv
reshape_and_cache(
kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query,
kv_cache[0],
kv_cache[1],
seqlen,
block_tables,
self.softmax_scale,
window_size_left=self.max_past,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache[0],
kv_cache[1],
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
)
return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
@torch.jit.script
def select_experts(gate_logits: torch.Tensor, top_k: int):
# all_probs: (sequence_length, n_experts) and upcast for softmax
all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float)
# weights, selected_experts: (sequence_length, top-k)
weights, selected_experts = torch.topk(all_probs, top_k, dim=-1)
weights /= weights.sum(dim=-1, keepdim=True)
weights = weights.view(-1)
selected_experts = selected_experts.view(-1)
return selected_experts, weights
@torch.jit.script
def round_up(x: torch.Tensor, value: int):
return torch.div(x + (value - 1), value, rounding_mode="trunc") * value
class BlockSparseMoE(nn.Module):
def __init__(self, prefix, config: MixtralConfig, weights):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size // weights.process_group.size()
self.num_experts = config.num_local_experts
self.top_k = config.num_experts_per_tok
act = config.hidden_act
if "gelu" in act:
self.act = lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
elif "silu" in act:
self.act = torch.nn.functional.silu
else:
self.act = ACT2FN[act]
# gating
self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False)
# merged expert weights, all of size (n_experts * ffn_dim, hidden_dim)
w1 = _load_experts(config, f"{prefix}.experts", "w1", weights).view(
self.num_experts, self.ffn_dim, self.hidden_dim
)
w3 = _load_experts(config, f"{prefix}.experts", "w3", weights).view(
self.num_experts, self.ffn_dim, self.hidden_dim
)
self.w13 = torch.cat([w1, w3], dim=1)
self.w2 = (
_load_experts(config, f"{prefix}.experts", "w2", weights)
.view(self.num_experts, self.ffn_dim, self.hidden_dim)
.transpose(1, 2)
.contiguous()
)
self.process_group = weights.process_group
def forward(self, x: torch.Tensor) -> torch.Tensor:
# router_logits: (num_tokens, n_experts)
router_logits = self.gate(x)
out = fused_moe(
x,
self.w13,
self.w2,
router_logits,
self.top_k,
renormalize=True,
inplace=True,
)
# Reduce sum
if self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out.view(*x.shape)
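# Editor's sketch (illustrative, not the actual fused kernel): per expert e, fused_moe
# on the merged weights above computes roughly
#
#   gate_e = x @ w13[e, :ffn_dim].T        # the w1 half of the merged tensor
#   up_e   = x @ w13[e, ffn_dim:].T        # the w3 half of the merged tensor
#   out_e  = (silu(gate_e) * up_e) @ w2[e].T
#
# and the final output is the routing-weighted sum of out_e over each token's top-k
# experts, with the weights renormalized because renormalize=True.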
class DenseMoE(nn.Module):
def __init__(self, prefix, config: MixtralConfig, weights):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size // weights.process_group.size()
self.num_experts = config.num_local_experts
self.top_k = config.num_experts_per_tok
act = config.hidden_act
if "gelu" in act:
self.act = lambda x: torch.nn.functional.gelu(
x,
approximate=(
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
),
)
elif "silu" in act:
self.act = torch.nn.functional.silu
else:
self.act = ACT2FN[act]
# gating
self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False)
self.w1 = [
TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.experts.{i}.w1", weights=weights, bias=False
)
for i in range(self.num_experts)
]
self.w3 = [
TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.experts.{i}.w3", weights=weights, bias=False
)
for i in range(self.num_experts)
]
self.w2 = [
TensorParallelRowLinear.load(
config, prefix=f"{prefix}.experts.{i}.w2", weights=weights, bias=False
)
for i in range(self.num_experts)
]
self.process_group = weights.process_group
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
x: (sequence_length, model_dim)
gate_logits: (sequence_length, n_experts)
"""
# optional reshape
input_shape = x.shape
x = x.view(-1, input_shape[-1])
# gate_logits: (sequence_length, n_experts)
gate_logits = self.gate(x)
# all_probs: (sequence_length, n_experts) and upcast for softmax
all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float)
if self.top_k < self.num_experts:
_, not_selected_experts = torch.topk(
all_probs,
self.num_experts - self.top_k,
largest=False,
sorted=False,
dim=1,
)
# Mask not selected experts
all_probs.scatter_(1, not_selected_experts, 0)
# Re-normalize
weights = all_probs / all_probs.sum(dim=1, keepdim=True)
weights = weights.to(x.dtype)
# Final output tensor
out = x.new_zeros(x.shape[0], self.hidden_dim)
for i in range(self.num_experts):
h = self.act(self.w1[i](x)) * self.w3[i](x)
h = self.w2[i](h, reduce=False)
# Add expert output to out with masking
out += h * weights[:, i].view(-1, 1)
# Reduce sum
if self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out
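# Illustrative example (editor addition, made-up numbers) of the masking above: with
# num_experts=4, top_k=2 and softmax probabilities [0.5, 0.3, 0.15, 0.05] for a token,
# the two smallest entries are scattered to zero and the row renormalizes to
# [0.625, 0.375, 0.0, 0.0]; the loop then adds 0.625 * expert0(x) + 0.375 * expert1(x).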
class MixtralLayer(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = MixtralAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE
self.moe = moe_cls(f"{prefix}.block_sparse_moe", config, weights)
self.input_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
)
# faster post attention rms norm
normed_attn_res_output, attn_res = self.post_attention_layernorm(
attn_output, res
)
moe_output = self.moe(normed_attn_res_output)
return moe_output, attn_res
class MixtralModel(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.embed_tokens = TensorParallelEmbedding(
prefix=(
"model.embed_tokens" if not prefix else f"{prefix}.model.embed_tokens"
),
weights=weights,
)
self.layers = nn.ModuleList(
[
MixtralLayer(
"model" if not prefix else f"{prefix}.model",
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = FastRMSNorm.load(
prefix="model.norm" if not prefix else f"{prefix}.model.norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
true_max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
# Avoid to index in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, true_max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashMixtralForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.model = MixtralModel(prefix, config, weights)
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head" if not prefix else f"{prefix}.lm_head",
weights=weights,
)
self.max_past = config.sliding_window
self.max_past_tensor = (
torch.tensor(config.sliding_window, device=weights.device)
if self.max_past is not None
else None
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
true_max_s = max_s
if prefill_cache_indices is not None:
# Slots also need to be sliced as it has the same size as the whole kv tensor
slots = slots[prefill_cache_indices]
elif self.max_past is not None:
# Clamp in decode mode as paged attention requires clamped values whereas the flash attention
# kernel requires the true values
seqlen = seqlen.clamp(max=self.max_past_tensor)
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
true_max_s,
prefill_cache_indices,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
|
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 10569
}
| 245
|
import torch
import torch.distributed
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
from torch import nn
from typing import Optional, Tuple, Any
from transformers.configuration_utils import PretrainedConfig
import torch.nn.functional as F
from text_generation_server.layers import (
SpeculativeHead,
TensorParallelEmbedding,
FastLinear,
)
from text_generation_server.layers.layernorm import FastRMSNorm
from einops import rearrange
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
import math
from dataclasses import dataclass
@dataclass
class InferenceParams:
"""Inference parameters that are passed to the main model in order
    to efficiently calculate and store the context during inference."""
max_seqlen: int
max_batch_size: int
conv_states: torch.Tensor
ssm_states: torch.Tensor
seqlen_offset: int
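# Editor's note (assumption; the states are allocated by the caller, not in this file):
# the per-layer states indexed by `layer_id` in MambaBlock are typically laid out as
#
#   conv_states: (n_layer, batch_size, d_inner, d_conv)   # rolling causal-conv window
#   ssm_states:  (n_layer, batch_size, d_inner, d_state)  # selective-scan hidden state
#
# with `seqlen_offset` incremented after each forward so that subsequent calls take the
# single-token `step` path.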
class MambaConfig(PretrainedConfig):
def __init__(
self,
vocab_size=50280,
d_model=768,
d_state=16,
n_layer=32,
layer_norm_epsilon=1e-5,
tie_word_embeddings=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
expand=2,
dt_rank="auto",
**kwargs,
):
self.vocab_size = vocab_size
self.n_layer = n_layer
self.layer_norm_epsilon = layer_norm_epsilon
self.d_model = d_model
self.d_inner = d_model * 2
self.d_conv = 4
self.d_state = d_state
self.expand = expand
self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
class MambaBlock(nn.Module):
def __init__(self, prefix, config, weights, layer_id):
super().__init__()
self.layer_id = layer_id
self.in_proj = FastLinear.load(config, f"{prefix}.in_proj", weights, bias=False)
self.x_proj = FastLinear.load(config, f"{prefix}.x_proj", weights, bias=False)
self.dt_proj = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=True)
self.dt_proj_no_bias = FastLinear.load(
config, f"{prefix}.dt_proj", weights, bias=False
)
self.out_proj = FastLinear.load(
config, f"{prefix}.out_proj", weights, bias=False
)
self.conv1d = FastLinear.load(config, f"{prefix}.conv1d", weights, bias=True)
self.negA = -torch.exp(weights.get_tensor(f"{prefix}.A_log").float())
self.D = weights.get_tensor(f"{prefix}.D")
self.activation = "silu"
self.dt_rank = config.dt_rank
self.d_state = config.d_state
self.d_conv = config.d_conv
self.act = nn.SiLU()
# inference_params
def forward(self, hidden_states: torch.Tensor, inference_params=None):
if inference_params.seqlen_offset > 0:
conv_state = inference_params.conv_states[self.layer_id]
ssm_state = inference_params.ssm_states[self.layer_id]
out, conv_state, ssm_state = self.step(hidden_states, conv_state, ssm_state)
return out, conv_state, ssm_state
_, seqlen, _ = hidden_states.shape
projected_states = self.in_proj(hidden_states).transpose(1, 2)
# assert projected_states.shape == [batch_size, 2 * dstate, seqlen], f"{projected_states.shape} [{batch_size}, {dstate}, {seqlen}]"
x, z = projected_states.chunk(2, dim=1)
conv_state = F.pad(x, (self.d_conv - seqlen, 0))
x = causal_conv1d_fn(
x=x,
weight=self.conv1d.weight.squeeze(1),
bias=self.conv1d.bias,
activation=self.activation,
)
# We're careful here about the layout, to avoid extra transposes.
# We want dt to have d as the slowest moving dimension
# and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d)
dt, B, C = torch.split(
x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1
)
dt = self.dt_proj.weight @ dt.t()
dt = rearrange(dt, "d (b l) -> b d l", l=seqlen)
B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
y, last_state = selective_scan_fn(
x,
dt,
self.negA,
B,
C,
self.D.float(),
z=z,
delta_bias=self.dt_proj.bias.float(),
delta_softplus=True,
return_last_state=True,
)
y = rearrange(y, "b d l -> b l d")
attn_outputs = self.out_proj(y)
return attn_outputs, conv_state, last_state
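    # Editor's note (hedged summary, not executable code): with delta_softplus=True and
    # the dt_proj bias passed as delta_bias, the scan above realizes, per channel,
    # approximately
    #   delta_t = softplus(dt_t + dt_bias)
    #   h_t     = exp(delta_t * A) * h_{t-1} + delta_t * B_t * x_t
    #   y_t     = C_t . h_t + D * x_t,   output_t = y_t * silu(z_t)
    # which `selective_state_update` replays one token at a time in `step` below.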
def step(self, hidden_states, conv_state, ssm_state):
xz = self.in_proj(hidden_states.squeeze(1))
x, z = xz.chunk(2, dim=-1) # (B D)
x = causal_conv1d_update(
x,
conv_state,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
x_db = self.x_proj(x) # (B dt_rank+2*d_state)
dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1)
dt = F.linear(dt, self.dt_proj.weight)
A = self.negA
y = selective_state_update(
ssm_state,
x,
dt,
A,
B,
C,
self.D,
z=z,
dt_bias=self.dt_proj.bias,
dt_softplus=True,
)
out = self.out_proj(y)
return out.unsqueeze(1), conv_state.clone(), ssm_state.clone()
class ResidualBlock(nn.Module):
def __init__(self, prefix, config, weights, layer_id):
super().__init__()
self.mamba_block = MambaBlock(
prefix=f"{prefix}.mixer", config=config, weights=weights, layer_id=layer_id
)
self.layer_norm = FastRMSNorm.load(
prefix=f"{prefix}.norm", weights=weights, eps=config.layer_norm_epsilon
)
def forward(
self,
hidden_states: torch.Tensor,
residual: Optional[torch.Tensor] = None,
inference_params: Optional[Any] = None,
):
residual = (hidden_states + residual) if residual is not None else hidden_states
shape = residual.shape
hidden_states, _ = self.layer_norm(residual.view(-1, shape[-1]))
hidden_states, conv_state, last_ssm_state = self.mamba_block(
hidden_states.view(*shape), inference_params
)
return hidden_states, residual, conv_state, last_ssm_state
class MambaModel(nn.Module):
def __init__(self, config, weights):
super().__init__()
prefix = "backbone"
self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embedding", weights)
self.blocks = nn.ModuleList(
[
ResidualBlock(f"{prefix}.layers.{i}", config, weights, layer_id=i)
for i in range(config.n_layer)
]
)
self.norm_f = FastRMSNorm.load(
f"{prefix}.norm_f", weights, eps=config.layer_norm_epsilon
)
self.lm_head = SpeculativeHead.load(config, f"{prefix}.embedding", weights)
self.config = config
def forward(
self, input_ids: torch.Tensor, inference_params=None, residual=None
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
hidden_states = self.embed_tokens(input_ids)
for i, block in enumerate(self.blocks):
hidden_states, residual, conv_state, ssm_state = block(
hidden_states, residual, inference_params
)
inference_params.conv_states[i].copy_(conv_state)
inference_params.ssm_states[i].copy_(ssm_state)
hidden_states = (
hidden_states + residual if residual is not None else hidden_states
)
hidden_states, _ = self.norm_f(hidden_states.view(-1, hidden_states.size(-1)))
hidden_states = hidden_states.view(residual.shape)
logits, speculative_logits = self.lm_head(hidden_states)
# update the offset for the next inference using these params
inference_params.seqlen_offset += input_ids.size(1)
return logits, speculative_logits
|
text-generation-inference/server/text_generation_server/models/custom_modeling/mamba_modeling.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/mamba_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 4108
}
| 246
|
import torch
import torch.distributed
import time
from dataclasses import dataclass
from opentelemetry import trace
from transformers import (
AutoTokenizer,
AutoModelForSeq2SeqLM,
PreTrainedTokenizerBase,
AutoConfig,
)
from typing import Optional, Tuple, List, Type, Dict
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.tokens import batch_top_tokens
from text_generation_server.models import Model
from text_generation_server.models.types import (
GeneratedText,
Batch,
Generation,
Tokens,
)
from text_generation_server.pb import generate_pb2
from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling
tracer = trace.get_tracer(__name__)
@dataclass
class Seq2SeqLMBatch(Batch):
batch_id: int
requests: List[generate_pb2.Request]
requests_idx_mapping: Dict[int, int]
# Encoder values
input_ids: Optional[torch.Tensor]
attention_mask: torch.Tensor
# Decoder values
decoder_input_ids: torch.Tensor
decoder_attention_mask: Optional[torch.Tensor]
encoder_last_hidden_state: Optional[torch.Tensor]
# All tokens
all_decoder_input_ids: List[torch.Tensor]
# Seq2SeqLM keeps track of both encoder and decoder attention keys and values
past_key_values: Optional[List[Tuple]]
# Lengths of all generations present in the batch
input_lengths: List[int]
decoder_input_lengths: List[int]
prefix_offsets: List[int]
read_offsets: List[int]
# Generation helpers
next_token_choosers: List[NextTokenChooser]
stopping_criterias: List[StoppingCriteria]
top_n_tokens: List[int]
top_n_tokens_tensor: torch.Tensor
# Metadata used for padding
max_input_length: int
max_decoder_input_length: int
padding_right_offset: int
# Maximum number of tokens this batch will grow to
max_tokens: int
def to_pb(self) -> generate_pb2.CachedBatch:
"""Convert a Seq2SeqLMBatch to a text_generation_server.v1.CachedBatch protobuf"""
return generate_pb2.CachedBatch(
id=self.batch_id,
request_ids=[r.id for r in self.requests],
size=len(self),
max_tokens=self.max_tokens,
)
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "Seq2SeqLMBatch":
"""Convert a text_generation_server.v1.Batch protobuf to a Seq2SeqLMBatch"""
inputs = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
decoder_input_lengths = []
prefix_offsets = []
read_offsets = []
requests_idx_mapping = {}
# Parse batch
max_truncation = 0
padding_right_offset = 0
max_decode_tokens = 0
for i, r in enumerate(pb.requests):
inputs.append(concat_text_chunks(r.input_chunks.chunks))
requests_idx_mapping[r.id] = i
decoder_input_lengths.append(1)
next_token_choosers.append(
NextTokenChooser.from_pb(r.parameters, device, tokenizer)
)
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
max_truncation = max(max_truncation, r.truncate)
max_decode_tokens += stopping_criteria.max_new_tokens
padding_right_offset = max(
padding_right_offset, stopping_criteria.max_new_tokens
)
# Tokenize batch
tokenized_inputs = tokenizer(
inputs,
return_tensors="pt",
padding=True,
return_token_type_ids=False,
truncation=True,
max_length=max_truncation,
).to(device)
input_lengths = tokenized_inputs["attention_mask"].sum(1)
max_input_length = input_lengths.max()
# Decoder sequence only contains the bos_token
decoder_input_ids = (
torch.tensor(tokenizer.bos_token_id, device=device)
.repeat(len(pb.requests))
.view(-1, 1)
)
for _ in pb.requests:
prefix_offsets.append(0)
read_offsets.append(1)
all_decoder_input_ids = decoder_input_ids.view(-1).split(1)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
max_tokens = len(inputs) * (max_input_length + max_decode_tokens)
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=tokenized_inputs["input_ids"],
attention_mask=tokenized_inputs["attention_mask"],
decoder_input_ids=decoder_input_ids,
all_decoder_input_ids=list(all_decoder_input_ids),
decoder_attention_mask=None,
encoder_last_hidden_state=None,
past_key_values=None,
input_lengths=input_lengths.tolist(),
decoder_input_lengths=decoder_input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length.item(),
max_decoder_input_length=1,
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]) -> Optional["Seq2SeqLMBatch"]:
if len(request_ids) == 0:
raise ValueError("Batch must have at least one request")
if len(request_ids) == len(self):
return self
keep_indices = []
# New values after filtering
requests_idx_mapping = {}
requests = []
input_lengths = []
decoder_input_lengths = []
prefix_offsets = []
read_offsets = []
all_decoder_input_ids = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_input_length = 0
max_decoder_input_length = 0
padding_right_offset = 0
total_remaining_decode_tokens = 0
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
requests_idx_mapping[request_id] = i
keep_indices.append(idx)
requests.append(self.requests[idx])
prefix_offsets.append(self.prefix_offsets[idx])
read_offsets.append(self.read_offsets[idx])
all_decoder_input_ids.append(self.all_decoder_input_ids[idx])
request_input_length = self.input_lengths[idx]
input_lengths.append(request_input_length)
max_input_length = max(max_input_length, request_input_length)
request_decoder_input_length = self.decoder_input_lengths[idx]
decoder_input_lengths.append(request_decoder_input_length)
max_decoder_input_length = max(
max_decoder_input_length, request_decoder_input_length
)
next_token_choosers.append(self.next_token_choosers[idx])
stopping_criteria = self.stopping_criterias[idx]
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(self.top_n_tokens[idx])
remaining_decode_tokens = (
stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
)
total_remaining_decode_tokens += remaining_decode_tokens
padding_right_offset = max(padding_right_offset, remaining_decode_tokens)
# Apply indices to input_ids, attention mask, past key values and other items that need to be cached
self.decoder_input_ids = self.decoder_input_ids[keep_indices]
self.attention_mask = self.attention_mask[keep_indices, -max_input_length:]
if self.decoder_attention_mask is not None:
self.decoder_attention_mask = self.decoder_attention_mask[
keep_indices,
-(self.padding_right_offset + max_decoder_input_length) : (
self.decoder_attention_mask.shape[1] - self.padding_right_offset
)
+ padding_right_offset,
]
self.encoder_last_hidden_state = self.encoder_last_hidden_state[
keep_indices, -max_input_length:
]
# Ensure that past_key_values tensors can be updated in-place
if type(self.past_key_values[0]) is tuple:
self.past_key_values = [
[t for t in layer] for layer in self.past_key_values
]
decoder_past_seq_len = max_decoder_input_length - 1
for layer in self.past_key_values:
layer[0] = layer[0][keep_indices, :, -decoder_past_seq_len:]
layer[1] = layer[1][keep_indices, :, -decoder_past_seq_len:]
layer[2] = layer[2][keep_indices, :, -max_input_length:]
layer[3] = layer[3][keep_indices, :, -max_input_length:]
top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices]
max_tokens = (
len(request_ids) * (max_input_length + max_decoder_input_length)
            + total_remaining_decode_tokens
)
self.requests = requests
self.requests_idx_mapping = requests_idx_mapping
self.input_ids = None
self.all_decoder_input_ids = all_decoder_input_ids
self.input_lengths = input_lengths
self.decoder_input_lengths = decoder_input_lengths
self.prefix_offsets = prefix_offsets
self.read_offsets = read_offsets
self.next_token_choosers = next_token_choosers
self.stopping_criterias = stopping_criterias
self.top_n_tokens = top_n_tokens
self.top_n_tokens_tensor = top_n_tokens_tensor
self.max_input_length = max_input_length
self.max_decoder_input_length = max_decoder_input_length
self.padding_right_offset = padding_right_offset
self.max_tokens = max_tokens
return self
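    # Editor's note (derived from the slicing above and the shapes in `concatenate`):
    # each past_key_values layer holds four tensors:
    #   layer[0], layer[1]: decoder self-attention K/V,
    #                       (batch, num_heads, max_decoder_input_length - 1, head_dim)
    #   layer[2], layer[3]: encoder cross-attention K/V,
    #                       (batch, num_heads, max_input_length, head_dim)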
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches: List["Seq2SeqLMBatch"]) -> "Seq2SeqLMBatch":
"""Concatenate multiple batches together by padding internal torch tensors"""
# Used for padding
total_batch_size = 0
max_input_length = 0
max_decoder_input_length = 0
padding_right_offset = 0
for batch in batches:
total_batch_size += len(batch)
max_input_length = max(max_input_length, batch.max_input_length)
max_decoder_input_length = max(
max_decoder_input_length, batch.max_decoder_input_length
)
padding_right_offset = max(padding_right_offset, batch.padding_right_offset)
# Batch attributes
requests = []
requests_idx_mapping = {}
all_decoder_input_ids = []
input_lengths = []
decoder_input_lengths = []
prefix_offsets = []
read_offsets = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_tokens = 0
# Batch tensors
attention_mask = None
decoder_input_ids = None
decoder_attention_mask = None
encoder_last_hidden_state = None
top_n_tokens_tensor = None
past_key_values = []
# Used for slicing correctly inside the tensors
# Equivalent to a cumsum on batch sizes
start_index = 0
for i, batch in enumerate(batches):
# Extend all list attributes
requests.extend(batch.requests)
all_decoder_input_ids.extend(batch.all_decoder_input_ids)
input_lengths.extend(batch.input_lengths)
decoder_input_lengths.extend(batch.decoder_input_lengths)
prefix_offsets.extend(batch.prefix_offsets)
read_offsets.extend(batch.read_offsets)
next_token_choosers.extend(batch.next_token_choosers)
stopping_criterias.extend(batch.stopping_criterias)
top_n_tokens.extend(batch.top_n_tokens)
if i == 0:
requests_idx_mapping = batch.requests_idx_mapping
else:
# We need to offset the mapping for each batch by the cumulative batch size
for k, v in batch.requests_idx_mapping.items():
requests_idx_mapping[k] = v + start_index
# Slicing end index for this batch
end_index = start_index + len(batch)
# We only concatenate batches that did at least one step
if batch.encoder_last_hidden_state is None:
raise ValueError("Batch encoder_last_hidden_state cannot be None")
# Create padded tensor
if attention_mask is None:
attention_mask = batch.attention_mask.new_zeros(
(total_batch_size, max_input_length),
)
# Copy to correct indices
attention_mask[start_index:end_index, -batch.max_input_length :] = (
batch.attention_mask[:, -batch.max_input_length :]
)
# Create padded tensor
if decoder_input_ids is None:
decoder_input_ids = batch.decoder_input_ids.new_zeros(
(total_batch_size, 1),
)
# Copy to correct indices
decoder_input_ids[start_index:end_index] = batch.decoder_input_ids
# Create padded tensor
if decoder_attention_mask is None:
# As decoder_attention_mask might not exist, we use `batch.attention_mask` for device here
decoder_attention_mask = batch.attention_mask.new_zeros(
(total_batch_size, max_decoder_input_length + padding_right_offset),
)
# If the decoder mask does not exist yet, all generations started at the same time and we never concatenated
# this batch. All generations are of length `batch.max_decoder_input_length`.
left_offset = max_decoder_input_length - batch.max_decoder_input_length
if batch.decoder_attention_mask is None:
decoder_attention_mask[
start_index:end_index,
left_offset:-padding_right_offset,
] = 1
# If it exists, we need to index
else:
batch_left_offset = (
batch.decoder_attention_mask.shape[1]
- batch.max_decoder_input_length
- batch.padding_right_offset
)
decoder_attention_mask[
start_index:end_index,
left_offset:-padding_right_offset,
] = batch.decoder_attention_mask[
:,
batch_left_offset : -batch.padding_right_offset,
]
# Create padded tensor
if encoder_last_hidden_state is None:
encoder_last_hidden_state = batch.encoder_last_hidden_state.new_zeros(
(
total_batch_size,
max_input_length,
batch.encoder_last_hidden_state.shape[-1],
),
)
if top_n_tokens_tensor is None:
top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
total_batch_size,
)
top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
# Copy to correct indices
encoder_last_hidden_state[
start_index:end_index, -batch.max_input_length :, :
] = batch.encoder_last_hidden_state[:, -batch.max_input_length :, :]
batch.encoder_last_hidden_state = None
# Ensure that we can update tensors in-place
if isinstance(batch.past_key_values[0], tuple):
batch.past_key_values = [
[t for t in layer] for layer in batch.past_key_values
]
            # Account for any padding tokens that were added while concatenating
max_tokens += batch.max_tokens + (
max_input_length
- batch.max_input_length
+ max_decoder_input_length
- batch.max_decoder_input_length
) * len(batch)
start_index = end_index
# Determine shapes for new past kv tensors
first_past_kvs = batches[0].past_key_values
_, num_heads, _, head_dim = first_past_kvs[0][0].shape
padded_dec_t_shape = (
total_batch_size,
num_heads,
(max_decoder_input_length - 1),
head_dim,
)
padded_enc_t_shape = (
total_batch_size,
num_heads,
max_input_length,
head_dim,
)
# Iterate over attention layers
for j in range(len(first_past_kvs)):
past_key_values.append([])
# Decoder past
for k in range(0, 2):
# Initialize tensors
padded_past_values = first_past_kvs[j][k].new_zeros(padded_dec_t_shape)
past_key_values[j].append(padded_past_values)
start_index = 0
for batch in batches:
t = batch.past_key_values[j][k]
# Clear reference to the original tensor
batch.past_key_values[j][k] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the past keys and values to remove the padding from previous batches
past_seq_len = batch.max_decoder_input_length - 1
padded_past_values[start_index:end_index, :, -past_seq_len:, :] = t[
:, :, -past_seq_len:, :
]
del t
start_index = end_index
# Encoder past
for k in range(2, 4):
# Initialize tensors
padded_past_values = first_past_kvs[j][k].new_zeros(padded_enc_t_shape)
past_key_values[j].append(padded_past_values)
start_index = 0
for batch in batches:
t = batch.past_key_values[j][k]
# Clear reference to the original tensor
batch.past_key_values[j][k] = None
# Slicing end index for this batch
end_index = start_index + len(batch)
# We slice the past keys and values to remove the padding from previous batches
padded_past_values[
start_index:end_index, :, -batch.max_input_length :, :
] = t[:, :, -batch.max_input_length :, :]
del t
start_index = end_index
return cls(
batch_id=batches[0].batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=None,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
all_decoder_input_ids=all_decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_last_hidden_state=encoder_last_hidden_state,
past_key_values=past_key_values,
input_lengths=input_lengths,
decoder_input_lengths=decoder_input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length,
max_decoder_input_length=max_decoder_input_length,
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
def __len__(self):
return len(self.requests)
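# Editor's note (summary of the conventions used above): encoder-side tensors are
# left-padded, so rows are always addressed with negative slices such as
# [:, -max_input_length:]; decoder-side tensors grow to the right, with
# `padding_right_offset` reserving room for tokens still to be generated and shrinking
# by one after every decode step.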
class Seq2SeqLM(Model):
def __init__(
self,
model_id: str,
model_class,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
default_dtype=torch.float16,
trust_remote_code: bool = False,
config_class=AutoConfig,
tokenizer_class=AutoTokenizer,
aliases=None,
):
self.quantize = quantize
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = default_dtype if dtype is None else dtype
elif SYSTEM == "ipex":
if hasattr(torch, "xpu") and torch.xpu.is_available():
device = torch.device(f"xpu:{rank}")
dtype = default_dtype if dtype is None else dtype
else:
device = torch.device("cpu")
# Float16 doesn't exist on target.
dtype = torch.bfloat16 if dtype is None else dtype
else:
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
config = config_class.from_pretrained(
model_id,
revision=revision,
trust_remote_code=trust_remote_code,
)
config.quantize = quantize
config.speculator = speculator
tokenizer = tokenizer_class.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
tokenizer.bos_token_id = config.decoder_start_token_id
weights_loader = get_loader(
quantize=quantize, model_id=model_id, revision=revision
)
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
filenames,
device=device,
dtype=dtype,
process_group=self.process_group,
aliases=aliases,
weights_loader=weights_loader,
)
if config.quantize in ["awq", "exl2", "gptq", "marlin"]:
weights._set_gptq_params(model_id, revision)
model = model_class(config, weights)
torch.distributed.barrier(group=self.process_group)
super().__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
)
@classmethod
def fallback(
cls,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
if speculator:
raise RuntimeError("Speculator decoding is not enabled for AutoModel")
if torch.cuda.is_available():
device = torch.device("cuda")
dtype = torch.float16 if dtype is None else dtype
else:
if quantize:
raise ValueError("quantization is not available on CPU")
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id,
revision=revision,
torch_dtype=dtype,
device_map=(
"auto"
if torch.cuda.is_available() and torch.cuda.device_count() > 1
else None
),
load_in_8bit=quantize == "bitsandbytes",
trust_remote_code=trust_remote_code,
)
if torch.cuda.is_available() and torch.cuda.device_count() == 1:
model = model.cuda()
tokenizer = AutoTokenizer.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
tokenizer.bos_token_id = model.config.decoder_start_token_id
self = cls.__new__(
cls,
)
super().__init__(
self,
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
)
self.quantize = quantize
return self
@property
def batch_type(self) -> Type[Seq2SeqLMBatch]:
return Seq2SeqLMBatch
def forward(
self,
input_ids,
attention_mask,
decoder_input_ids,
        decoder_attention_mask: Optional[torch.Tensor],
        encoder_last_hidden_state: Optional[torch.Tensor],
        past_key_values: Optional[List[Tuple]] = None,
) -> Tuple[
torch.Tensor,
Optional[torch.Tensor],
torch.Tensor,
List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]],
]:
# Model Forward
outputs = self.model.forward(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_last_hidden_state,
past_key_values=past_key_values,
use_cache=True,
)
if isinstance(outputs, tuple):
# Our custom models
outputs, speculative_logits = outputs
else:
# Generic transformers models
speculative_logits = None
return (
outputs.logits,
speculative_logits,
outputs.encoder_last_hidden_state,
outputs.past_key_values,
)
@tracer.start_as_current_span("generate_token")
def generate_token(
self, batch: Seq2SeqLMBatch
) -> Tuple[List[Generation], Optional[Seq2SeqLMBatch], Tuple[int, int]]:
start = time.time_ns()
if batch.decoder_attention_mask is not None:
# slice to the correct shape
decoder_attention_mask = batch.decoder_attention_mask[
:, : -batch.padding_right_offset
]
else:
decoder_attention_mask = None
# Wrap `encoder_last_hidden_state` because for some reason, Transformers does a `encoder_last_hidden_state[0]`
# internally...
if batch.encoder_last_hidden_state is not None:
encoder_last_hidden_state = [batch.encoder_last_hidden_state]
else:
encoder_last_hidden_state = None
logits, speculative_logits, encoder_last_hidden_state, past = self.forward(
batch.input_ids,
batch.attention_mask,
batch.decoder_input_ids,
decoder_attention_mask,
encoder_last_hidden_state,
batch.past_key_values,
)
# Speculation is not active for seq2seq
accepted_ids = torch.ones_like(batch.decoder_input_ids)[:, 0]
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
batch.top_n_tokens,
batch.top_n_tokens_tensor,
torch.log_softmax(logits[:, -1], -1),
accepted_ids,
)
start_decode = time.time_ns()
# Finished requests
generations: List[Generation] = []
stopped = True
# Zipped iterator
iterator = zip(
batch.requests,
batch.input_lengths,
batch.prefix_offsets,
batch.read_offsets,
batch.decoder_input_lengths,
logits,
batch.next_token_choosers,
batch.stopping_criterias,
batch.all_decoder_input_ids,
batch.top_n_tokens,
batch_top_token_ids,
batch_top_token_logprobs,
)
# For each member of the batch
for i, (
request,
input_length,
prefix_offset,
read_offset,
decoder_input_length,
logits,
next_token_chooser,
stopping_criteria,
all_decoder_input_ids,
top_n_tokens,
top_token_ids,
top_token_logprobs,
) in enumerate(iterator):
# Select next token
next_token_id, logprobs = next_token_chooser(
all_decoder_input_ids.view(1, -1), logits[-1:, :]
)
# Append next token to decoder tokens
all_decoder_input_ids = torch.cat(
[all_decoder_input_ids, next_token_id.squeeze(1)]
)
new_decoder_input_length = decoder_input_length + 1
# Generated token
next_token_logprob = logprobs[-1, next_token_id]
next_token_id_squeezed = next_token_id.squeeze()
next_token_text, prefix_offset, read_offset = self.decode_token(
all_decoder_input_ids, prefix_offset, read_offset
)
# Evaluate stopping criteria
stop, reason = stopping_criteria(next_token_id, next_token_text)
if not stop:
stopped = False
# Shard generations
# All generations will be appended in the rust sharded client
if i % self.world_size == self.rank:
if stop:
# Slice with decoder_input_length to remove padding
# Decode all tokens
output_text, _, _ = self.decode_token(
all_decoder_input_ids,
prefix_offset=len(all_decoder_input_ids)
- decoder_input_length
- 1,
read_offset=len(all_decoder_input_ids) - decoder_input_length,
skip_special_tokens=True,
)
# Get seed
if isinstance(next_token_chooser.choice, Sampling):
seed = next_token_chooser.choice.seed
else:
seed = None
generated_text = GeneratedText(
output_text, stopping_criteria.current_tokens, reason, seed
)
else:
generated_text = None
# Prefill
if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
prefill_tokens = Tokens(
[self.tokenizer.bos_token_id],
[float("nan")],
[self.tokenizer.bos_token],
[False],
)
else:
prefill_tokens = None
if top_n_tokens > 0:
all_top_tokens = []
for top_token_ids, top_token_logprobs in zip(
top_token_ids, top_token_logprobs
):
toptoken_texts = self.tokenizer.batch_decode(
top_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
special_toptokens = [
token_id in self.all_special_ids
for token_id in top_token_ids
]
top_tokens = Tokens(
top_token_ids,
top_token_logprobs,
toptoken_texts,
special_toptokens,
)
all_top_tokens.append(top_tokens)
top_tokens = all_top_tokens
else:
top_tokens = None
generation = Generation(
request.id,
prefill_tokens,
Tokens(
[next_token_id_squeezed],
[next_token_logprob],
[next_token_text],
[next_token_id_squeezed.item() in self.all_special_ids],
),
generated_text,
top_tokens,
)
generations.append(generation)
# Update values
batch.next_token_choosers[i] = batch.next_token_choosers[i].advance_grammar(
next_token_id_squeezed.item()
)
batch.decoder_input_ids[i] = next_token_id
batch.all_decoder_input_ids[i] = all_decoder_input_ids
batch.input_lengths[i] = input_length
batch.decoder_input_lengths[i] = new_decoder_input_length
batch.prefix_offsets[i] = prefix_offset
batch.read_offsets[i] = read_offset
batch.max_input_length = max(batch.max_input_length, input_length)
batch.max_decoder_input_length = max(
batch.max_decoder_input_length, new_decoder_input_length
)
# We finished all generations in the batch; there is no next batch
if stopped:
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, None, (forward_ns, decode_ns)
# We don't need input_ids after the prefill forward
batch.input_ids = None
batch.encoder_last_hidden_state = encoder_last_hidden_state
batch.past_key_values = past
# Update decoder_attention_mask as we added a new token to input_ids
if batch.decoder_attention_mask is not None:
batch.decoder_attention_mask[:, -batch.padding_right_offset] = 1
batch.padding_right_offset -= 1
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, batch, (forward_ns, decode_ns)
|
text-generation-inference/server/text_generation_server/models/seq2seq_lm.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/models/seq2seq_lm.py",
"repo_id": "text-generation-inference",
"token_count": 17934
}
| 247
|
# coding=utf-8
# From: https://github.com/huggingface/peft/pull/1364
# Copyright 2024-present the HuggingFace Inc. team.
# Modifications by Predibase, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Literal
import torch
def magnitude_based_pruning(tensor: torch.Tensor, density: float) -> torch.Tensor:
"""
Prune the smallest values of the task tensors and retain the top-k values based on the specified fraction
`density`.
Args:
tensor (`torch.Tensor`):The tensor to prune.
density (`float`):The fraction of values to preserve. Should be in [0,1].
"""
mask = torch.zeros_like(tensor).reshape(-1)
k = int(density * tensor.reshape(-1).shape[0])
top_k = torch.topk(tensor.abs().reshape(-1), k=k, largest=True)
mask[top_k[1]] = 1
return tensor * mask.reshape(tensor.shape)
def random_pruning(tensor: torch.Tensor, density: float, rescale: bool) -> torch.Tensor:
"""
    Randomly prune the values of the tensor, keeping each value with probability `density`.
Args:
tensor (`torch.Tensor`):The tensor to prune.
density (`float`):The fraction of values to preserve. Should be in [0,1].
rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.
"""
mask = torch.bernoulli(torch.full_like(input=tensor, fill_value=density))
pruned_tensor = tensor * mask
if rescale:
        pruned_tensor = torch.div(input=pruned_tensor, other=density)
return pruned_tensor
def prune(
tensor: torch.Tensor,
density: float,
method: Literal["magnitude", "random"],
rescale: bool = False,
) -> torch.Tensor:
"""
Prune the values of task tensors based on the `method`.
Args:
tensor (`torch.Tensor`):The tensor to prune.
density (`float`):The fraction of values to preserve. Should be in [0,1].
method (`str`):The method to use to prune. Should be one of ["magnitude", "random"].
rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor.
"""
if density >= 1:
return tensor
elif density < 0:
raise ValueError("Density should be >= 0, got {density}")
if method == "magnitude":
return magnitude_based_pruning(tensor, density)
elif method == "random":
return random_pruning(tensor, density, rescale=rescale)
else:
raise ValueError(f"Unknown method {method}")
def calculate_majority_sign_mask(
tensor: torch.Tensor, method: Literal["total", "frequency"] = "total"
):
"""
Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0.
Args:
tensor (`torch.Tensor`):The tensor to get the mask from.
method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"].
"""
sign = tensor.sign()
if method == "total":
sign_magnitude = (sign * tensor.abs()).sum(dim=0)
elif method == "frequency":
sign_magnitude = sign.sum(dim=0)
else:
raise RuntimeError(f'Unimplemented mask method "{method}"')
majority_sign = torch.where(sign_magnitude >= 0, 1, -1)
return sign == majority_sign
def disjoint_merge(task_tensors, majority_sign_mask):
mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0)
num_params_preserved = majority_sign_mask.sum(dim=0)
return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0)
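# Illustrative sketch (editor addition; the tensor names are hypothetical): these
# helpers compose into a TIES-style merge of task (delta) tensors stacked on dim 0.
#
#   task_tensors = torch.stack([delta_a, delta_b, delta_c])      # (n_tasks, *shape)
#   pruned = torch.stack(
#       [prune(t, density=0.2, method="magnitude") for t in task_tensors]
#   )
#   mask = calculate_majority_sign_mask(pruned, method="total")  # bool, same shape
#   merged = disjoint_merge(pruned, mask)                        # (*shape,)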
|
text-generation-inference/server/text_generation_server/utils/merges/utils.py/0
|
{
"file_path": "text-generation-inference/server/text_generation_server/utils/merges/utils.py",
"repo_id": "text-generation-inference",
"token_count": 1422
}
| 248
|
{
"name": "tokenizers-win32-arm64-msvc",
"version": "0.13.4-rc1",
"os": [
"win32"
],
"cpu": [
"arm64"
],
"main": "tokenizers.win32-arm64-msvc.node",
"files": [
"tokenizers.win32-arm64-msvc.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
}
|
tokenizers/bindings/node/npm/win32-arm64-msvc/package.json/0
|
{
"file_path": "tokenizers/bindings/node/npm/win32-arm64-msvc/package.json",
"repo_id": "tokenizers",
"token_count": 277
}
| 249
|
extern crate tokenizers as tk;
use crate::models::Model;
use napi::bindgen_prelude::*;
use std::sync::{Arc, RwLock};
use tokenizers::models::bpe::{BpeBuilder, BPE};
use tokenizers::models::wordlevel::{WordLevel, WordLevelBuilder};
use tokenizers::models::wordpiece::{WordPiece, WordPieceBuilder};
pub struct BPEFromFilesTask {
pub(crate) builder: Option<BpeBuilder>,
}
impl Task for BPEFromFilesTask {
type Output = BPE;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
pub struct WordPieceFromFilesTask {
pub(crate) builder: Option<WordPieceBuilder>,
}
impl Task for WordPieceFromFilesTask {
type Output = WordPiece;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
pub struct WordLevelFromFilesTask {
pub(crate) builder: Option<WordLevelBuilder>,
}
impl Task for WordLevelFromFilesTask {
type Output = WordLevel;
type JsValue = Model;
fn compute(&mut self) -> Result<Self::Output> {
self
.builder
.take()
.ok_or(Error::from_reason("Empty builder".to_string()))?
.build()
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(Model {
model: Some(Arc::new(RwLock::new(output.into()))),
})
}
}
|
tokenizers/bindings/node/src/tasks/models.rs/0
|
{
"file_path": "tokenizers/bindings/node/src/tasks/models.rs",
"repo_id": "tokenizers",
"token_count": 800
}
| 250
|