| sample_id (string) | text (string) | metadata (dict) | category (6 classes) |
|---|---|---|---|
huggingface/peft:src/peft/tuners/cartridge/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from peft.config import PromptLearningConfig
from peft.utils import PeftType
@dataclass
class CartridgeConfig(PromptLearningConfig):
"""
Configuration for CARTRIDGE, a KV-cache-parameterized prefix adapter.
This is similar to prefix-tuning in how it is served (as `past_key_values`), but it stores the KV cache directly as
trainable parameters instead of learning it via an MLP projection.
Initialization:
The Cartridges paper discusses multiple initialization options. In PEFT, initialization is a *separate* step
from constructing the adapter config:
- **Random KV initialization (paper option 2)**: Create the adapter via `get_peft_model(...)`. The CARTRIDGE
prompt encoder parameters are randomly initialized by PyTorch.
- **KV derived from the first tokens of a prompt/corpus (paper option 3)**: Run a no-grad prefill on the *base
model* and copy the first `num_virtual_tokens` cached KV tokens into the adapter. PEFT provides utilities for
this (importable from `peft` or from `peft.tuners.cartridge.utils`):
- `initialize_kv_prefix_from_text(model, tokenizer, text=...)`
- `initialize_kv_prefix_from_past_key_values(model, past_key_values=...)`
If you already have a flattened KV-prefix tensor, you can load it directly via the prompt encoder’s
`load_prompt_embeddings(...)` method.
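Example (a minimal sketch of both paths; `base_model`, `tokenizer`, and `corpus_text` are assumed to be defined,
and the sizes are illustrative):
```py
>>> from peft import CartridgeConfig, get_peft_model, initialize_kv_prefix_from_text
>>> config = CartridgeConfig(num_virtual_tokens=256, num_frozen_tokens=1, task_type="CAUSAL_LM")
>>> model = get_peft_model(base_model, config)  # random KV initialization (paper option 2)
>>> # optionally overwrite the random init with a prefill-derived KV prefix (paper option 3)
>>> _ = initialize_kv_prefix_from_text(model, tokenizer, text=corpus_text)
```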
Args:
num_frozen_tokens (`int`, defaults to 1):
Number of *prefix* tokens at the start of the cartridge to keep frozen (no gradients). The Cartridges paper
recommends freezing the first token (set this to `1`) for stability: many LLMs use early tokens as attention
sinks, and updating them can harm training.
"""
num_frozen_tokens: int = field(
default=1,
metadata={
"help": (
"Number of initial virtual tokens to freeze (no gradients). The paper recommends freezing the first "
"token as an attention sink for stability."
)
},
)
def __post_init__(self):
super().__post_init__()
if self.num_frozen_tokens < 0:
raise ValueError(f"`num_frozen_tokens` must be >= 0, got {self.num_frozen_tokens}.")
# `num_virtual_tokens` is required for prompt-learning configs. Validate the relationship early for a clearer
# error, even if the encoder also checks it.
if (self.num_virtual_tokens is not None) and (self.num_frozen_tokens > self.num_virtual_tokens):
raise ValueError(
f"`num_frozen_tokens` must be <= `num_virtual_tokens`, got num_frozen_tokens={self.num_frozen_tokens} "
f"and num_virtual_tokens={self.num_virtual_tokens}."
)
self.peft_type = PeftType.CARTRIDGE
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/cartridge/config.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/cartridge/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import torch
class CartridgeEncoder(torch.nn.Module):
"""
A parameterized prefix KV cache.
The parameters are stored in the same flattened layout as `PrefixEncoder` output: `[num_virtual_tokens, num_layers
* 2 * token_dim]`, where `token_dim` is per-head hidden size times number of heads (after any GQA adjustment
performed by `_prepare_prompt_learning_config`).
If `num_frozen_tokens > 0`, the first `num_frozen_tokens` virtual tokens are stored as a non-trainable parameter,
and the remaining tokens are trainable.
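Example (a minimal sketch; assumes `config` has already been prepared by PEFT so that `num_layers` and
`token_dim` are filled in):
```py
>>> import torch
>>> encoder = CartridgeEncoder(config)
>>> hidden = config.num_layers * 2 * config.token_dim
>>> tuple(encoder.weight.shape) == (config.num_virtual_tokens, hidden)
True
>>> # a flattened KV prefix of the same shape can be loaded directly:
>>> encoder.load_prompt_embeddings(torch.zeros(config.num_virtual_tokens, hidden))
```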
"""
def __init__(self, config):
super().__init__()
self.config = config
num_virtual_tokens = config.num_virtual_tokens
hidden = config.num_layers * 2 * config.token_dim
num_frozen_tokens = int(config.num_frozen_tokens)
if num_frozen_tokens < 0 or num_frozen_tokens > num_virtual_tokens:
raise ValueError(
f"`num_frozen_tokens` must be in [0, num_virtual_tokens], got {num_frozen_tokens} for "
f"num_virtual_tokens={num_virtual_tokens}."
)
self.num_frozen_tokens = num_frozen_tokens
self.num_trainable_tokens = num_virtual_tokens - num_frozen_tokens
if self.num_frozen_tokens:
frozen = torch.empty(self.num_frozen_tokens, hidden)
self.frozen_embedding = torch.nn.Parameter(frozen, requires_grad=False)
else:
self.frozen_embedding = None
trainable = torch.empty(self.num_trainable_tokens, hidden)
self.trainable_embedding = torch.nn.Parameter(trainable, requires_grad=not config.inference_mode)
self.reset_parameters()
@property
def embedding(self):
"""
Expose a prefix-encoder compatible interface (`.embedding.weight`) for PEFT internals.
"""
class _Proxy(torch.nn.Module):
def __init__(self, parent: CartridgeEncoder):
super().__init__()
self._parent = parent
@property
def weight(self):
return self._parent.weight
return _Proxy(self)
@property
def weight(self) -> torch.Tensor:
if self.frozen_embedding is None:
return self.trainable_embedding
return torch.cat([self.frozen_embedding, self.trainable_embedding], dim=0)
def reset_parameters(self):
# Match `torch.nn.Embedding` initialization (normal with std=1).
with torch.no_grad():
if self.frozen_embedding is not None:
torch.nn.init.normal_(self.frozen_embedding)
torch.nn.init.normal_(self.trainable_embedding)
def load_prompt_embeddings(self, prompt_embeddings: torch.Tensor) -> None:
"""
Load the flattened prompt embeddings saved by PEFT (`prompt_embeddings`).
PEFT saves prompt-learning adapters as a single `prompt_embeddings` tensor. For CARTRIDGE, we split that tensor
into frozen and trainable segments according to `self.num_frozen_tokens`.
"""
if prompt_embeddings.ndim != 2 or prompt_embeddings.shape[0] != (
self.num_frozen_tokens + self.num_trainable_tokens
):
raise ValueError(
"Invalid `prompt_embeddings` shape. Expected "
f"({self.num_frozen_tokens + self.num_trainable_tokens}, hidden), got {tuple(prompt_embeddings.shape)}."
)
with torch.no_grad():
if self.frozen_embedding is not None:
self.frozen_embedding.copy_(
prompt_embeddings[: self.num_frozen_tokens].to(self.frozen_embedding.device)
)
trainable_part = prompt_embeddings[self.num_frozen_tokens :]
else:
trainable_part = prompt_embeddings
self.trainable_embedding.copy_(trainable_part.to(self.trainable_embedding.device))
def forward(self, prefix_tokens: torch.Tensor) -> torch.Tensor:
batch_size = prefix_tokens.shape[0]
# Ignore token ids; they exist for prompt-learning uniformity.
return self.weight.unsqueeze(0).expand(batch_size, -1, -1)
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/cartridge/model.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/cartridge/utils.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Sequence
from dataclasses import replace
from pathlib import Path
from typing import Any, Optional
import torch
from safetensors.torch import save_file
from peft.config import PeftConfig
from peft.utils import PeftType
from peft.utils.constants import SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME
from peft.utils.save_and_load import load_peft_weights
def _to_legacy_past_key_values(past_key_values: Any):
# Support both legacy tuples and transformers.Cache-like objects.
if isinstance(past_key_values, (tuple, list)):
return past_key_values
to_legacy = getattr(past_key_values, "to_legacy_cache", None)
if callable(to_legacy):
return to_legacy()
if hasattr(past_key_values, "__iter__"):
legacy = list(past_key_values)
if legacy and isinstance(legacy[0], (tuple, list)) and len(legacy[0]) >= 2:
return [(layer[0], layer[1]) for layer in legacy]
raise TypeError(
"Unsupported `past_key_values` type. Expected a legacy tuple/list, an object with `to_legacy_cache()`, or an "
"iterable of (key, value) tuples."
)
def prompt_embeddings_from_past_key_values(
past_key_values: Any,
*,
num_virtual_tokens: int,
) -> torch.Tensor:
"""
Convert a (legacy) `past_key_values` cache into the flattened prompt embeddings tensor saved by PEFT.
The output matches the layout expected by `PeftModel.get_prompt()` for prefix-style prompt learning: shape
`[num_virtual_tokens, num_layers * 2 * token_dim]`.
"""
legacy = _to_legacy_past_key_values(past_key_values)
if len(legacy) == 0:
raise ValueError("Empty `past_key_values`.")
# Each layer: (key, value), where key/value are [batch, num_heads, seq_len, head_dim]
num_layers = len(legacy)
key0, value0 = legacy[0]
if key0.ndim != 4:
raise ValueError(f"Expected key/value tensors with rank 4, got key.ndim={key0.ndim}.")
if key0.shape[0] != 1:
raise ValueError(
"This helper expects `past_key_values` from a single-sequence prefill (batch=1). "
f"Got batch={key0.shape[0]}."
)
num_heads = key0.shape[1]
seq_len = key0.shape[2]
head_dim = key0.shape[3]
if seq_len < num_virtual_tokens:
raise ValueError(f"Need at least {num_virtual_tokens} cached tokens, got {seq_len}.")
packed = torch.empty(
num_virtual_tokens,
num_layers * 2,
num_heads,
head_dim,
device=key0.device,
dtype=key0.dtype,
)
for layer_idx, (k, v) in enumerate(legacy):
if k.shape[:2] != (1, num_heads) or v.shape[:2] != (1, num_heads):
raise ValueError("Inconsistent head shapes across layers in `past_key_values`.")
if k.shape[2] < num_virtual_tokens or v.shape[2] < num_virtual_tokens:
raise ValueError("Not enough cached tokens in `past_key_values` for the requested cartridge length.")
if k.shape[3] != head_dim or v.shape[3] != head_dim:
raise ValueError("Inconsistent head_dim across layers in `past_key_values`.")
packed[:, 2 * layer_idx] = k[0, :, :num_virtual_tokens, :].transpose(0, 1).contiguous()
packed[:, 2 * layer_idx + 1] = v[0, :, :num_virtual_tokens, :].transpose(0, 1).contiguous()
return packed.reshape(num_virtual_tokens, -1)
@torch.no_grad()
def initialize_kv_prefix_from_past_key_values(
model,
*,
adapter_name: Optional[str] = None,
past_key_values: Any,
num_virtual_tokens: Optional[int] = None,
) -> torch.Tensor:
"""
Initialize a KV-prefix prompt-learning adapter from an existing cached prefix (`past_key_values`).
Returns the prompt embeddings tensor that was loaded into the adapter.
"""
if adapter_name is None:
adapter_name = model.active_adapter
config = model.peft_config[adapter_name]
if config.peft_type not in (PeftType.CARTRIDGE, PeftType.PREFIX_TUNING):
raise ValueError(
f"Adapter '{adapter_name}' must be a CARTRIDGE or PREFIX_TUNING adapter (got {config.peft_type})."
)
if getattr(config, "prefix_projection", False):
raise ValueError(
"Initialization from KV cache is not supported for prefix tuning with `prefix_projection=True`."
)
if num_virtual_tokens is None:
num_virtual_tokens = config.num_virtual_tokens
prompt_embeddings = prompt_embeddings_from_past_key_values(past_key_values, num_virtual_tokens=num_virtual_tokens)
model.prompt_encoder[adapter_name].load_prompt_embeddings(prompt_embeddings)
return prompt_embeddings
@torch.no_grad()
def initialize_kv_prefix_from_text(
model,
tokenizer,
*,
text: str,
adapter_name: Optional[str] = None,
num_virtual_tokens: Optional[int] = None,
use_chat_template: bool = True,
max_length: Optional[int] = None,
) -> torch.Tensor:
"""
Convenience initializer: prefill the base model on `text` and load the resulting cache prefix into the adapter.
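Example (a minimal sketch; `model` is assumed to be a PEFT model with a CARTRIDGE adapter, and `tokenizer` and
`corpus_text` are assumed to be defined):
```py
>>> _ = initialize_kv_prefix_from_text(model, tokenizer, text=corpus_text, max_length=1024)
```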
"""
if adapter_name is None:
adapter_name = model.active_adapter
config = model.peft_config[adapter_name]
if config.peft_type not in (PeftType.CARTRIDGE, PeftType.PREFIX_TUNING):
raise ValueError(
f"Adapter '{adapter_name}' must be a CARTRIDGE or PREFIX_TUNING adapter (got {config.peft_type})."
)
if getattr(config, "prefix_projection", False):
raise ValueError(
"Initialization from KV cache is not supported for prefix tuning with `prefix_projection=True`."
)
if num_virtual_tokens is None:
num_virtual_tokens = config.num_virtual_tokens
def _tokenize_plain():
toks = tokenizer(text, return_tensors="pt", truncation=max_length is not None, max_length=max_length)
return toks["input_ids"]
if use_chat_template and hasattr(tokenizer, "apply_chat_template"):
try:
input_ids = tokenizer.apply_chat_template(
[{"role": "system", "content": text}],
tokenize=True,
add_generation_prompt=False,
return_dict=False,
return_tensors="pt",
)
except (TypeError, ValueError):
# Some tokenizers don't support the full signature or do not define a chat template.
input_ids = _tokenize_plain()
else:
if max_length is not None and input_ids.shape[1] > max_length:
input_ids = input_ids[:, :max_length]
else:
input_ids = _tokenize_plain()
input_ids = input_ids.to(model.device)
attention_mask = torch.ones_like(input_ids)
with model.disable_adapter():
outputs = model(input_ids=input_ids, attention_mask=attention_mask, use_cache=True)
return initialize_kv_prefix_from_past_key_values(
model,
adapter_name=adapter_name,
past_key_values=outputs.past_key_values,
num_virtual_tokens=num_virtual_tokens,
)
def compose_cartridge_adapters(
adapter_paths: Sequence[str | Path],
*,
output_path: str | Path,
safe_serialization: bool = True,
) -> None:
"""
Compose multiple CARTRIDGE adapters by concatenating their prompt embeddings.
This implements the paper's "composition via concatenation" behavior at the adapter level (no runtime
multi-adapter).
"""
adapter_paths = [Path(p) for p in adapter_paths]
if len(adapter_paths) < 2:
raise ValueError("Need at least 2 adapters to compose.")
configs = [PeftConfig.from_pretrained(str(p)) for p in adapter_paths]
for p, cfg in zip(adapter_paths, configs):
if cfg.peft_type != PeftType.CARTRIDGE:
raise ValueError(f"Adapter at '{p}' is not a CARTRIDGE adapter (got {cfg.peft_type}).")
base = configs[0]
for cfg in configs[1:]:
for attr in ("task_type", "token_dim", "num_layers", "num_attention_heads", "num_transformer_submodules"):
if getattr(cfg, attr, None) != getattr(base, attr, None):
raise ValueError(f"Incompatible CARTRIDGE configs for attribute '{attr}'.")
weights = [load_peft_weights(str(p), device="cpu") for p in adapter_paths]
prompt_embeddings = [w["prompt_embeddings"] for w in weights]
composed = torch.cat(prompt_embeddings, dim=0)
num_virtual_tokens = composed.shape[0]
# Preserve the "frozen prefix tokens" count of the first adapter only (matches a single attention-sink prefix).
num_frozen_tokens = base.num_frozen_tokens
out_cfg = replace(base, num_virtual_tokens=num_virtual_tokens, num_frozen_tokens=num_frozen_tokens)
output_path = Path(output_path)
output_path.mkdir(parents=True, exist_ok=True)
out_cfg.save_pretrained(str(output_path))
if safe_serialization:
save_file({"prompt_embeddings": composed}, str(output_path / SAFETENSORS_WEIGHTS_NAME))
else:
torch.save({"prompt_embeddings": composed}, str(output_path / WEIGHTS_NAME))
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/cartridge/utils.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_cartridge.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import pytest
import torch
from transformers import AutoModelForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from peft import (
CartridgeConfig,
PeftConfig,
PeftModel,
compose_cartridge_adapters,
get_peft_model,
initialize_kv_prefix_from_past_key_values,
load_peft_weights,
prompt_embeddings_from_past_key_values,
)
from peft.tuners import PrefixTuningConfig
from .testing_utils import hub_online_once
TINY_CAUSAL_LM = "peft-internal-testing/tiny-random-OPTForCausalLM"
@pytest.fixture
def model_id():
return TINY_CAUSAL_LM
@pytest.fixture
def base_model(model_id):
with hub_online_once(model_id):
return AutoModelForCausalLM.from_pretrained(model_id)
def test_cartridge_offsets_position_ids_in_forward(monkeypatch, base_model):
base = base_model
peft_config = CartridgeConfig(num_virtual_tokens=4, num_frozen_tokens=1, task_type="CAUSAL_LM")
model = get_peft_model(base, peft_config)
captured = {}
def fake_forward(*args, **kwargs):
captured["position_ids"] = kwargs.get("position_ids")
input_ids = kwargs.get("input_ids")
if input_ids is None and args:
input_ids = args[0]
batch, seq_len = input_ids.shape
logits = torch.zeros((batch, seq_len, base.config.vocab_size), device=input_ids.device)
return CausalLMOutputWithPast(logits=logits)
monkeypatch.setattr(model.base_model, "forward", fake_forward)
input_ids = torch.randint(0, base.config.vocab_size, (1, 3))
position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0)
_ = model(input_ids=input_ids, position_ids=position_ids)
assert captured["position_ids"] is not None
assert torch.equal(captured["position_ids"], position_ids + peft_config.num_virtual_tokens)
def test_cartridge_prefill_4d_mask_uses_cache_position(monkeypatch, base_model):
base = base_model
peft_config = CartridgeConfig(num_virtual_tokens=4, num_frozen_tokens=1, task_type="CAUSAL_LM")
model = get_peft_model(base, peft_config)
captured = {}
def fake_create_attention_mask(
model,
*,
model_input,
attention_mask,
past_key_values,
cache_position,
batch_size,
sequence_length,
position_ids,
):
captured["cache_position"] = cache_position
return attention_mask
monkeypatch.setattr("peft.peft_model.create_attention_mask", fake_create_attention_mask)
input_ids = torch.randint(0, base.config.vocab_size, (1, 2))
attention_mask_4d = torch.ones((1, 1, input_ids.shape[1], input_ids.shape[1]))
cache_position = torch.arange(input_ids.shape[1])
def fake_prepare_inputs_for_generation(*args, **kwargs):
return {
"input_ids": input_ids,
"attention_mask": attention_mask_4d,
"cache_position": cache_position,
"past_key_values": None,
}
model.base_model_prepare_inputs_for_generation = fake_prepare_inputs_for_generation
_ = model.prepare_inputs_for_generation(input_ids)
assert captured["cache_position"] is not None
assert torch.equal(captured["cache_position"], cache_position)
@pytest.mark.parametrize("num_frozen_tokens", [0, 2])
def test_cartridge_forward_and_save_load(tmp_path, num_frozen_tokens, base_model, model_id):
base = base_model
peft_config = CartridgeConfig(num_virtual_tokens=4, num_frozen_tokens=num_frozen_tokens, task_type="CAUSAL_LM")
model = get_peft_model(base, peft_config)
assert model.active_peft_config.peft_type.value == "CARTRIDGE"
if num_frozen_tokens:
assert model.prompt_encoder[model.active_adapter].frozen_embedding is not None
assert model.prompt_encoder[model.active_adapter].frozen_embedding.requires_grad is False
else:
assert model.prompt_encoder[model.active_adapter].frozen_embedding is None
assert model.prompt_encoder[model.active_adapter].trainable_embedding.requires_grad is True
input_ids = torch.randint(0, base.config.vocab_size, (1, 8))
out = model(input_ids=input_ids)
assert out.logits.shape[:2] == (1, 8)
model.prompt_encoder[model.active_adapter].trainable_embedding.data.fill_(3.0)
if num_frozen_tokens:
model.prompt_encoder[model.active_adapter].frozen_embedding.data.fill_(7.0)
model.save_pretrained(tmp_path)
with hub_online_once(model_id):
base2 = AutoModelForCausalLM.from_pretrained(model_id)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
loaded = PeftModel.from_pretrained(base2, tmp_path)
assert not any("Found missing adapter keys" in str(warning.message) for warning in w)
out2 = loaded(input_ids=input_ids)
assert out2.logits.shape == out.logits.shape
assert torch.allclose(
loaded.prompt_encoder[loaded.active_adapter].trainable_embedding,
torch.full_like(loaded.prompt_encoder[loaded.active_adapter].trainable_embedding, 3.0),
)
if num_frozen_tokens:
assert torch.allclose(
loaded.prompt_encoder[loaded.active_adapter].frozen_embedding,
torch.full_like(loaded.prompt_encoder[loaded.active_adapter].frozen_embedding, 7.0),
)
else:
assert loaded.prompt_encoder[loaded.active_adapter].frozen_embedding is None
def test_cartridge_init_from_past_key_values_and_compose(tmp_path, base_model, model_id):
base = base_model
peft_config = CartridgeConfig(num_virtual_tokens=4, num_frozen_tokens=1, task_type="CAUSAL_LM")
model = get_peft_model(base, peft_config)
# Prefill on the *base* model and use the cache prefix as initialization.
input_ids = torch.randint(0, base.config.vocab_size, (1, 12))
with model.disable_adapter():
outputs = model(input_ids=input_ids, use_cache=True)
prompt_embeddings = initialize_kv_prefix_from_past_key_values(
model, past_key_values=outputs.past_key_values, num_virtual_tokens=4
)
assert prompt_embeddings.shape[0] == 4
assert model.prompt_encoder[model.active_adapter].weight.device == prompt_embeddings.device
assert torch.allclose(model.prompt_encoder[model.active_adapter].weight, prompt_embeddings)
a1 = tmp_path / "a1"
a2 = tmp_path / "a2"
out_dir = tmp_path / "composed"
model.save_pretrained(a1)
with hub_online_once(model_id):
base2 = AutoModelForCausalLM.from_pretrained(model_id)
model2 = get_peft_model(base2, peft_config)
with model2.disable_adapter():
outputs2 = model2(input_ids=input_ids, use_cache=True)
_ = initialize_kv_prefix_from_past_key_values(
model2, past_key_values=outputs2.past_key_values, num_virtual_tokens=4
)
model2.save_pretrained(a2)
compose_cartridge_adapters([a1, a2], output_path=out_dir)
cfg = PeftConfig.from_pretrained(out_dir)
assert cfg.peft_type.value == "CARTRIDGE"
assert cfg.num_virtual_tokens == 8
w = load_peft_weights(out_dir, device="cpu")
assert w["prompt_embeddings"].shape[0] == 8
def test_cartridge_prompt_embeddings_from_past_key_values_matches_init(base_model):
base = base_model
peft_config = CartridgeConfig(num_virtual_tokens=4, num_frozen_tokens=0, task_type="CAUSAL_LM")
model = get_peft_model(base, peft_config)
input_ids = torch.randint(0, base.config.vocab_size, (1, 10))
with model.disable_adapter():
outputs = model(input_ids=input_ids, use_cache=True)
pe = prompt_embeddings_from_past_key_values(outputs.past_key_values, num_virtual_tokens=4)
assert pe.shape[0] == 4
pe2 = initialize_kv_prefix_from_past_key_values(
model, past_key_values=outputs.past_key_values, num_virtual_tokens=4
)
assert pe.device == pe2.device
assert torch.allclose(pe, pe2)
@pytest.mark.parametrize("num_frozen_tokens", [0, 2])
def test_cartridge_inference_mode_disables_grads_and_forward_works(num_frozen_tokens, base_model):
base = base_model
peft_config = CartridgeConfig(
num_virtual_tokens=4,
num_frozen_tokens=num_frozen_tokens,
task_type="CAUSAL_LM",
inference_mode=True,
)
model = get_peft_model(base, peft_config)
enc = model.prompt_encoder[model.active_adapter]
# In `inference_mode=True`, PEFT should mark adapter parameters as non-trainable (no gradients) so users can
# safely run forward/generation without accidentally updating or tracking grads for the CARTRIDGE parameters.
assert enc.trainable_embedding.requires_grad is False
if num_frozen_tokens:
assert enc.frozen_embedding is not None
assert enc.frozen_embedding.requires_grad is False
else:
assert enc.frozen_embedding is None
input_ids = torch.randint(0, base.config.vocab_size, (1, 6))
out = model(input_ids=input_ids)
assert out.logits.shape[:2] == (1, 6)
def test_cartridge_gradient_checkpointing_raises(base_model):
base = base_model
base.gradient_checkpointing_enable()
peft_config = CartridgeConfig(num_virtual_tokens=4, num_frozen_tokens=0, task_type="CAUSAL_LM")
with pytest.raises(ValueError, match="does not work with gradient checkpointing"):
_ = get_peft_model(base, peft_config)
def test_prefix_tuning_can_be_initialized_from_past_key_values_when_no_projection(base_model):
base = base_model
peft_config = PrefixTuningConfig(num_virtual_tokens=4, task_type="CAUSAL_LM")
model = get_peft_model(base, peft_config)
input_ids = torch.randint(0, base.config.vocab_size, (1, 10))
with model.disable_adapter():
outputs = model(input_ids=input_ids, use_cache=True)
pe = prompt_embeddings_from_past_key_values(outputs.past_key_values, num_virtual_tokens=4)
pe2 = initialize_kv_prefix_from_past_key_values(
model, past_key_values=outputs.past_key_values, num_virtual_tokens=4
)
assert pe.device == pe2.device
assert torch.allclose(pe, pe2)
assert model.prompt_encoder[model.active_adapter].embedding.weight.device == pe.device
assert torch.allclose(model.prompt_encoder[model.active_adapter].embedding.weight, pe)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_cartridge.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/bdlora_finetuning/chat.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import time
import aiohttp
import typer
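# Example usage (a minimal sketch; assumes an OpenAI-compatible completions server, e.g. vLLM, listening on
# http://localhost:8000 with an adapter registered under the name "lora1"):
#   python chat.py "What is PEFT?" --target lora1 --num-requests 8 --record responses.json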
def chat(
msg: str,
target: str = "lora1",
max_tokens: int = 100,
deterministic: bool = True,
record: str = "",
num_requests: int = 32,
):
payload = {
"model": target,
"prompt": msg,
"max_tokens": max_tokens,
}
if deterministic:
payload = payload | {"temperature": 0, "top_p": 1, "top_k": 1}
url = "http://localhost:8000/v1/completions"
async def _request(session):
async with session.post(url, json=payload) as response:
response_json = await response.json()
return response_json["choices"][0]["text"]
async def run_concurrent():
async with aiohttp.ClientSession() as session:
tasks = [_request(session) for _ in range(num_requests)]
return await asyncio.gather(*tasks)
start_time = time.time()
response_texts = asyncio.run(run_concurrent())
end_time = time.time()
print(f"Completed {num_requests} requests in {end_time - start_time:.2f} seconds")
if record:
with open(record, "w") as f:
json.dump({"prompt": msg, "responses": response_texts}, f, indent=2)
else:
print(response_texts[0])
if __name__ == "__main__":
typer.run(chat)
| {
"repo_id": "huggingface/peft",
"file_path": "examples/bdlora_finetuning/chat.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/gralora_finetuning/gralora_finetuning.py | # This script is based on examples/dora_finetuning/dora_finetuning.py
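# Example usage (a minimal sketch; the model, dataset, and output paths are illustrative and should be adjusted):
#   python gralora_finetuning.py --base_model meta-llama/Llama-3.2-3B \
#       --data_path timdettmers/openassistant-guanaco --output_dir ./gralora-llama-3.2-3b \
#       --gralora_r 32 --gralora_k 2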
import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import GraloraConfig, get_peft_model
def train_model(
base_model: str,
data_path: str,
output_dir: str,
batch_size: int,
num_epochs: int,
learning_rate: float,
cutoff_len: int,
val_set_size: int,
eval_step: int,
save_step: int,
device: str,
gralora_r: int,
gralora_alpha: int,
gralora_dropout: float,
gralora_target_modules: str,
gralora_k: int,
hybrid_r: int,
hub_model_id: str,
push_to_hub: bool,
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
hf_token = os.getenv("HF_TOKEN")
# Setup device
if device == "auto":
device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
else:
device = torch.device(device)
print(f"Using device: {device}")
# load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(base_model, token=hf_token)
# GraLoRA config for the PEFT model
gralora_config = GraloraConfig(
r=gralora_r, # Rank of matrix
alpha=gralora_alpha,
target_modules=(
gralora_target_modules.split(",")
if gralora_target_modules
else ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
),
gralora_dropout=gralora_dropout,
gralora_k=gralora_k,
hybrid_r=hybrid_r,
bias="none",
)
# get the peft model with GraLoRA config
model = get_peft_model(model, gralora_config)
model.to(device) # MODEL TO GPU/CUDA
tokenizer.pad_token = tokenizer.eos_token
# Load the dataset
dataset = load_dataset(data_path)
def tokenize_function(examples):
inputs = tokenizer(examples["text"], padding="max_length", truncation=True, max_length=cutoff_len)
inputs["labels"] = inputs["input_ids"].copy() # setting labels for a language modeling task
return inputs
# Tokenize the dataset and prepare for training
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
# Data collator to dynamically pad the batched examples
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
# Define training arguments
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_steps=100,
weight_decay=0.01,
logging_steps=eval_step,
save_steps=save_step,
save_total_limit=2,
push_to_hub=push_to_hub,
hub_model_id=hub_model_id,
gradient_accumulation_steps=16,
fp16=True,
learning_rate=learning_rate,
hub_token=hf_token,
)
# Clear device cache to free memory
if torch.cuda.is_available():
torch.cuda.empty_cache()
elif torch.xpu.is_available():
torch.xpu.empty_cache()
# Initialize the Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
)
# Start model training
trainer.train()
# Save and push the trained model and tokenizer
if push_to_hub:
# Push the main model to the hub
trainer.push_to_hub(commit_message="Fine-tuned model")
# Save the model and tokenizer locally
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Fine-tune LLaMA with GraLoRA and PEFT")
parser.add_argument("--base_model", type=str, default="meta-llama/Llama-3.2-3B", help="Base model path or name")
parser.add_argument(
"--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name"
)
parser.add_argument(
"--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model"
)
parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate")
parser.add_argument("--cutoff_len", type=int, default=512, help="Cutoff length for tokenization")
parser.add_argument("--val_set_size", type=int, default=500, help="Validation set size")
parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval")
parser.add_argument("--save_step", type=int, default=100, help="Save step interval")
parser.add_argument("--device", type=str, default="auto", help="Device to use for training")
parser.add_argument("--gralora_r", type=int, default=8, help="LoRA rank")
parser.add_argument("--gralora_alpha", type=int, default=16, help="LoRA alpha")
parser.add_argument("--gralora_dropout", type=float, default=0.05, help="LoRA dropout rate")
parser.add_argument(
"--gralora_target_modules", type=str, default=None, help="Comma-separated list of target modules for LoRA"
)
parser.add_argument("--gralora_k", type=int, default=2, help="GraLoRA k")
parser.add_argument("--hybrid_r", type=int, default=0, help="Hybrid rank")
parser.add_argument(
"--hub_model_id",
type=str,
default="path/to/repo",
help="Repository name to push the model on the Hugging Face Hub",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
args = parser.parse_args()
train_model(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
eval_step=args.eval_step,
save_step=args.save_step,
device=args.device,
gralora_r=args.gralora_r,
gralora_alpha=args.gralora_alpha,
gralora_dropout=args.gralora_dropout,
gralora_target_modules=args.gralora_target_modules,
gralora_k=args.gralora_k,
hybrid_r=args.hybrid_r,
hub_model_id=args.hub_model_id,
push_to_hub=args.push_to_hub,
)
| {
"repo_id": "huggingface/peft",
"file_path": "examples/gralora_finetuning/gralora_finetuning.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:src/peft/tuners/gralora/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class GraloraConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`GraloraModel`].
Args:
r (`int`):
GraLoRA attention dimension, which determines the rank of the GraLoRA adapter. The total parameter count of the
GraLoRA adapter is the same as LoRA with the same rank r, while the expressivity is multiplied by gralora_k.
hybrid_r (`int`):
Hybrid GraLoRA rank, i.e. the rank allocated to the vanilla LoRA component when using the hybrid GraLoRA method.
Hybrid GraLoRA, a combination of GraLoRA and vanilla LoRA, becomes available when hybrid_r > 0. The
parameter count of the GraLoRA adapter is r + hybrid_r.
target_modules (`Union[List[str], str]`):
List of module names or regex expression of the module names to replace with GraLoRA. For example, ['q',
'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. This can also be the wildcard 'all-linear',
which matches all linear/Conv1D layers (if the model is a PreTrainedModel, the output layer is excluded).
If not specified, modules will be chosen according to the model architecture. If the architecture is not
known, an error will be raised -- in this case, you should specify the target modules manually. To avoid
targeting any modules (because you want to apply `target_parameters`), set `target_modules=[]`.
alpha (`int`):
GraLoRA alpha is the scaling factor for the GraLoRA adapter. The scaling becomes alpha / (r + hybrid_r).
gralora_dropout (`float`):
GraLoRA dropout is the dropout probability for the GraLoRA adapter. It is used to prevent overfitting and
improve the generalization of the GraLoRA adapter.
gralora_k (`int`):
GraLoRA k determines the number of subblocks in the GraLoRA adapter. The rank r must be divisible by
gralora_k for the GraLoRA adapter to be valid. The total parameter count is preserved regardless of
gralora_k. The overall effective rank of the GraLoRA adapter is multiplied by gralora_k, while the rank of each
subblock is divided by gralora_k. gralora_k=2 is recommended for rank 32 or lower, and gralora_k=4 is
recommended for rank 64 or higher.
fan_in_fan_out (`bool`):
Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
bias (`str`):
Bias type for gralora. Can be 'none', 'all' or 'gralora_only'. If 'all' or 'gralora_only', the
corresponding biases will be updated during training. Be aware that this means that, even when disabling
the adapters, the model will not produce the same output as the base model would have without adaptation.
init_weights (`bool`):
Whether to initialize the weights of the GraLoRA layers with their default initialization. Don't change
this setting, except if you know exactly what you're doing.
layers_to_transform (`Union[List[int], int]`):
The layer indices to transform. If this argument is specified, PEFT will transform only the layer indices
that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at
this index. This only works when target_modules is a list of str.
layers_pattern (`Optional[Union[List[str], str]]`):
The layer pattern name, used only if `layers_to_transform` is different from None and if the layer pattern is
not in the common layers pattern. This only works when target_modules is a list of str. This should target
the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
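Example (a minimal sketch; `base_model` is assumed to be a loaded transformers model):
```py
>>> from peft import GraloraConfig, get_peft_model
>>> config = GraloraConfig(r=32, alpha=64, gralora_k=2, target_modules=["q_proj", "v_proj"])
>>> model = get_peft_model(base_model, config)
```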
"""
r: int = field(
default=32,
metadata={
"help": (
"GraLoRA attention dimension determines the rank of the GraLoRA adapter. "
"The total parameter count of the GraLoRA adapter is same as LoRA with same rank r, while the expressivitiy is multiplied by gralora_k."
)
},
)
hybrid_r: int = field(
default=0,
metadata={
"help": (
"hybrid_r is the rank allocated to vanilla LoRA method when using Hybrid GraLoRA method. "
"Hybrid GraLoRA, a combination of GraLoRA and vanilla LoRA, becomes available when hybrid_r > 0. "
"r + hybrid_r determines the parameter count of the GraLoRA adapter."
)
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with LoRA. "
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
"This can also be a wildcard 'all-linear' which matches all linear/Conv1D "
"(if the model is a PreTrainedModel, the output layer excluded). "
"If not specified, modules will be chosen according to the model architecture, If the architecture is "
"not known, an error will be raised -- in this case, you should specify the target modules manually. "
"To avoid targeting any modules (because you want to apply `target_parameters`), set "
"`target_modules=[]`."
)
},
)
alpha: int = field(
default=64,
metadata={
"help": (
"gralora alpha is the scaling factor for the GraLoRA adapter. Scale becomes alpha / (r + hybrid_r). "
)
},
)
gralora_dropout: float = field(default=0.0, metadata={"help": "gralora dropout"})
gralora_k: int = field(
default=2,
metadata={
"help": (
"gralora_k determines the number of subblocks in the GraLoRA adapter. "
"The rank r must be divisible by gralora_k for the GraLoRA adapter to be valid. "
"The total parameter count is preserved regardles of gralora_k. "
"The entire rank of the GraLoRA adapter is increased by gralora_k, while the rank of each subblock is reduced by gralora_k. "
"gralora_k=2 is recommended for rank 32 or lower, and gralora_k=4 is recommended for rank 64 or higher. "
)
},
)
fan_in_fan_out: bool = field(
default=False,
metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
)
bias: str = field(
default="none", metadata={"help": "Bias type for gralora. Can be 'none', 'all' or 'gralora_only'"}
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules apart from gralora layers to be set as trainable and saved in the final checkpoint. For"
" example, in Sequence Classification or Token Classification tasks, the final layer"
" `classifier/score` are randomly initialized and as such need to be trainable and saved."
)
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the GraLoRA layers with their default initialization. "
"Don't change this setting, except if you know exactly what you're doing."
)
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": (
"The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. "
"If a single integer is passed, PEFT will transform only the layer at this index. "
"This only works when target_modules is a list of str."
)
},
)
layers_pattern: Optional[str] = field(
default=None,
metadata={
"help": (
"The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. "
"This only works when target_modules is a list of str. This should target the `nn.ModuleList` of the "
"model, which is often called `'layers'` or `'h'`."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.GRALORA
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
if self.r % self.gralora_k != 0:
raise ValueError(f"r should be divisible by gralora_k, but got {self.r} and {self.gralora_k}")
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/gralora/config.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/gralora/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils.other import transpose
class GraloraLayer(BaseTunerLayer):
# List all names of layers that may contain adapter weight
adapter_layer_names = ("gralora_A", "gralora_B", "gralora_A_general", "gralora_B_general")
other_param_names = ("r", "hybrid_r", "alpha", "scaling", "gralora_dropout")
def __init__(self, base_layer: nn.Module, **kwargs):
self.base_layer = base_layer
self.r = {}
self.alpha = {}
self.gralora_k = {}
self.hybrid_r = {}
self.scaling = {}
self.gralora_dropout = nn.ModuleDict({})
self.gralora_A = nn.ParameterDict({})
self.gralora_B = nn.ParameterDict({})
self.gralora_A_general = nn.ModuleDict({})
self.gralora_B_general = nn.ModuleDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, Conv1D):
in_features, out_features = (
base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
)
else:
raise NotImplementedError(f"Unsupported layer type {type(base_layer)}")
self.in_features = in_features
self.out_features = out_features
self.kwargs = kwargs
def update_layer(
self,
adapter_name,
module_name,
r,
alpha,
gralora_dropout,
gralora_k: int = 2,
hybrid_r: int = 0,
init_weights: bool = True,
):
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
elif hybrid_r < 0:
raise ValueError(f"`hybrid_r` should be a non-negative integer value but the value passed is {hybrid_r}")
self.r[adapter_name] = r
self.alpha[adapter_name] = alpha
self.gralora_k[adapter_name] = gralora_k
self.hybrid_r[adapter_name] = hybrid_r
if gralora_dropout > 0.0:
gralora_dropout_layer = nn.Dropout(p=gralora_dropout)
else:
gralora_dropout_layer = nn.Identity()
self.gralora_dropout.update(nn.ModuleDict({adapter_name: gralora_dropout_layer}))
# Actual trainable parameters
if self.in_features % gralora_k != 0:
raise ValueError(
f"in_features should be divisible by gralora_k, but got {self.in_features} and {gralora_k}"
)
if self.out_features % gralora_k != 0:
raise ValueError(
f"out_features should be divisible by gralora_k, but got {self.out_features} and {gralora_k}"
)
subblock_in_features = self.in_features // gralora_k
subblock_out_features = self.out_features // gralora_k
# gralora_r is the rank allocated to GraLoRA method; hybrid_r is the rank allocated to vanilla LoRA
gralora_r = r
gralora_A = []
gralora_B = []
for _ in range(gralora_k):
new_A = nn.Parameter(torch.empty(gralora_r, subblock_in_features))
new_B = nn.Parameter(torch.empty(subblock_out_features, gralora_r))
if init_weights:
# Initialize to identity: A is random, B is zero
nn.init.kaiming_uniform_(new_A, a=math.sqrt(5))
nn.init.zeros_(new_B)
else:
# Initialize to random: both A and B are random (for testing)
nn.init.kaiming_uniform_(new_A, a=math.sqrt(5))
nn.init.kaiming_uniform_(new_B, a=math.sqrt(5))
gralora_A.append(new_A)
gralora_B.append(new_B)
# stack A and B and transpose to get the final shape
gralora_A = torch.stack(tuple(gralora_A), dim=0) # [N, gralora_r, in_features//N]
gralora_A = gralora_A.transpose(1, 2).contiguous() # [N, in_features//N, gralora_r]
gralora_B = torch.stack(tuple(gralora_B), dim=0) # [N, out_features//N, gralora_r]
gralora_B = gralora_B.transpose(1, 2).contiguous() # [N, gralora_r, out_features//N]
if hybrid_r > 0:
general_gralora_A = nn.Linear(self.in_features, hybrid_r, bias=False)
general_gralora_B = nn.Linear(hybrid_r, self.out_features, bias=False)
if init_weights:
# Initialize to identity: A is random, B is zero
nn.init.kaiming_uniform_(general_gralora_A.weight, a=math.sqrt(5))
nn.init.zeros_(general_gralora_B.weight)
else:
# Initialize to random: both A and B are random (for testing)
nn.init.kaiming_uniform_(general_gralora_A.weight, a=math.sqrt(5))
nn.init.kaiming_uniform_(general_gralora_B.weight, a=math.sqrt(5))
else:
general_gralora_A = nn.Identity()
general_gralora_B = nn.Identity()
self.gralora_A[adapter_name] = gralora_A
self.gralora_B[adapter_name] = gralora_B
self.gralora_A_general[adapter_name] = general_gralora_A
self.gralora_B_general[adapter_name] = general_gralora_B
self.module_name = module_name
self.scaling[adapter_name] = alpha / (gralora_r + hybrid_r)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
class Linear(nn.Linear, GraloraLayer):
# Gralora implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
module_name,
r: int = 0,
alpha: int = 1,
gralora_dropout: float = 0.0,
gralora_k: int = 2,
hybrid_r: int = 0,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: bool = True,
**kwargs,
) -> None:
# this gets the init from nn.Linear's super perspective, i.e. nn.Module.__init__, which should always be called
super(nn.Linear, self).__init__()
GraloraLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, module_name, r, alpha, gralora_dropout, gralora_k, hybrid_r, init_weights)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
from peft.tuners.tuners_utils import check_adapters_to_merge
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.gralora_A.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
orig_weights += delta_weight
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
base_layer.weight.data += delta_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.gralora_A.keys():
delta_weight = self.get_delta_weight(active_adapter)
self.get_base_layer().weight.data -= delta_weight
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for GraLoRA adapter.
GraLoRA applies block-wise low-rank adaptation with information exchange. This method computes the equivalent
weight matrix that would be added to the base weight during merge.
Args:
adapter (str): The name of the adapter
Returns:
torch.Tensor: The delta weight matrix with shape [out_features, in_features]
"""
gralora_A = self.gralora_A[adapter] # [N, in_features//N, rank]
gralora_B = self.gralora_B[adapter] # [N, rank, out_features//N]
gralora_A_general = self.gralora_A_general[adapter]
gralora_B_general = self.gralora_B_general[adapter]
device = gralora_A.device
dtype = gralora_A.dtype
gralora_k = self.gralora_k[adapter]
hybrid_r = self.hybrid_r[adapter]
r = self.r[adapter]
# Handle CPU fp16/bf16 casting
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
if cast_to_fp32:
gralora_A = gralora_A.float()
gralora_B = gralora_B.float()
# Get dimensions
in_features = self.in_features
out_features = self.out_features
gralora_rank = r
subblock_gralora_rank = gralora_rank // gralora_k
# scatter gralora_A to get the scattered weight matrix
l_indices = torch.arange(in_features, device=device)
n_indices = l_indices // (in_features // gralora_k)
i_indices = l_indices % (in_features // gralora_k)
gralora_A_scattered = torch.zeros(
in_features, gralora_k, gralora_rank, device=device, dtype=torch.float32 if cast_to_fp32 else dtype
)
gralora_A_scattered.scatter_(
1,
n_indices.unsqueeze(1).unsqueeze(2).expand(-1, 1, gralora_rank),
gralora_A[n_indices, i_indices, :].unsqueeze(1),
)
# compute the delta weight
delta_weight = (
torch.einsum(
"ikr, kro -> iko",
gralora_A_scattered.view(in_features, gralora_k, gralora_k, subblock_gralora_rank)
.permute(0, 2, 1, 3)
.reshape(in_features, gralora_k, gralora_rank),
gralora_B,
)
.reshape(in_features, out_features)
.T
)
# Add hybrid LoRA component if present
if hybrid_r > 0:
weight_A_general = gralora_A_general.weight # [hybrid_r, in_features]
weight_B_general = gralora_B_general.weight # [out_features, hybrid_r]
if cast_to_fp32:
weight_A_general = weight_A_general.float()
weight_B_general = weight_B_general.float()
# Compute delta for hybrid part: [out_features, hybrid_r] @ [hybrid_r, in_features]
delta_weight += weight_B_general @ weight_A_general
# Apply scaling and transpose if needed
delta_weight = transpose(delta_weight, self.fan_in_fan_out) * self.scaling[adapter]
# Cast back if needed
if cast_to_fp32:
delta_weight = delta_weight.to(dtype=dtype)
return delta_weight
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
# Handle 2D input: [batch, features] -> [batch, 1, features]
# This is common for MLPs and other non-sequence models
x_is_2d = x.ndim == 2
if x_is_2d:
x = x.unsqueeze(1) # [B, F] -> [B, 1, F]
for active_adapter in self.active_adapters:
if active_adapter not in self.gralora_A.keys():
continue
gralora_A = self.gralora_A[active_adapter]
gralora_B = self.gralora_B[active_adapter]
gralora_A_general = self.gralora_A_general[active_adapter]
gralora_B_general = self.gralora_B_general[active_adapter]
r = self.r[active_adapter]
gralora_rank = r
gralora_k = self.gralora_k[active_adapter]
hybrid_r = self.hybrid_r[active_adapter]
dropout = self.gralora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
gralora_dtype = gralora_A.dtype
B, L, in_features = x.shape
N = gralora_k
subblock_gralora_rank = gralora_rank // N
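# GraLoRA block computation: project each of the N input sub-blocks with its own A_n ([B, L, N, r]), regroup
# the per-block rank slices across sub-blocks (information exchange), then map each sub-block to its output
# slice with B_n and flatten back to [B, L, out_features].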
output = torch.einsum(
"bljr, jro -> bljo",
torch.einsum(
"blni, nir -> blnr",
dropout(x.to(gralora_dtype)).view(B, L, N, in_features // N),
gralora_A,
)
.view(B, L, N, N, subblock_gralora_rank)
.permute(0, 1, 3, 2, 4)
.reshape(B, L, N, N * subblock_gralora_rank),
gralora_B,
).reshape(B, L, -1)
# Squeeze back to 2D if input was 2D
if x_is_2d:
output = output.squeeze(1) # [B, 1, F] -> [B, F]
result += scaling * output.to(torch_result_dtype)
if hybrid_r > 0:
hybrid_output = gralora_B_general(gralora_A_general(dropout(x.to(gralora_dtype))))
if x_is_2d:
hybrid_output = hybrid_output.squeeze(1)
result += scaling * hybrid_output.to(torch_result_dtype)
result = result.to(previous_dtype)
return result
def supports_lora_conversion(self, adapter_name: str = "default") -> bool:
return True
def __repr__(self) -> str:
rep = super().__repr__()
return "gralora." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/gralora/layer.py",
"license": "Apache License 2.0",
"lines": 329,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/gralora/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
import torch
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer
from peft.utils import (
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
)
from .layer import GraloraLayer, Linear
class GraloraModel(BaseTuner):
"""
    Creates a Granular Low-Rank Adaptation (GraLoRA) model from a pretrained transformers model.
Args:
model ([`~transformers.PreTrainedModel`]): The model to be adapted.
config ([`GraloraConfig`]): The configuration of the Gralora model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
Returns:
`torch.nn.Module`: The Gralora model.
Example:
```py
>>> from transformers import AutoModelForCausalLM
>>> from peft import GraloraConfig, get_peft_model
>>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
>>> config = GraloraConfig(r=128)
>>> model = get_peft_model(base_model, config)
```
**Attributes**:
- **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`GraloraConfig`]): The configuration of the Gralora model.
"""
# The unique prefix for GraLoRA method
prefix: str = "gralora_"
# The class of tuner layer for GraLoRA method
tuner_layer_cls = GraloraLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING
def _create_and_replace(
self,
gralora_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
r = gralora_config.r
bias = hasattr(target, "bias") and target.bias is not None
kwargs = {
"r": r,
"alpha": gralora_config.alpha,
"gralora_dropout": gralora_config.gralora_dropout,
"gralora_k": gralora_config.gralora_k,
"fan_in_fan_out": gralora_config.fan_in_fan_out,
"hybrid_r": gralora_config.hybrid_r,
"init_weights": gralora_config.init_weights,
}
kwargs["bias"] = bias
if isinstance(target, Linear):
target.update_layer(
adapter_name,
current_key,
r,
gralora_config.alpha,
gralora_config.gralora_dropout,
gralora_config.gralora_k,
gralora_config.hybrid_r,
gralora_config.init_weights,
)
else:
new_module = self._create_new_module(gralora_config, adapter_name, target, current_key, **kwargs)
if adapter_name not in self.active_adapters:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _create_new_module(gralora_config, adapter_name, target, module_name, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = gralora_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
kwargs["is_target_conv_1d_layer"] = True
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = gralora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. Currently, only the following modules are supported: "
"`torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`."
)
new_module = Linear(
target,
adapter_name,
module_name,
**kwargs,
)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/gralora/model.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/osf/config.py | from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class OSFConfig(PeftConfig):
"""
Configuration for Orthogonal Subspace Fine-tuning (OSF).
Args:
effective_rank (`int` or `float`, *optional*):
Preserved SVD rank ("high" subspace). The top-``effective_rank`` singular directions are frozen and
retained across tasks; the remaining dimensions form the trainable low-rank subspace. If `None`, defaults
to 50% of the smaller weight dimension per target module. Note: This differs from LoRA's `r` (trainable
rank). In OSF, the trainable rank is `min(weight.shape) - effective_rank`.
target_modules (`Union[list[str], str]`, *optional*):
The names of the modules to apply OSF to. Can be a list of module names or `"all-linear"`.
rank_pattern (`dict[str, int|float]`, *optional*):
A dictionary of regex patterns to override `effective_rank` for specific modules. Values can be absolute
integers or fractions in (0, 1], interpreted as a fraction of the smaller matrix dimension per target.
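    Example:
    A minimal usage sketch; the base model name below is illustrative, not prescribed by OSF.
    ```py
    >>> from transformers import AutoModelForCausalLM
    >>> from peft import OSFConfig, get_peft_model
    >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    >>> config = OSFConfig(target_modules="all-linear", effective_rank=0.5)
    >>> model = get_peft_model(base_model, config)
    ```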
"""
effective_rank: Optional[Union[int, float]] = field(
default=None,
metadata={
"help": (
'Preserved SVD rank ("high" subspace). The top-`effective_rank` singular directions are frozen '
"and retained across tasks; the remaining dimensions form the trainable low-rank subspace. "
"Trainable rank equals min(weight.shape) - effective_rank. If None, defaults to 50% of the smaller "
"weight dimension per target module. Floats in (0, 1] are interpreted as a fraction of the smaller "
"matrix dimension per target."
)
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "The names of the modules to apply OSF to. Can be a list of module names or 'all-linear'."},
)
rank_pattern: Optional[dict[str, Union[int, float]]] = field(
default=None,
metadata={
"help": (
"A dictionary of regex patterns to override effective_rank per module. Values can be absolute "
"integers or fractions in (0, 1], interpreted as a fraction of the smaller matrix dimension."
)
},
)
# Additional optional fields for compatibility with generic test harnesses
init_weights: Optional[bool] = field(
default=None,
metadata={
"help": (
"If provided, toggles custom weight initialization behavior for certain methods. OSF ignores this "
"flag but accepts it for config compatibility."
)
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={"help": "Optional list of module names to save separately (ignored by OSF but accepted)."},
)
target_svd_config: Optional[dict[str, int]] = field(
default=None,
metadata={
"help": (
"Optional per-parameter SVD target rank mapping (e.g., {'lin0.weight': 8}). OSF currently ignores "
"this field but accepts it for forward compatibility."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.OSF
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/osf/config.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/peft:src/peft/tuners/osf/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners._buffer_dict import BufferDict
from peft.tuners.tuners_utils import BaseTunerLayer
from .utils import (
decompose_weight_matrix,
reconstruct_weight_matrix,
)
class OSFLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names: tuple[str, ...] = ("osf_svd_params",)
# All names of other parameters that may contain adapter-related parameters
other_param_names: tuple[str, ...] = ("_osf_U_high", "_osf_S_high", "_osf_V_high")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.effective_rank = {}
# Map adapter_name -> ParameterDict{"U_low", "S_low", "V_low"}
self.osf_svd_params = nn.ModuleDict({})
# Store high-rank (frozen) components as buffers that track device moves
self._osf_U_high = BufferDict({})
self._osf_S_high = BufferDict({})
self._osf_V_high = BufferDict({})
# Track hook handles for cleanup
self.hook_handles = []
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
# Get layer dimensions
base_layer = self.get_base_layer()
# Prefer the universally available weight shape when possible.
if (
hasattr(base_layer, "weight")
and isinstance(base_layer.weight, torch.Tensor)
and base_layer.weight.ndim == 2
):
# For Linear-like modules, weight is [out_features, in_features]
out_features, in_features = base_layer.weight.shape
elif isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"):
# QuantLinear
in_features, out_features = base_layer.infeatures, base_layer.outfeatures
elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"):
# Megatron ColumnParallelLinear, RowParallelLinear
in_features, out_features = base_layer.input_size, base_layer.output_size
elif hasattr(base_layer, "in_features") and hasattr(base_layer, "out_features"):
in_features, out_features = base_layer.in_features, base_layer.out_features
else:
in_features, out_features = None, None
warnings.warn(
f"Unsupported layer type '{type(base_layer)}' encountered; could not infer in/out features.",
UserWarning,
)
self.in_features = in_features
self.out_features = out_features
def update_layer(self, adapter_name: str, effective_rank: int, **kwargs):
"""Update layer to add a new OSF adapter."""
if effective_rank <= 0:
raise ValueError(
f"`effective_rank` should be a positive integer value but the value passed is {effective_rank}"
)
# Store the rank for this adapter
self.effective_rank[adapter_name] = effective_rank
# Perform SVD decomposition on the base layer weight
base_layer = self.get_base_layer()
weight = base_layer.weight.data
svd_dict = decompose_weight_matrix(weight, top_k=effective_rank)
# Store high-rank (frozen) components as buffers
self._osf_U_high[adapter_name] = svd_dict["U_high"]
self._osf_S_high[adapter_name] = svd_dict["S_high"]
self._osf_V_high[adapter_name] = svd_dict["V_high"]
# Create ParameterDict for trainable low-rank components
svd_params = nn.ParameterDict(
{
"U_low": svd_dict["U_low"],
"S_low": svd_dict["S_low"],
"V_low": svd_dict["V_low"],
}
)
self.osf_svd_params[adapter_name] = svd_params
# Attach gradient hooks for orthogonal projection
self._attach_hooks(adapter_name)
# Set the adapter as active
self.set_adapter(self.active_adapters)
def _attach_hooks(self, adapter_name: str):
"""Attach gradient hooks for the given adapter."""
if adapter_name not in self.osf_svd_params:
return
svd_module = self.osf_svd_params[adapter_name]
def hook(grad, name: str, adapter: str, layer: OSFLayer):
# Project gradient to be orthogonal to high-rank subspace for U_low/V_low
# Access buffers dynamically to ensure they're on the correct device
if name == "U_low":
U_high = layer._osf_U_high[adapter]
proj = U_high @ (U_high.transpose(0, 1) @ grad)
return grad - proj
elif name == "V_low":
V_high = layer._osf_V_high[adapter]
proj = (grad @ V_high.transpose(0, 1)) @ V_high
return grad - proj
return grad
# Store hook handles for later cleanup
handle_u = svd_module["U_low"].register_hook(partial(hook, name="U_low", adapter=adapter_name, layer=self))
handle_v = svd_module["V_low"].register_hook(partial(hook, name="V_low", adapter=adapter_name, layer=self))
self.hook_handles.extend([handle_u, handle_v])
def _detach_hooks(self):
"""Remove all gradient hooks."""
for handle in self.hook_handles:
handle.remove()
self.hook_handles.clear()
def _reconstruct_weight(self, adapter_name: str) -> torch.Tensor:
"""Reconstruct weight matrix from SVD components for given adapter."""
if adapter_name not in self.osf_svd_params:
return self.get_base_layer().weight
svd_module = self.osf_svd_params[adapter_name]
svd_dict = {
"U_high": self._osf_U_high[adapter_name],
"S_high": self._osf_S_high[adapter_name],
"V_high": self._osf_V_high[adapter_name],
"U_low": svd_module["U_low"],
"S_low": svd_module["S_low"],
"V_low": svd_module["V_low"],
}
return reconstruct_weight_matrix(svd_dict)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
if adapter_names is None:
adapter_names = self.active_adapters
for active_adapter in adapter_names:
if active_adapter in self.osf_svd_params.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
new_weight = self._reconstruct_weight(active_adapter)
if not torch.isfinite(new_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = new_weight.to(orig_weight.dtype)
else:
new_weight = self._reconstruct_weight(active_adapter)
base_layer.weight.data = new_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
# For OSF, unmerging means restoring the original weight
# Since we modify the weight in-place, we need to store the original weight
# This is a limitation of the current OSF implementation
warnings.warn("OSF does not support unmerging. Original weights are permanently modified.")
def __del__(self):
"""Cleanup hooks on deletion."""
self._detach_hooks()
class Linear(nn.Module, OSFLayer):
# OSF implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
        effective_rank: Optional[int] = None,
**kwargs,
) -> None:
super().__init__()
OSFLayer.__init__(self, base_layer, **kwargs)
# Set default effective_rank if not provided
if effective_rank is None:
# Default to 50% of min dimension
effective_rank = min(self.in_features, self.out_features) // 2
self._active_adapter = adapter_name
self.update_layer(adapter_name, effective_rank, **kwargs)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
# Use reconstructed weight for forward pass
base_layer = self.get_base_layer()
bias = base_layer.bias
# Use the active adapter's reconstructed weight
active_adapter = self.active_adapters[0] if self.active_adapters else None
if active_adapter and active_adapter in self.osf_svd_params:
weight = self._reconstruct_weight(active_adapter)
orig_dtype = x.dtype # assume that the intended dtype is that of the input
x = self._cast_input_dtype(x, weight.dtype)
if bias is not None:
bias = bias.to(weight.dtype)
result = F.linear(x, weight, bias)
result = result.to(orig_dtype)
else:
result = self.base_layer(x, *args, **kwargs)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "osf." + rep
def dispatch_default(
target: torch.nn.Module,
adapter_name: str,
osf_config,
**kwargs,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
new_module = Linear(target, adapter_name, **kwargs)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/osf/layer.py",
"license": "Apache License 2.0",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/osf/model.py | from __future__ import annotations
import re
import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTuner
from peft.utils.constants import TRANSFORMERS_MODELS_TO_OSF_TARGET_MODULES_MAPPING
from .layer import OSFLayer, dispatch_default
class OSFModel(BaseTuner):
"""A minimal tuner implementing Orthogonal Subspace Fine-tuning."""
prefix: str = "osf_"
tuner_layer_cls = OSFLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_OSF_TARGET_MODULES_MAPPING
def __init__(
self,
model,
config,
adapter_name,
low_cpu_mem_usage: bool = False,
state_dict: dict[str, torch.Tensor] | None = None,
):
# Pass state_dict through for compatibility with BaseTuner
super().__init__(
model,
config,
adapter_name,
low_cpu_mem_usage=low_cpu_mem_usage,
state_dict=state_dict,
)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped base model.
This mirrors the behavior of other tuners (e.g., LoRA), ensuring attributes like `device` resolve to the
underlying transformers model.
"""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "model": # avoid infinite recursion during init
raise
return getattr(self.model, name)
def _prepare_adapter_config(self, peft_config, model_config):
# If target_modules is unspecified, try mapping; else fall back to all linear layers for custom models
if getattr(peft_config, "target_modules", None) is None:
model_type = model_config.get("model_type")
if model_type in self.target_module_mapping:
peft_config.target_modules = set(self.target_module_mapping[model_type])
else:
from peft.utils.constants import INCLUDE_LINEAR_LAYERS_SHORTHAND
peft_config.target_modules = INCLUDE_LINEAR_LAYERS_SHORTHAND
return peft_config
def _create_and_replace(
self,
osf_config,
adapter_name: str,
target: nn.Module,
target_name: str,
parent: nn.Module,
current_key: str,
*,
parameter_name: str | None = None,
) -> None:
# OSF only works on 2D weight matrices
if not hasattr(target, "weight") or len(target.weight.shape) != 2:
return None
# Determine effective rank for this target (supports int or fractional in (0,1])
def _resolve_rank(value, min_dim: int) -> int:
if value is None:
return max(min_dim // 2, 0)
# floats in (0,1] => fraction of min_dim
if isinstance(value, float) and 0 < value <= 1:
r = int(min_dim * value)
else:
r = int(value)
return max(min(min_dim, r), 0)
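        # Illustrative example (comment only): effective_rank=0.5 on a 768x768 weight resolves
        # to 384 frozen high-rank directions, leaving min_dim - 384 = 384 trainable ones.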
min_dim = min(target.weight.shape)
effective_rank = _resolve_rank(getattr(osf_config, "effective_rank", None), min_dim)
# Check for per-module rank overrides (allow int or fractional)
if hasattr(osf_config, "rank_pattern") and osf_config.rank_pattern:
for pattern, rank in osf_config.rank_pattern.items():
if re.search(pattern, current_key):
effective_rank = _resolve_rank(rank, min_dim)
break
kwargs = {
"effective_rank": effective_rank,
}
# Create a new or update an existing OSF layer in place
if isinstance(target, OSFLayer):
target.update_layer(adapter_name, **kwargs)
else:
new_module = dispatch_default(target, adapter_name, osf_config, **kwargs)
if new_module is None:
return None
# If adding an additional adapter, keep it frozen initially
if adapter_name not in self.active_adapters:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for n, p in model.named_parameters():
# Only OSF adapter parameters (in osf_svd_params) should be trainable
if "osf_svd_params" not in n:
p.requires_grad = False
# Use BaseTuner's merge and merge_and_unload implementations.
# Explicitly disallow unmerging at the model level for OSF.
def unmerge_adapter(self, *args, **kwargs):
raise NotImplementedError("OSF models do not support unmerging")
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/osf/model.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:src/peft/tuners/osf/utils.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Orthogonal Subspace Learning with Adaptive OSF."""
from __future__ import annotations
from typing import Any
import torch
import torch.distributed as dist
from torch import nn
# Note: OSF now relies on OSFLayer + BaseTuner; no model-level helpers required here.
__all__ = [
"decompose_weight_matrix",
"project_gradient_to_orthogonal_space",
"reconstruct_weight_matrix",
]
def _wait_if_async(tensor):
"""Wait for AsyncCollectiveTensor if needed, otherwise return tensor as-is."""
if hasattr(tensor, "wait"):
return tensor.wait()
return tensor
def decompose_weight_matrix(weight: torch.Tensor, top_k: int) -> dict[str, Any]:
"""Perform an SVD of ``weight`` and split it into frozen and trainable parts."""
device_local = weight.device
orig_dtype = weight.dtype
W = weight.to(torch.float32)
U, S, Vt = torch.linalg.svd(W, full_matrices=False)
k = min(top_k, S.shape[0])
svd = {
"U_high": U[:, :k].contiguous().detach().to(device=device_local, dtype=orig_dtype),
"S_high": S[:k].contiguous().detach().to(device=device_local, dtype=orig_dtype),
"V_high": Vt[:k, :].contiguous().detach().to(device=device_local, dtype=orig_dtype),
"U_low": nn.Parameter(U[:, k:].contiguous().detach().to(device=device_local, dtype=orig_dtype)),
"S_low": nn.Parameter(S[k:].contiguous().detach().to(device=device_local, dtype=orig_dtype)),
"V_low": nn.Parameter(Vt[k:, :].contiguous().detach().to(device=device_local, dtype=orig_dtype)),
"rank_high": k,
}
return svd
def reconstruct_weight_matrix(svd_dict: dict[str, torch.Tensor]) -> torch.Tensor:
"""Reconstruct a weight matrix from its SVD components."""
U_high = svd_dict["U_high"]
S_high = svd_dict["S_high"]
V_high = svd_dict["V_high"]
U_low = svd_dict["U_low"]
S_low = svd_dict["S_low"]
V_low = svd_dict["V_low"]
high_part = (
torch.mm(U_high * S_high.unsqueeze(0), V_high)
if U_high.numel() > 0 and S_high.numel() > 0
else torch.zeros(U_low.size(0), V_low.size(1), device=U_high.device)
)
low_part = (
torch.mm(U_low * S_low.unsqueeze(0), V_low)
if U_low.numel() > 0 and S_low.numel() > 0
else torch.zeros(U_high.size(0), V_high.size(1), device=U_low.device)
)
return high_part + low_part
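# Round-trip sketch (illustrative comment; tests/test_osf.py checks the same property):
#   svd = decompose_weight_matrix(weight, top_k=k)
#   weight_rec = reconstruct_weight_matrix(svd)  # matches `weight` up to float32 SVD precision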
def project_gradient_to_orthogonal_space(svd_dict: dict[str, Any]) -> None:
"""Project gradients of ``U_low`` and ``V_low`` to be orthogonal to the high rank space."""
if svd_dict["U_low"].grad is None and svd_dict["S_low"].grad is None and svd_dict["V_low"].grad is None:
return
U_high = svd_dict["U_high"]
V_high = svd_dict["V_high"]
# Project U_low gradients to space orthogonal to U_high
if svd_dict["U_low"].grad is not None:
dU = svd_dict["U_low"].grad
# Support distributed tensors by operating on the local shard
local_U_high = getattr(U_high, "to_local", lambda: U_high)()
local_dU = getattr(dU, "to_local", lambda: dU)()
# Perform projection computation using memory-efficient operations
# Memory-optimized projection: dU = dU - U_high @ (U_high.T @ dU)
# Use addmm_ for efficient in-place operation
# Compute local contribution to (U_high^T @ dU); all-reduce to get global projection
proj_coeff = torch.mm(local_U_high.transpose(0, 1), local_dU)
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
dist.all_reduce(proj_coeff, op=dist.ReduceOp.SUM)
# Apply projection using only local rows of U_high
local_dU.addmm_(local_U_high, proj_coeff, alpha=-1.0)
if hasattr(dU, "_local_tensor"):
dU._local_tensor.copy_(local_dU)
else:
dU.copy_(local_dU)
# Repeat projection for V_low using V_high
if svd_dict["V_low"].grad is not None:
dV = svd_dict["V_low"].grad
local_V_high = getattr(V_high, "to_local", lambda: V_high)()
local_dV = getattr(dV, "to_local", lambda: dV)()
# Compute Gram matrix G = V_high^T @ V_high for global projection across row-sharded V_high
# Assumes column dimension is consistent across ranks (row sharding over singular vectors)
G_local = torch.mm(local_V_high.transpose(0, 1), local_V_high)
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
dist.all_reduce(G_local, op=dist.ReduceOp.SUM)
# Apply projection: dV = dV - dV @ G (use local shard of dV)
update = torch.mm(local_dV, G_local)
local_dV.add_(update, alpha=-1.0)
if hasattr(dV, "_local_tensor"):
dV._local_tensor.copy_(local_dV)
else:
dV.copy_(local_dV)
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/osf/utils.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_osf.py | import pytest
import torch
from torch.testing import assert_close
from peft import OSFConfig, get_peft_model
from peft.tuners.osf.layer import OSFLayer
from peft.tuners.osf.utils import (
decompose_weight_matrix,
reconstruct_weight_matrix,
)
def test_osf_roundtrip():
w = torch.randn(10, 8)
svd = decompose_weight_matrix(w, top_k=4)
w_rec = reconstruct_weight_matrix(svd)
assert_close(w_rec, w, atol=1e-5, rtol=1e-5)
class DummyConfig(dict):
pass
class DummyModel(torch.nn.Module):
def __init__(self, config=None):
super().__init__()
self.config = config
self.linear = torch.nn.Linear(8, 4)
def forward(self, x):
return self.linear(x)
def test_osf_gradient_projection_hook():
torch.manual_seed(0)
model = DummyModel(DummyConfig())
# Specify target module explicitly for DummyModel
cfg = OSFConfig(target_modules=["linear"], effective_rank=2)
wrapped = get_peft_model(model, cfg)
x = torch.randn(3, 8)
wrapped(x).sum().backward()
# Access the injected OSF layer
osf_linear = wrapped.base_model.model.linear
adapter = wrapped.base_model.active_adapters[0]
U_high = osf_linear._osf_U_high[adapter]
V_high = osf_linear._osf_V_high[adapter]
svd_params = osf_linear.osf_svd_params[adapter]
# Check orthogonality of gradients after projection
proj_u = U_high.T @ svd_params["U_low"].grad
proj_v = svd_params["V_low"].grad @ V_high.T
assert_close(proj_u, torch.zeros_like(proj_u), atol=1e-6, rtol=1e-6)
assert_close(proj_v, torch.zeros_like(proj_v), atol=1e-6, rtol=1e-6)
def test_osf_merge_and_unload_and_unmerge_behavior():
model = DummyModel(DummyConfig())
cfg = OSFConfig(target_modules=["linear"], effective_rank=2)
wrapped = get_peft_model(model, cfg)
# merge_adapter should work via BaseTuner and OSFLayer.merge
osf_linear = wrapped.base_model.model.linear
assert isinstance(osf_linear, OSFLayer)
wrapped.merge_adapter()
assert osf_linear.merged, "OSF layer should be marked as merged after merge_adapter()"
# unmerge_adapter is not supported for OSF
with pytest.raises(NotImplementedError):
wrapped.unmerge_adapter()
# merge_and_unload should return the base model (no OSF wrappers)
merged_model = wrapped.merge_and_unload()
assert isinstance(merged_model.linear, torch.nn.Linear)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_osf.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/delora_finetuning/delora_finetuning.py | # This script is based on examples/randlora_finetuning/randlora_finetuning.py
import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import DeloraConfig, get_peft_model
def train_model(
base_model: str,
data_path: str,
output_dir: str,
batch_size: int,
num_epochs: int,
learning_rate: float,
cutoff_len: int,
val_set_size: int,
eval_step: int,
save_step: int,
device: str,
rank: int,
delora_lambda: int,
module_dropout: float,
target_modules: str,
hub_model_id: str,
push_to_hub: bool,
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
hf_token = os.getenv("HF_TOKEN")
# Setup device
device = torch.device(device)
print(f"Using device: {device}")
# load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
# Compute type
device_type = device.type
device_module = getattr(torch, device_type, torch.cuda)
bf16_supported = device_module.is_available() and device_module.is_bf16_supported()
dtype = torch.bfloat16 if bf16_supported else torch.float32
# Load the base model
model = AutoModelForCausalLM.from_pretrained(
base_model,
dtype=dtype,
)
# DeLoRA config for the PEFT model
peft_config = DeloraConfig(
r=rank,
delora_lambda=delora_lambda,
target_modules=(target_modules.split(",") if target_modules else None),
module_dropout=module_dropout,
bias="none",
)
# get the peft model with DeLoRA config
model = get_peft_model(model, peft_config)
    model.to(device)  # move the model to the selected accelerator device
tokenizer.pad_token = tokenizer.eos_token
# Load the dataset
dataset = load_dataset(data_path)
def tokenize_function(examples):
inputs = tokenizer(examples["text"], padding="max_length", truncation=True, max_length=cutoff_len)
inputs["labels"] = inputs["input_ids"].copy() # setting labels for a language modeling task
return inputs
# Tokenize the dataset and prepare for training
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
# Data collator to dynamically pad the batched examples
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
    # Compute the total number of training steps (used to size the warmup)
    max_steps = int((len(tokenized_datasets["train"]) // batch_size) * num_epochs)
# Define training arguments
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
        warmup_steps=int(max_steps * 0.1),  # 10% of total training steps
weight_decay=0.0,
logging_steps=eval_step,
save_steps=save_step,
save_total_limit=2,
push_to_hub=push_to_hub,
hub_model_id=hub_model_id,
gradient_accumulation_steps=16,
learning_rate=learning_rate,
hub_token=hf_token,
label_names=["labels"],
)
# Clear accelerator cache to free memory
device_module.empty_cache()
# Initialize the Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
)
# Start model training
trainer.train()
# Save and push the trained model and tokenizer
if push_to_hub:
# Push the main model to the hub
trainer.push_to_hub(commit_message="Fine-tuned model")
# Save the model and tokenizer locally
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Fine-tune LLaMA with DeLoRA")
parser.add_argument("--base_model", type=str, default="huggyllama/llama-7b", help="Base model path or name")
parser.add_argument(
"--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name"
)
parser.add_argument(
"--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model"
)
parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=3e-3, help="Learning rate")
parser.add_argument("--cutoff_len", type=int, default=512, help="Cutoff length for tokenization")
parser.add_argument("--val_set_size", type=int, default=500, help="Validation set size")
parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval")
parser.add_argument("--save_step", type=int, default=100, help="Save step interval")
parser.add_argument("--device", type=str, default="auto", help="Device to use for training")
parser.add_argument("--rank", type=int, default=32, help="DeLoRA basis rank")
parser.add_argument("--delora_lambda", type=int, default=640, help="DeLoRA alpha")
parser.add_argument("--module_dropout", type=float, default=0.05, help="DeLoRA dropout rate")
parser.add_argument(
"--target_modules", type=str, default=None, help="Comma-separated list of target modules for DeLoRA"
)
parser.add_argument(
"--hub_model_id",
type=str,
default="path/to/repo",
help="Repository name to push the model on the Hugging Face Hub",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
args = parser.parse_args()
if args.device == "auto":
args.device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
train_model(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
eval_step=args.eval_step,
save_step=args.save_step,
device=args.device,
rank=args.rank,
delora_lambda=args.delora_lambda,
module_dropout=args.module_dropout,
target_modules=args.target_modules,
hub_model_id=args.hub_model_id,
push_to_hub=args.push_to_hub,
)
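    # Example invocation (illustrative; adjust the model name, dataset, and paths to your setup):
    #   python delora_finetuning.py --base_model huggyllama/llama-7b \
    #       --data_path timdettmers/openassistant-guanaco --output_dir ./delora-llama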
| {
"repo_id": "huggingface/peft",
"file_path": "examples/delora_finetuning/delora_finetuning.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:src/peft/tuners/delora/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class DeloraConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`DeloraModel`].
Args:
r (`int`):
The rank of the DeLoRA adapter.
delora_lambda (`int`):
            The initial value of the boundary of the DeLoRA adapter. This variable sets an upper bound on the Frobenius
            norm of the weight change, preventing the finetuned model from deviating too much from the original model.
module_dropout (`float`):
The dropout probability for disabling DeLoRA modules during training.
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen,
excluding the output layer. If this is not specified, modules will be chosen according to the model
architecture. If the architecture is not known, an error will be raised -- in this case, you should specify
the target modules manually.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
bias (`str`):
Bias type for DeLoRA. Can be 'none', 'all' or 'delora_only'. If 'all' or 'delora_only', the corresponding
biases will be updated during training. Be aware that this means that, even when disabling the adapters,
the model will not produce the same output as the base model would have without adaptation.
init_weights (`bool`):
Whether to perform initialization of adapter weights. If `True` (default): A is initialized with kaiming
uniform initialization, while B is initialized with zeros. If `False`: A and B are both initialized with
kaiming uniform, immediately contributing a non-zero delta. This is generally discouraged for normal use.
layers_to_transform (`Union[List[int], int]`):
The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
that are specified in this list. If a single integer is passed, it will apply the transformations on the
layer at this index.
layers_pattern (`Optional[Union[List[str], str]]`):
The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the
`nn.ModuleList` of the model, which is often called `'layers'` or `'h'`.
rank_pattern (`dict`):
The mapping from layer names or regexp expression to ranks which are different from the default rank
specified by `r`. For example, `{'^model.decoder.layers.0.encoder_attn.k_proj': 16}`.
lambda_pattern (`dict`):
The mapping from layer names or regexp expression to lambdas which are different from the default lambda
specified by `delora_lambda`. For example, `{'^model.decoder.layers.0.encoder_attn.k_proj': 16}`.
modules_to_save (`Optional[List[str]]`):
List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
"""
r: int = field(default=8, metadata={"help": "DeLoRA rank"})
delora_lambda: int = field(
default=15,
metadata={
"help": "The initial value of the boundary of the DeLoRA adapter. This variable sets an upper bound to the "
"Frobenius norm of the weight change, avoiding the finetuned model to deviate too much from the original model."
},
)
module_dropout: float = field(
default=0.0, metadata={"help": "The dropout probability for disabling DeLoRA modules during training"}
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of module names or regex expression of the module names to replace with DeLoRA."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
"This can also be a wildcard 'all-linear' which matches all linear layers except the output layer."
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from DeLoRA."},
)
bias: str = field(default="none", metadata={"help": "Bias type for DeLoRA. Can be 'none' or 'all'"})
init_weights: bool = field(
default=True,
metadata={
"help": "Whether to perform initialization of adapter weights. If `True` (default): A is initialized with kaiming uniform "
"initialization, while B is initialized with zeros. If `False`: A and B are both initialized with kaiming uniform, "
"immediately contributing a non-zero delta. This is generally discouraged for normal use."
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that "
"are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the "
"common layers pattern. This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`."
},
)
rank_pattern: Optional[dict] = field(
default_factory=dict,
metadata={
"help": "The mapping from layer names or regexp expression to ranks which are different from the default rank specified "
"by `r`. For example, `{'^model.decoder.layers.0.encoder_attn.k_proj': 16}`."
},
)
lambda_pattern: Optional[dict] = field(
default_factory=dict,
metadata={
"help": "The mapping from layer names or regexp expression to lambdas which are different from the default lambda specified by `delora_lambda`."
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": "List of modules apart from DeLoRA layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` "
"are randomly initialized and as such need to be trainable and saved."
},
)
def __post_init__(self):
super().__post_init__()
# PeftType enum members are uppercase; use DELORA
self.peft_type = PeftType.DELORA
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
# if target_modules is a regex expression, then layers_to_transform should be None
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
# check for layers_to_transform and layers_pattern
if self.layers_pattern and not self.layers_to_transform:
raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/delora/config.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/delora/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import warnings
from typing import Any, Optional
import torch
import torch.nn as nn
from peft.tuners._buffer_dict import BufferDict
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class DeloraLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = (
"delora_A",
"delora_B",
"delora_lambda",
)
# All names of other parameters that may contain adapter-related parameters
other_param_names = (
"r",
"delora_dropout",
"delora_w_norm",
)
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.r = {}
self.delora_dropout = nn.ModuleDict({})
self.delora_A = nn.ParameterDict({})
self.delora_B = nn.ParameterDict({})
self.delora_lambda = nn.ParameterDict({})
# Use persistent buffers so they are included in state_dict and saved.
self.delora_w_norm = BufferDict({}, persistent=True)
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer_mod = self.get_base_layer()
if isinstance(base_layer_mod, nn.Linear):
self.in_features, self.out_features = base_layer_mod.in_features, base_layer_mod.out_features
else:
raise ValueError(f"Unsupported layer type {type(base_layer_mod)}")
@staticmethod
def _compute_delta(
A: torch.Tensor, B: torch.Tensor, delora_lambda: torch.Tensor, r: int, w_norm: torch.Tensor
) -> torch.Tensor:
"""Compute delta = B @ diag(delora_lambda/r / (||A_i||*||B^j||)) @ A, scaled by provided w_norm (per-input channel)"""
An = torch.clamp(A.norm(dim=1), min=1e-4)
Bn = torch.clamp(B.norm(dim=0), min=1e-4)
diag = torch.diag_embed(delora_lambda / r / (An * Bn))
delta = B @ diag @ A
delta = delta * w_norm.unsqueeze(0)
return delta
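    # Shape note (explanatory comment): A is [r, in_features], B is [out_features, r], so
    # B @ diag(...) @ A is [out_features, in_features]; multiplying by w_norm (shape
    # [in_features]) rescales each column with the corresponding column norm of the frozen
    # base weight, expressing the normalized update in the base layer's own scale.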
def get_delta_weight(self, adapter: str) -> torch.Tensor:
if adapter not in self.delora_A or adapter not in self.delora_B:
raise ValueError(f"Adapter {adapter} not found.")
delta = self._compute_delta(
self.delora_A[adapter],
self.delora_B[adapter],
self.delora_lambda[adapter],
self.r[adapter],
self.delora_w_norm[adapter],
)
return delta
def update_layer(
self,
adapter_name: str,
r: int,
delora_lambda: float,
module_dropout: float,
init_weights: bool = True,
inference_mode: bool = False,
**kwargs: Any,
) -> None:
"""Internal function to create delora adapter
Args:
adapter_name (`str`): Name for the adapter to add.
r (`int`): Rank for the added adapter.
delora_lambda (`float`): Boundary for the adapter's norm.
module_dropout (`float`): The dropout probability for disabling adapter during training.
init_weights (`bool`): Whether to initialize weights.
"""
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.delora_A[adapter_name] = nn.Parameter(torch.empty(r, self.in_features))
self.delora_B[adapter_name] = nn.Parameter(torch.empty(self.out_features, r))
self.delora_lambda[adapter_name] = nn.Parameter(torch.empty(1))
if module_dropout > 0.0:
module_dropout_layer = nn.Dropout(p=module_dropout)
else:
module_dropout_layer = nn.Identity()
self.delora_dropout.update(nn.ModuleDict({adapter_name: module_dropout_layer}))
# Initialize weights
self.reset_delora_parameters(adapter_name, init_weights, delora_lambda)
# Move new weights to device
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters, inference_mode=inference_mode)
def reset_delora_parameters(
self,
adapter_name: str,
init_weights: bool = True,
delora_lambda: float = 15.0,
) -> None:
if adapter_name not in self.delora_A.keys():
return
if init_weights is True:
nn.init.kaiming_uniform_(self.delora_A[adapter_name], a=math.sqrt(5))
nn.init.zeros_(self.delora_B[adapter_name])
else:
nn.init.kaiming_uniform_(self.delora_A[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.delora_B[adapter_name], a=math.sqrt(5))
self.delora_lambda[adapter_name].data.fill_(float(delora_lambda))
# capture a fixed norm for this adapter to use for future delta computations
with torch.no_grad():
w = self.get_base_layer().weight
if w.device.type != "meta":
w_norm = torch.norm(w.data, dim=0).detach()
else:
# For meta tensors, we can't compute the norm, so use a default value
w_norm = torch.ones(w.shape[1], device=w.device)
self.delora_w_norm[adapter_name] = w_norm
class DeloraLinear(nn.Module, DeloraLayer):
# DeLoRA implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
r: int,
delora_lambda: float,
module_dropout: float,
init_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
DeloraLayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, delora_lambda, module_dropout, init_weights)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
return
for active_adapter in adapter_names:
if active_adapter in self.delora_A.keys():
base_layer = self.get_base_layer()
delta_weight = (
self.get_delta_weight(active_adapter)
.detach()
.to(dtype=base_layer.weight.dtype, device=base_layer.weight.device)
)
with torch.no_grad():
if safe_merge:
orig_weights = base_layer.weight.data.clone()
orig_weights = orig_weights + delta_weight
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data.add_(delta_weight)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
Unmerge all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.delora_A.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
if not self.active_adapters:
return self.base_layer(x, *args, **kwargs).to(previous_dtype)
base_out = self.base_layer(x, *args, **kwargs)
add_out = torch.zeros_like(base_out)
for adapter in self.active_adapters:
if adapter not in self.delora_A:
continue
x_d = self.delora_dropout[adapter](x)
# Decomposed delta calculation
# 1. (x * w_norm) @ A.T
h = nn.functional.linear(x_d * self.delora_w_norm[adapter], self.delora_A[adapter])
# 2. h @ diag
An = torch.clamp(self.delora_A[adapter].norm(dim=1), min=1e-4)
Bn = torch.clamp(self.delora_B[adapter].norm(dim=0), min=1e-4)
scaling = (self.delora_lambda[adapter] / self.r[adapter]) / (An * Bn)
h = h * scaling
# 3. h @ B.T
h = nn.functional.linear(h, self.delora_B[adapter])
add_out += h
result = base_out + add_out.to(base_out.dtype)
result = result.to(previous_dtype)
return result
def supports_lora_conversion(self, adapter_name: str = "default") -> bool:
return True
def __repr__(self) -> str:
rep = super().__repr__()
return "delora." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/delora/layer.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/delora/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import torch
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer
from peft.utils import (
TRANSFORMERS_MODELS_TO_DELORA_TARGET_MODULES_MAPPING,
)
from peft.utils.other import get_pattern_key
from .config import DeloraConfig
from .layer import DeloraLayer, DeloraLinear
class DeloraModel(BaseTuner):
"""
Creates DeLoRA model from a pretrained transformers model.
The method is described in detail in [TODO].
Args:
model ([`torch.nn.Module`]): The model to be adapted.
config ([`DeloraConfig`]): The configuration of the DeLoRA model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
Returns:
`torch.nn.Module`: The DeLoRA model.
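    Example:
    A minimal usage sketch in the style of the other tuners; the base model name is illustrative.
    ```py
    >>> from transformers import AutoModelForCausalLM
    >>> from peft import DeloraConfig, get_peft_model
    >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    >>> config = DeloraConfig(r=32)
    >>> model = get_peft_model(base_model, config)
    ```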
**Attributes**:
- **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`DeloraConfig`]): The configuration of the DeLoRA model.
"""
prefix: str = "delora_"
tuner_layer_cls = DeloraLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_DELORA_TARGET_MODULES_MAPPING
def _check_new_adapter_config(self, config: DeloraConfig) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
super()._check_new_adapter_config(config)
def _create_and_replace(
self,
delora_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
# Regexp matching - Find key which matches current target_name in patterns provided
r_key = get_pattern_key(delora_config.rank_pattern.keys(), current_key)
lambda_key = get_pattern_key(delora_config.lambda_pattern.keys(), current_key)
r = delora_config.rank_pattern.get(r_key, delora_config.r)
delora_lambda = delora_config.lambda_pattern.get(lambda_key, delora_config.delora_lambda)
kwargs = {
"r": r,
"delora_lambda": delora_lambda,
"module_dropout": delora_config.module_dropout,
"init_weights": delora_config.init_weights,
}
if isinstance(target, DeloraLinear):
target.update_layer(adapter_name, **kwargs)
else:
new_module = self._create_new_module(delora_config, adapter_name, target, **kwargs)
if adapter_name != self.active_adapter:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _create_new_module(delora_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
new_module = DeloraLinear(target, adapter_name, **kwargs)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/delora/model.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/waveft_finetuning/waveft_finetuning.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import torch
import transformers
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
from peft import (
WaveFTConfig,
get_peft_model,
)
def train(
base_model: str,
data_path: str = "yahma/alpaca-cleaned",
output_dir: str = "waveft",
batch_size: int = 16,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 256,
val_set_size: int = 16,
eval_step: int = 100,
save_step: int = 100,
device_map: str = "auto",
waveft_n_frequency: int = 2592,
waveft_target_modules: list[str] = None,
waveft_scaling: float = 25.0,
waveft_wavelet_family: str = "db1",
waveft_use_idwt: bool = True,
dtype: str = "float16",
seed: Optional[int] = None,
):
# Set device_map to the right place when enabling DDP.
world_size = int(os.environ.get("WORLD_SIZE", 0)) or int(os.environ.get("PMI_SIZE", 0))
if world_size > 1 and device_map != "cpu":
from accelerate import Accelerator
device_map = {"": Accelerator().process_index}
# Set seed
if seed is not None:
set_seed(seed)
model_kwargs = {"dtype": getattr(torch, dtype), "device_map": device_map}
model = AutoModelForCausalLM.from_pretrained(base_model, **model_kwargs)
tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
    # Some tokenizers (e.g. llama) have no pad token; fall back to the EOS token
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
def tokenize(prompt, add_eos_token=True):
result = tokenizer(
prompt,
truncation=True,
max_length=cutoff_len,
padding=False,
return_tensors=None,
)
if (
result["input_ids"][-1] != tokenizer.eos_token_id
and len(result["input_ids"]) < cutoff_len
and add_eos_token
):
result["input_ids"].append(tokenizer.eos_token_id)
result["attention_mask"].append(1)
result["labels"] = result["input_ids"].copy()
return result
def generate_and_tokenize_prompt(example):
full_prompt = generate_prompt(example)
tokenized_full_prompt = tokenize(full_prompt)
return tokenized_full_prompt
config = WaveFTConfig(
n_frequency=waveft_n_frequency,
scaling=waveft_scaling,
wavelet_family=waveft_wavelet_family,
use_idwt=waveft_use_idwt,
target_modules=waveft_target_modules,
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset(data_path)
train_val = data["train"].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
train_data = train_val["train"].shuffle().map(generate_and_tokenize_prompt)
val_data = train_val["test"].shuffle().map(generate_and_tokenize_prompt)
trainer = transformers.Trainer(
model=model,
train_dataset=train_data,
eval_dataset=val_data,
args=transformers.TrainingArguments(
per_device_train_batch_size=batch_size,
warmup_steps=100,
num_train_epochs=num_epochs,
learning_rate=learning_rate,
logging_steps=100,
optim="adamw_torch",
eval_strategy="steps",
save_strategy="steps",
eval_steps=eval_step,
save_steps=save_step,
output_dir=output_dir,
save_total_limit=3,
load_best_model_at_end=True,
ddp_find_unused_parameters=False if world_size > 1 else None,
),
data_collator=transformers.DataCollatorForSeq2Seq(
tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
),
)
trainer.train()
model.save_pretrained(output_dir)
def generate_prompt(example):
return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{example["instruction"]}
### Response:
{example["output"]}"""
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--base_model", type=str)
parser.add_argument("--data_path", type=str, default="yahma/alpaca-cleaned")
parser.add_argument("--output_dir", type=str, default="waveft")
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--learning_rate", type=float, default=3e-4)
parser.add_argument("--cutoff_len", type=int, default=256)
parser.add_argument("--val_set_size", type=int, default=16)
parser.add_argument("--eval_step", type=int, default=100)
parser.add_argument("--save_step", type=int, default=100)
parser.add_argument("--device_map", type=str, default="auto")
parser.add_argument("--waveft_n_frequency", type=int, default=2592)
parser.add_argument("--waveft_target_modules", type=str, default=None)
parser.add_argument("--waveft_scaling", type=float, default=25.0)
parser.add_argument("--waveft_wavelet_family", type=str, default="db1")
    parser.add_argument("--waveft_use_idwt", action=argparse.BooleanOptionalAction, default=True)  # --no-waveft_use_idwt disables the IDWT
parser.add_argument("--dtype", type=str, default="float16")
parser.add_argument("--seed", type=int, default=None)
args = parser.parse_args()
train(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
eval_step=args.eval_step,
save_step=args.save_step,
device_map=args.device_map,
waveft_n_frequency=args.waveft_n_frequency,
waveft_target_modules=args.waveft_target_modules,
waveft_scaling=args.waveft_scaling,
waveft_wavelet_family=args.waveft_wavelet_family,
waveft_use_idwt=args.waveft_use_idwt,
dtype=args.dtype,
seed=args.seed,
)
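# Example invocation (illustrative; the base model name below is a placeholder and any
# causal LM checkpoint with compatible linear layers should work):
#
#   python waveft_finetuning.py \
#       --base_model meta-llama/Llama-2-7b-hf \
#       --data_path yahma/alpaca-cleaned \
#       --output_dir waveft \
#       --waveft_n_frequency 2592 \
#       --waveft_scaling 25.0 \
#       --waveft_wavelet_family db1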
| {
"repo_id": "huggingface/peft",
"file_path": "examples/waveft_finetuning/waveft_finetuning.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/waveft/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
from .constants import WAVELET_REDUCTIONS
@dataclass
class WaveFTConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`WaveFTModel`]. It is used to define the
parameters for Wavelet-based Fine-Tuning (WaveFT), an approach that leverages the sparsity of wavelet transforms
for parameter-efficient fine-tuning of pretrained models.
Args:
n_frequency (`int`):
Number of learnable wavelet coefficients for the Discrete Wavelet Transform (DWT). 'n_frequency' is an
integer that is greater than 0 and less than or equal to the total number of elements in the original
weight matrix (d_out * d_in). This parameter directly controls the number of trainable parameters for each
adapted layer. A higher 'n_frequency' generally leads to better performance but also increases GPU memory
usage, with a minor impact on training speed.
scaling (`float`):
The scaling factor applied to the reconstructed delta W matrix. This is a crucial hyperparameter, analogous
to `lora_alpha` in LoRA. It can be tuned during hyperparameter search. Our default value for SDXL
personalization is 25.
wavelet_family (`str`):
The wavelet family (e.g., 'db1', 'sym2', 'coif1') to use for the DWT and Inverse DWT (IDWT). Defaults to
'db1' (Haar wavelet). Different wavelet families have varying filter lengths which affect the training time
            substantially.
use_idwt (`bool`):
Set to False for efficient adaptation. Whether to use the Inverse Discrete Wavelet Transform (IDWT) to
reconstruct the delta weights from the learned wavelet coefficients. If `True` (default), the IDWT is
applied. If `False`, the learned coefficients are directly used to form a sparse delta weight matrix, which
is faster but performs worse for the SDXL personalization task.
random_loc_seed (`int`):
Seed for determining the random locations of the `n_frequency` learnable wavelet coefficients within the
full wavelet coefficient matrix.
target_modules (`Union[list[str],str]`):
List of module names or a regex expression identifying the modules to be adapted with WaveFT. For example,
`['q_proj', 'v_proj']` or `'.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'`. Currently, only linear
layers (`torch.nn.Linear`) are supported.
exclude_modules (`Optional[Union[List[str], str]]`):
List of module names or a regex expression for modules to exclude from WaveFT adaptation.
fan_in_fan_out (`bool`):
Set to `True` if the weights of the layer to be replaced are stored in `(fan_in, fan_out)` format. Default
is `False`.
bias (`str`):
            Bias type for WaveFT. Can be 'none', 'all', or 'waveft_only'. If 'waveft_only', biases are added only
            to the WaveFT components. If 'all', biases are added to both base and WaveFT components. If 'none', no
            new biases are added.
modules_to_save (`list[str]`):
List of modules, in addition to WaveFT layers, that should be marked as trainable and saved in the final
checkpoint. Useful for layers like classifiers in sequence or token classification tasks that are randomly
initialized and need training.
layers_to_transform (`Union[list[int],int]`):
Specific layer indices to transform. If provided, PEFT will only adapt layers at these indices. If a single
integer is given, only that layer is transformed.
layers_pattern (`Optional[Union[List[str], str]]`):
Pattern for layer names, used if `layers_to_transform` is specified and the layer pattern is not standard
(e.g., not 'layers' or 'h'). This should target the `nn.ModuleList` attribute in the model.
n_frequency_pattern (`dict`):
A dictionary mapping layer names (or regex) to specific `n_frequency` values, overriding the global
`n_frequency`. Example: `{"model.decoder.layers.0.encoder_attn.k_proj": 1000}`.
init_weights (`bool`):
Initialization strategy for the learnable wavelet coefficients (spectrum). If `True` (default),
coefficients are initialized to zeros. If `False`, coefficients are initialized from a standard normal
distribution scaled by a small factor.
proportional_parameters (`bool`):
If `True`, `n_frequency` is allocated proportionally to each layer's `input_dim * output_dim`. Default is
`False`. Note: This option is included for experimental thoroughness to allow researchers to reproduce
paper results, rather than for practical utility, as no beneficial scenarios have been identified.
"""
n_frequency: int = field(
default=2592, # Default value might need adjustment based on common use cases or paper findings
metadata={
"help": (
"Number of learnable wavelet coefficients for the Discrete Wavelet Transform (DWT). "
"'n_frequency' is an integer that is greater than 0 and less than or equal to the "
"total number of elements in the original weight matrix (d_out * d_in). "
"This parameter directly controls the number of trainable parameters for each adapted layer. "
"A higher 'n_frequency' generally leads to better performance but also increases "
"GPU memory usage, with a minor impact on training speed."
)
},
)
scaling: float = field(
        default=25.0,  # Default used for SDXL personalization in the WaveFT paper
metadata={
"help": (
"The scaling factor applied to the reconstructed delta W matrix. This is a crucial "
"hyperparameter, analogous to 'lora_alpha' in LoRA. It can be tuned during hyperparameter "
"search. Default value for SDXL personalization is 25. "
)
},
)
wavelet_family: str = field(
default="db1",
metadata={
"help": (
"The wavelet family (e.g., 'db1', 'sym2', 'coif1') to use for the DWT and Inverse DWT (IDWT). "
"Defaults to 'db1' (Haar wavelet). Different wavelet families have varying filter lengths "
"which affect the training time substantially. Size differences are handled automatically "
"if use_idwt is True."
)
},
)
use_idwt: bool = field(
default=True,
metadata={
"help": (
"Set to False for efficient adaptation. "
"Whether to use the Inverse Discrete Wavelet Transform (IDWT) to reconstruct the delta "
"weights from the learned wavelet coefficients. If True (default), the IDWT is applied. "
"If False, the learned coefficients are directly used to form a sparse delta weight matrix, "
"which is faster but performs worse for the SDXL personalization task."
)
},
)
random_loc_seed: int = field(
default=777,
metadata={
"help": (
"Seed for determining the random locations of the 'n_frequency' learnable wavelet "
"coefficients within the full wavelet coefficient matrix."
)
},
)
fan_in_fan_out: bool = field(
default=False,
metadata={
"help": (
"Set to True if the weights of the layer to be replaced are stored in (fan_in, fan_out) "
"format. Default is False."
)
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or a regex expression identifying the modules to be adapted with WaveFT. "
"For example, ['q_proj', 'v_proj'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
"Currently, only linear layers (torch.nn.Linear) are supported."
)
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex for modules to exclude from WaveFT adaptation."},
)
bias: str = field(
default="none",
metadata={
"help": (
"Bias type for WaveFT. Can be 'none', 'all', or 'waveft_only'. "
"If 'waveft_only', biases are added only to the WaveFT components. "
"If 'all', biases are added to both base and WaveFT components. "
"If 'none', no new biases are added."
)
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules, in addition to WaveFT layers, that should be marked as trainable "
"and saved in the final checkpoint. Useful for layers like classifiers in sequence "
"or token classification tasks that are randomly initialized and need training."
)
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": (
"Specific layer indices to transform. If provided, PEFT will only adapt layers at these "
"indices. If a single integer is given, only that layer is transformed."
)
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"Pattern for layer names, used if `layers_to_transform` is specified and the layer "
"pattern is not standard (e.g., not 'layers' or 'h'). This should target the "
"`nn.ModuleList` attribute in the model."
)
},
)
n_frequency_pattern: Optional[dict] = field(
default_factory=dict,
metadata={
"help": (
"A dictionary mapping layer names (or regex) to specific `n_frequency` values, "
'overriding the global `n_frequency`. Example: {"model.decoder.layers.0.encoder_attn.k_proj": 1000}.'
)
},
)
proportional_parameters: bool = field(
default=False,
metadata={
"help": (
"If True, 'n_frequency' is allocated proportionally to each layer's "
"input_dim * output_dim. Default is False. Note: This option is included "
"for experimental thoroughness to allow researchers to reproduce paper results, "
"rather than for practical utility, as no beneficial scenarios have been identified."
)
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Initialization strategy for the learnable wavelet coefficients (spectrum). "
"If True (default), coefficients are initialized to zeros. "
"If False, coefficients are initialized from a standard normal distribution scaled by a small factor."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.WAVEFT
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
# if target_modules is a regex expression, then layers_to_transform should be None
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
# if target_modules is a regex expression, then layers_pattern should be None
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
# check for layers_to_transform and layers_pattern
if self.layers_pattern and not self.layers_to_transform:
raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ")
if self.wavelet_family not in WAVELET_REDUCTIONS:
raise ValueError(
f"Wavelet family {self.wavelet_family} not supported. Supported wavelet families are: {list(WAVELET_REDUCTIONS.keys())}"
)
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/waveft/config.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/waveft/constants.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dimensional reduction amounts for different wavelet families during wavelet transforms. Each tuple (rows, cols)
represents the reduction in matrix dimensions that occurs when applying wavelet decomposition/reconstruction due to
boundary effects and filter sizes. These values are used to pre-pad matrices before wavelet processing to ensure the
reconstructed matrix maintains the original target dimensions.
"""
WAVELET_REDUCTIONS = {
"db1": (0, 0),
"db2": (2, 2),
"db3": (4, 4),
"db4": (6, 6),
"db5": (8, 8),
"db6": (10, 10),
"db7": (12, 12),
"db8": (14, 14),
"db9": (16, 16),
"db10": (18, 18),
"db11": (20, 20),
"db12": (22, 22),
"db13": (24, 24),
"db14": (26, 26),
"db15": (28, 28),
"db16": (30, 30),
"db17": (32, 32),
"db18": (34, 34),
"db19": (36, 36),
"db20": (38, 38),
"db21": (40, 40),
"db22": (42, 42),
"db23": (44, 44),
"db24": (46, 46),
"db25": (48, 48),
"db26": (50, 50),
"db27": (52, 52),
"db28": (54, 54),
"db29": (56, 56),
"db30": (58, 58),
"db31": (60, 60),
"db32": (62, 62),
"db33": (64, 64),
"db34": (66, 66),
"db35": (68, 68),
"db36": (70, 70),
"db37": (72, 72),
"db38": (74, 74),
"sym2": (2, 2),
"sym3": (4, 4),
"sym4": (6, 6),
"sym5": (8, 8),
"sym6": (10, 10),
"sym7": (12, 12),
"sym8": (14, 14),
"sym9": (16, 16),
"sym10": (18, 18),
"sym11": (20, 20),
"sym12": (22, 22),
"sym13": (24, 24),
"sym14": (26, 26),
"sym15": (28, 28),
"sym16": (30, 30),
"sym17": (32, 32),
"sym18": (34, 34),
"sym19": (36, 36),
"sym20": (38, 38),
"coif1": (4, 4),
"coif2": (10, 10),
"coif3": (16, 16),
"coif4": (22, 22),
"coif5": (28, 28),
"coif6": (34, 34),
"coif7": (40, 40),
"coif8": (46, 46),
"coif9": (52, 52),
"coif10": (58, 58),
"coif11": (64, 64),
"coif12": (70, 70),
"coif13": (76, 76),
"coif14": (82, 82),
"coif15": (88, 88),
"coif16": (94, 94),
"coif17": (100, 100),
}
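# Worked shape example (a rough sketch of how these reductions are consumed in
# WaveFTLayer.get_delta_weight; the layer dimensions below are made-up numbers):
#   For a 768 x 3072 weight with wavelet_family="db2" -> reduction (2, 2):
#     padded spectrum: (768 + 2) x (3072 + 2) = 770 x 3074 (both already even)
#     four sub-bands of shape 385 x 1537 are passed to waverec2d, whose
#     reconstruction comes back at 768 x 3072, recovering the original shape
#     (any residual mismatch would be center-cropped).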
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/waveft/constants.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/waveft/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.other import transpose
from .constants import WAVELET_REDUCTIONS
from .waverec2d import waverec2d
class WaveFTLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("waveft_spectrum",)
# All names of other parameters that may contain adapter-related parameters
other_param_names = (
"waveft_n_frequency",
"waveft_scaling",
"waveft_random_loc_seed",
"waveft_wavelet_family",
"waveft_indices",
"waveft_use_idwt",
)
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.waveft_n_frequency = {}
self.waveft_scaling = {}
self.waveft_spectrum = nn.ParameterDict({})
self.waveft_wavelet_family = {}
self.waveft_indices = {}
self.waveft_random_loc_seed = {}
self.waveft_use_idwt = {}
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
elif isinstance(base_layer, Conv1D):
self.in_features, self.out_features = (
base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape
)
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
def update_layer(
self, adapter_name, n_frequency, scaling, init_weights, random_loc_seed, wavelet_family="db1", use_idwt=True
):
if n_frequency <= 0:
raise ValueError(f"`n_frequency` should be a positive integer value but the value passed is {n_frequency}")
if n_frequency > self.in_features * self.out_features:
raise ValueError(
f"`n_frequency` should be less than or equal to the product of the input and output dimensions "
f"but the value passed is {n_frequency} and the product is {self.in_features * self.out_features}"
)
self.waveft_n_frequency[adapter_name] = n_frequency
self.waveft_random_loc_seed[adapter_name] = random_loc_seed
self.waveft_wavelet_family[adapter_name] = wavelet_family
self.waveft_use_idwt[adapter_name] = use_idwt
# Get the expanded dimensions based on wavelet family
reduction_rows, reduction_cols = WAVELET_REDUCTIONS[wavelet_family]
# Generate random indices within the original dimensions
# We handle padding separately in get_delta_weight
generator = torch.Generator().manual_seed(self.waveft_random_loc_seed[adapter_name])
indices = torch.randperm(self.out_features * self.in_features, generator=generator)[:n_frequency]
# Convert to row, col format for the original dimensions
self.waveft_indices[adapter_name] = torch.stack(
[indices // self.in_features, indices % self.in_features], dim=0
)
self.waveft_scaling[adapter_name] = scaling
# Actual trainable parameters
# Initialize based on init_weights
if init_weights:
# Initialize with zeros later using reset_wave_parameters
self.waveft_spectrum[adapter_name] = nn.Parameter(torch.empty(n_frequency), requires_grad=True)
self.reset_wave_parameters(adapter_name) # Initialize to zeros now
else:
# Initialize with randn scaled by a small std dev to prevent explosion
std_dev = 0.01 # Using a small std dev for initial random weights
self.waveft_spectrum[adapter_name] = nn.Parameter(torch.randn(n_frequency) * std_dev, requires_grad=True)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters)
@torch.no_grad()
def reset_wave_parameters(self, adapter_name):
if adapter_name in self.waveft_spectrum.keys():
nn.init.zeros_(self.waveft_spectrum[adapter_name])
def get_delta_weight(self, adapter) -> torch.Tensor:
spectrum = self.waveft_spectrum[adapter]
indices = self.waveft_indices[adapter].to(spectrum.device)
wavelet_family = self.waveft_wavelet_family[adapter]
# Choose whether to use IDWT or direct spectrum based on adapter setting
if self.waveft_use_idwt[adapter]:
reduction_rows, reduction_cols = WAVELET_REDUCTIONS[wavelet_family]
# Create a padded spectrum matrix with additional rows and columns
# to account for the reduction during wavelet reconstruction
padded_out_features = self.out_features + reduction_rows
padded_in_features = self.in_features + reduction_cols
# Make dimensions even if needed for wavelet processing
if padded_out_features % 2 != 0:
padded_out_features += 1
if padded_in_features % 2 != 0:
padded_in_features += 1
# Create the padded dense spectrum matrix
dense_spectrum = torch.zeros(
padded_out_features, padded_in_features, device=spectrum.device, dtype=spectrum.dtype
)
# Calculate padding offsets to center the original data in the padded matrix
row_offset = (padded_out_features - self.out_features) // 2
col_offset = (padded_in_features - self.in_features) // 2
# Adjust indices to account for padding offsets
padded_indices = indices.clone()
padded_indices[0, :] += row_offset
padded_indices[1, :] += col_offset
# Place spectrum values in the padded matrix
# Filter out any indices that would be out of bounds
valid_mask = (padded_indices[0, :] < padded_out_features) & (padded_indices[1, :] < padded_in_features)
valid_indices = padded_indices[:, valid_mask]
valid_spectrum = spectrum[valid_mask]
# Set the spectrum values in the padded matrix
dense_spectrum[valid_indices[0, :], valid_indices[1, :]] = valid_spectrum
# Split into four sub-bands
H, W = dense_spectrum.shape
H2, W2 = H // 2, W // 2
cA = dense_spectrum[:H2, :W2] # top-left
cH = dense_spectrum[:H2, W2:] # top-right
cV = dense_spectrum[H2:, :W2] # bottom-left
cD = dense_spectrum[H2:, W2:] # bottom-right
# Construct wavelet-coefficient tuple
coeffs = (cA, (cH, cV, cD))
# Reconstruct with the specified wavelet family
delta_weight = waverec2d(coeffs, wavelet_family) * self.waveft_scaling[adapter]
# Ensure the delta weight has exactly the correct dimensions
if delta_weight.shape[0] != self.out_features or delta_weight.shape[1] != self.in_features:
# Calculate where to start slicing to get a centered crop
start_row = (delta_weight.shape[0] - self.out_features) // 2
start_col = (delta_weight.shape[1] - self.in_features) // 2
# Slice to the exact output size needed
delta_weight = delta_weight[
start_row : start_row + self.out_features, start_col : start_col + self.in_features
]
else:
# Simple direct use of spectrum without IDWT
dense_spectrum = torch.zeros(
self.out_features, self.in_features, device=spectrum.device, dtype=spectrum.dtype
)
dense_spectrum[indices[0, :], indices[1, :]] = spectrum
delta_weight = dense_spectrum * self.waveft_scaling[adapter]
return delta_weight
class WaveFTLinear(nn.Module, WaveFTLayer):
# WaveFT implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
n_frequency: int = 1000,
scaling: float = 150.0,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: Union[bool, str] = False,
random_loc_seed: int = 777,
wavelet_family: str = "db1",
use_idwt: bool = True,
**kwargs,
) -> None:
super().__init__()
WaveFTLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
self._active_adapter = adapter_name
self.update_layer(adapter_name, n_frequency, scaling, init_weights, random_loc_seed, wavelet_family, use_idwt)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.waveft_spectrum.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
orig_weights += transpose(self.get_delta_weight(active_adapter), self.fan_in_fan_out)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += transpose(self.get_delta_weight(active_adapter), self.fan_in_fan_out)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.waveft_spectrum.keys():
self.get_base_layer().weight.data -= transpose(
self.get_delta_weight(active_adapter), self.fan_in_fan_out
)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.waveft_spectrum.keys():
continue
delta_w = self.get_delta_weight(active_adapter)
x = self._cast_input_dtype(x, delta_w.dtype)
result = result + F.linear(x, delta_w)
result = result.to(previous_dtype)
return result
def supports_lora_conversion(self, adapter_name: str = "default") -> bool:
if isinstance(self.get_base_layer(), Conv1D):
# get_delta_weight does not transpose Conv1D because it is used in forward, therefore, it has the wrong
# shape for conversion
return False
return True
def __repr__(self) -> str:
rep = super().__repr__()
return "waveft." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/waveft/layer.py",
"license": "Apache License 2.0",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/waveft/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
import torch
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import (
TRANSFORMERS_MODELS_TO_WAVEFT_TARGET_MODULES_MAPPING,
)
from peft.utils.other import get_pattern_key
from .layer import WaveFTLayer, WaveFTLinear
class WaveFTModel(BaseTuner):
prefix: str = "waveft_"
tuner_layer_cls: type[BaseTunerLayer] = WaveFTLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_WAVEFT_TARGET_MODULES_MAPPING
def _calculate_proportional_parameters(self, model: torch.nn.Module, waveft_config):
"""Calculate proportional parameter allocation for all target modules."""
target_modules_info = []
for name, module in model.named_modules():
if check_target_module_exists(waveft_config, name):
# Handle case where module is already wrapped with WaveFT
if isinstance(module, WaveFTLayer):
# Use the base layer for dimension calculations
base_module = module.base_layer
if isinstance(base_module, torch.nn.Linear):
input_dim, output_dim = base_module.in_features, base_module.out_features
elif isinstance(base_module, Conv1D):
input_dim, output_dim = base_module.weight.shape[1], base_module.weight.shape[0]
else:
continue
elif isinstance(module, torch.nn.Linear):
input_dim, output_dim = module.in_features, module.out_features
elif isinstance(module, Conv1D):
input_dim, output_dim = module.weight.shape[1], module.weight.shape[0]
else:
continue
target_modules_info.append((name, input_dim, output_dim))
if not target_modules_info:
raise ValueError("No target modules found for proportional parameter allocation.")
total_sum = sum(input_dim * output_dim for (_, input_dim, output_dim) in target_modules_info)
num_layers = len(target_modules_info)
total_budget = waveft_config.n_frequency * num_layers
n_frequency_dict = {}
for name, input_dim, output_dim in target_modules_info:
layer_ratio = (input_dim * output_dim) / total_sum
n_freq = round(layer_ratio * total_budget)
n_frequency_dict[name] = n_freq
return n_frequency_dict
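    # Worked allocation example (made-up layer sizes, global n_frequency=1000):
    #   layer A: 4096 x 4096 -> 16_777_216 elements, layer B: 4096 x 1024 -> 4_194_304 elements
    #   total_sum = 20_971_520, total_budget = 1000 * 2 = 2000
    #   layer A receives round(0.8 * 2000) = 1600, layer B receives round(0.2 * 2000) = 400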
def _create_and_replace(
self,
waveft_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
# Calculate proportional parameters if needed (only once per adapter)
if waveft_config.proportional_parameters:
if not hasattr(self, "_proportional_params_cache"):
self._proportional_params_cache = {}
if adapter_name not in self._proportional_params_cache:
n_frequency_dict = self._calculate_proportional_parameters(self.model, waveft_config)
self._proportional_params_cache[adapter_name] = n_frequency_dict
# Determine n_frequency: Priority order:
# 1. From proportional parameter cache (if proportional_parameters=True)
# 2. From optional_kwargs (if passed directly)
# 3. From n_frequency_pattern in config
# 4. From default n_frequency in config
n_frequency = None
if (
waveft_config.proportional_parameters
and hasattr(self, "_proportional_params_cache")
and adapter_name in self._proportional_params_cache
):
n_frequency = self._proportional_params_cache[adapter_name].get(current_key)
if n_frequency is None and "n_frequency" in optional_kwargs:
n_frequency = optional_kwargs["n_frequency"]
if n_frequency is None:
pattern_keys = list(waveft_config.n_frequency_pattern.keys())
target_name_key = get_pattern_key(pattern_keys, current_key)
n_frequency = waveft_config.n_frequency_pattern.get(target_name_key, waveft_config.n_frequency)
# Determine wavelet_family
wavelet_family = None
if "wavelet_family" in optional_kwargs:
wavelet_family = optional_kwargs["wavelet_family"]
if wavelet_family is None:
wavelet_family = waveft_config.wavelet_family
scaling = waveft_config.scaling
random_loc_seed = waveft_config.random_loc_seed
bias = hasattr(target, "bias") and target.bias is not None
# Prepare kwargs for module creation/update
kwargs = {
"n_frequency": n_frequency,
"scaling": scaling,
"fan_in_fan_out": waveft_config.fan_in_fan_out,
"init_weights": waveft_config.init_weights,
"random_loc_seed": waveft_config.random_loc_seed,
"wavelet_family": wavelet_family, # Use determined wavelet family
}
kwargs["bias"] = bias
if isinstance(target, WaveFTLayer):
target.update_layer(
adapter_name,
n_frequency,
scaling,
waveft_config.init_weights,
random_loc_seed,
wavelet_family=wavelet_family, # Pass determined wavelet family
use_idwt=waveft_config.use_idwt,
)
else:
new_module = self._create_new_module(waveft_config, adapter_name, target, **kwargs)
if adapter_name != self.active_adapter:
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _create_new_module(waveft_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = waveft_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
kwargs["is_target_conv_1d_layer"] = True
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = waveft_config.fan_in_fan_out = True
else:
            raise ValueError(
                f"Target module {target} is not supported. Currently, only the following modules are supported: "
                "`torch.nn.Linear` and `transformers.pytorch_utils.Conv1D`."
)
kwargs["wavelet_family"] = waveft_config.wavelet_family
kwargs["use_idwt"] = waveft_config.use_idwt
new_module = WaveFTLinear(target, adapter_name, **kwargs)
return new_module
def delete_adapter(self, adapter_name: str) -> None:
"""
Deletes an existing adapter.
Args:
adapter_name (str): Name of the adapter to be deleted.
"""
super().delete_adapter(adapter_name)
# Clean up proportional parameters cache
if hasattr(self, "_proportional_params_cache") and adapter_name in self._proportional_params_cache:
del self._proportional_params_cache[adapter_name]
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/waveft/model.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/waveft/waverec2d.py | # Copyright 2021 Moritz Wolter
# Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the EUPL v1.2
#
# This file contains code derived from PyTorch-Wavelet-Toolbox:
# https://github.com/v0lta/PyTorch-Wavelet-Toolbox
#
# Original work by Moritz Wolter, licensed under EUPL v1.2
# Modifications and integration by HuggingFace Inc. team
from collections.abc import Callable, Sequence
from functools import partial
from typing import Any, NamedTuple, Protocol, TypeAlias, Union, cast, overload
import numpy as np
import torch
from typing_extensions import Unpack
from .wavelet import Wavelet as minimal_wavelet
class WaveletDetailTuple2d(NamedTuple):
horizontal: torch.Tensor
vertical: torch.Tensor
diagonal: torch.Tensor
WaveletCoeff2d: TypeAlias = tuple[torch.Tensor, Unpack[tuple[WaveletDetailTuple2d, ...]]]
WaveletDetailDict: TypeAlias = dict[str, torch.Tensor]
WaveletCoeffNd: TypeAlias = tuple[torch.Tensor, Unpack[tuple[WaveletDetailDict, ...]]]
class Wavelet(Protocol):
name: str
dec_lo: Sequence[float]
dec_hi: Sequence[float]
rec_lo: Sequence[float]
rec_hi: Sequence[float]
dec_len: int
rec_len: int
filter_bank: tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float]]
def __len__(self) -> int:
return len(self.dec_lo)
class WaveletTensorTuple(NamedTuple):
dec_lo: torch.Tensor
dec_hi: torch.Tensor
rec_lo: torch.Tensor
rec_hi: torch.Tensor
@classmethod
def from_wavelet(cls, wavelet: Wavelet, dtype: torch.dtype) -> "WaveletTensorTuple":
return cls(
torch.tensor(wavelet.dec_lo, dtype=dtype),
torch.tensor(wavelet.dec_hi, dtype=dtype),
torch.tensor(wavelet.rec_lo, dtype=dtype),
torch.tensor(wavelet.rec_hi, dtype=dtype),
)
def _as_wavelet(wavelet: Union[Wavelet, str]) -> Wavelet:
if isinstance(wavelet, str):
return minimal_wavelet(wavelet)
else:
return wavelet
def _is_dtype_supported(dtype: torch.dtype) -> bool:
return dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]
def _outer(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
a_flat = torch.reshape(a, [-1])
b_flat = torch.reshape(b, [-1])
a_mul = torch.unsqueeze(a_flat, dim=-1)
b_mul = torch.unsqueeze(b_flat, dim=0)
return a_mul * b_mul
def _check_if_tensor(array: Any) -> torch.Tensor:
if not isinstance(array, torch.Tensor):
raise ValueError("First element of coeffs must be the approximation coefficient tensor.")
return array
def _check_axes_argument(axes: Sequence[int]) -> None:
if len(set(axes)) != len(axes):
        raise ValueError("Can't transform the same axis twice.")
def _check_same_device(tensor: torch.Tensor, torch_device: torch.device) -> torch.Tensor:
if torch_device != tensor.device:
raise ValueError("coefficients must be on the same device")
return tensor
def _check_same_dtype(tensor: torch.Tensor, torch_dtype: torch.dtype) -> torch.Tensor:
if torch_dtype != tensor.dtype:
raise ValueError("coefficients must have the same dtype")
return tensor
@overload
def _coeff_tree_map(
coeffs: list[torch.Tensor], function: Callable[[torch.Tensor], torch.Tensor]
) -> list[torch.Tensor]: ...
@overload
def _coeff_tree_map(coeffs: WaveletCoeff2d, function: Callable[[torch.Tensor], torch.Tensor]) -> WaveletCoeff2d: ...
@overload
def _coeff_tree_map(coeffs: WaveletCoeffNd, function: Callable[[torch.Tensor], torch.Tensor]) -> WaveletCoeffNd: ...
def _coeff_tree_map(coeffs, function):
approx = function(coeffs[0])
result_lst: list[Any] = []
for element in coeffs[1:]:
if isinstance(element, tuple):
result_lst.append(WaveletDetailTuple2d(function(element[0]), function(element[1]), function(element[2])))
elif isinstance(element, dict):
new_dict = {key: function(value) for key, value in element.items()}
result_lst.append(new_dict)
elif isinstance(element, torch.Tensor):
result_lst.append(function(element))
else:
raise ValueError(f"Unexpected input type {type(element)}")
if not result_lst:
return [approx] if isinstance(coeffs, list) else (approx,)
elif isinstance(result_lst[0], torch.Tensor):
return [approx] + cast(list[torch.Tensor], result_lst)
else:
cast_result_lst = cast(Union[list[WaveletDetailDict], list[WaveletDetailTuple2d]], result_lst)
return (approx, *cast_result_lst)
def _check_same_device_dtype(
coeffs: Union[list[torch.Tensor], WaveletCoeff2d, WaveletCoeffNd],
) -> tuple[torch.device, torch.dtype]:
c = _check_if_tensor(coeffs[0])
torch_device, torch_dtype = c.device, c.dtype
_coeff_tree_map(coeffs, partial(_check_same_device, torch_device=torch_device))
_coeff_tree_map(coeffs, partial(_check_same_dtype, torch_dtype=torch_dtype))
return torch_device, torch_dtype
def _get_transpose_order(axes: Sequence[int], data_shape: Sequence[int]) -> tuple[list[int], list[int]]:
axes = [a + len(data_shape) if a < 0 else a for a in axes]
all_axes = list(range(len(data_shape)))
remove_transformed = list(filter(lambda a: a not in axes, all_axes))
return remove_transformed, axes
def _swap_axes(data: torch.Tensor, axes: Sequence[int]) -> torch.Tensor:
_check_axes_argument(axes)
front, back = _get_transpose_order(axes, list(data.shape))
return torch.permute(data, front + back)
def _undo_swap_axes(data: torch.Tensor, axes: Sequence[int]) -> torch.Tensor:
_check_axes_argument(axes)
front, back = _get_transpose_order(axes, list(data.shape))
restore_sorted = torch.argsort(torch.tensor(front + back)).tolist()
return torch.permute(data, restore_sorted)
def _fold_axes(data: torch.Tensor, keep_no: int) -> tuple[torch.Tensor, list[int]]:
dshape = list(data.shape)
return (torch.reshape(data, [int(np.prod(dshape[:-keep_no]))] + dshape[-keep_no:]), dshape)
def _unfold_axes(data: torch.Tensor, ds: list[int], keep_no: int) -> torch.Tensor:
return torch.reshape(data, ds[:-keep_no] + list(data.shape[-keep_no:]))
def _preprocess_coeffs(coeffs, ndim: int, axes, add_channel_dim: bool = False):
if isinstance(axes, int):
axes = (axes,)
torch_dtype = _check_if_tensor(coeffs[0]).dtype
if not _is_dtype_supported(torch_dtype):
raise ValueError(f"Input dtype {torch_dtype} not supported")
if ndim <= 0:
raise ValueError("Number of dimensions must be positive")
if tuple(axes) != tuple(range(-ndim, 0)):
if len(axes) != ndim:
raise ValueError(f"{ndim}D transforms work with {ndim} axes.")
else:
swap_fn = partial(_swap_axes, axes=axes)
coeffs = _coeff_tree_map(coeffs, swap_fn)
ds = list(coeffs[0].shape)
if len(ds) < ndim:
raise ValueError(f"At least {ndim} input dimensions required.")
elif len(ds) == ndim:
coeffs = _coeff_tree_map(coeffs, lambda x: x.unsqueeze(0))
elif len(ds) > ndim + 1:
coeffs = _coeff_tree_map(coeffs, lambda t: _fold_axes(t, ndim)[0])
if add_channel_dim:
coeffs = _coeff_tree_map(coeffs, lambda x: x.unsqueeze(1))
return coeffs, ds
def _postprocess_coeffs(coeffs, ndim: int, ds: list[int], axes):
if isinstance(axes, int):
axes = (axes,)
if ndim <= 0:
raise ValueError("Number of dimensions must be positive")
if len(ds) < ndim:
raise ValueError(f"At least {ndim} input dimensions required.")
elif len(ds) == ndim:
coeffs = _coeff_tree_map(coeffs, lambda x: x.squeeze(0))
elif len(ds) > ndim + 1:
unfold_axes_fn = partial(_unfold_axes, ds=ds, keep_no=ndim)
coeffs = _coeff_tree_map(coeffs, unfold_axes_fn)
if tuple(axes) != tuple(range(-ndim, 0)):
if len(axes) != ndim:
raise ValueError(f"{ndim}D transforms work with {ndim} axes.")
else:
undo_swap_fn = partial(_undo_swap_axes, axes=axes)
coeffs = _coeff_tree_map(coeffs, undo_swap_fn)
return coeffs
def _postprocess_tensor(
data: torch.Tensor, ndim: int, ds: list[int], axes: Union[tuple[int, ...], int]
) -> torch.Tensor:
return _postprocess_coeffs(coeffs=[data], ndim=ndim, ds=ds, axes=axes)[0]
def _get_filter_tensors(
wavelet: Union[Wavelet, str], flip: bool, device: torch.device, dtype: torch.dtype
) -> WaveletTensorTuple:
wavelet = _as_wavelet(wavelet)
if flip:
filters = WaveletTensorTuple(
torch.tensor(wavelet.rec_lo, device=device, dtype=dtype),
torch.tensor(wavelet.rec_hi, device=device, dtype=dtype),
torch.tensor(wavelet.dec_lo, device=device, dtype=dtype),
torch.tensor(wavelet.dec_hi, device=device, dtype=dtype),
)
else:
filters = WaveletTensorTuple.from_wavelet(wavelet, dtype=dtype)
filters = WaveletTensorTuple(
filters.dec_lo.to(device),
filters.dec_hi.to(device),
filters.rec_lo.to(device),
filters.rec_hi.to(device),
)
return filters
def _adjust_padding_at_reconstruction(tensor_len: int, coeff_len: int, padr: int, padl: int) -> tuple[int, int]:
if 2 * coeff_len - tensor_len == 1:
padr += 1
elif 2 * coeff_len - tensor_len != 0:
raise ValueError("incorrect padding")
return padr, padl
def _construct_2d_filt(lo: torch.Tensor, hi: torch.Tensor) -> torch.Tensor:
ll = _outer(lo, lo)
lh = _outer(hi, lo)
hl = _outer(lo, hi)
hh = _outer(hi, hi)
filt = torch.stack([ll, lh, hl, hh], 0)
filt = filt.unsqueeze(1)
return filt
def waverec2d(
coeffs: WaveletCoeff2d,
wavelet: Union[Wavelet, str],
axes: tuple[int, int] = (-2, -1),
) -> torch.Tensor:
coeffs, ds = _preprocess_coeffs(coeffs, ndim=2, axes=axes)
torch_device, torch_dtype = _check_same_device_dtype(coeffs)
_, _, rec_lo, rec_hi = _get_filter_tensors(wavelet, flip=False, device=torch_device, dtype=torch_dtype)
filt_len = rec_lo.shape[-1]
rec_filt = _construct_2d_filt(lo=rec_lo, hi=rec_hi)
res_ll = coeffs[0]
for c_pos, coeff_tuple in enumerate(coeffs[1:]):
if not isinstance(coeff_tuple, tuple) or len(coeff_tuple) != 3:
raise ValueError(f"Unexpected detail coefficient type: {type(coeff_tuple)}. Must be a 3-tuple.")
curr_shape = res_ll.shape
for coeff in coeff_tuple:
if coeff.shape != curr_shape:
raise ValueError("All coefficients on each level must have the same shape")
res_lh, res_hl, res_hh = coeff_tuple
res_ll = torch.stack([res_ll, res_lh, res_hl, res_hh], 1)
res_ll = torch.nn.functional.conv_transpose2d(res_ll, rec_filt, stride=2).squeeze(1)
padl = (2 * filt_len - 3) // 2
padr = (2 * filt_len - 3) // 2
padt = (2 * filt_len - 3) // 2
padb = (2 * filt_len - 3) // 2
if c_pos < len(coeffs) - 2:
padr, padl = _adjust_padding_at_reconstruction(
res_ll.shape[-1], coeffs[c_pos + 2][0].shape[-1], padr, padl
)
padb, padt = _adjust_padding_at_reconstruction(
res_ll.shape[-2], coeffs[c_pos + 2][0].shape[-2], padb, padt
)
if padt > 0:
res_ll = res_ll[..., padt:, :]
if padb > 0:
res_ll = res_ll[..., :-padb, :]
if padl > 0:
res_ll = res_ll[..., padl:]
if padr > 0:
res_ll = res_ll[..., :-padr]
res_ll = _postprocess_tensor(res_ll, ndim=2, ds=ds, axes=axes)
return res_ll
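# Illustrative single-level reconstruction sketch (shapes only; assumes the
# coefficients came from a matching 'db1' analysis transform computed elsewhere):
#
#   cA, cH, cV, cD = (torch.zeros(4, 4) for _ in range(4))
#   rec = waverec2d((cA, WaveletDetailTuple2d(cH, cV, cD)), "db1")
#   assert rec.shape == (8, 8)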
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/waveft/waverec2d.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/functional.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions that are useful for integration with non-PeftModel models, e.g. transformers or diffusers.
The functions provided here can be considered "public API" of PEFT and hence are safe to be used by packages that
provide PEFT integrations.
"""
from peft.mapping import inject_adapter_in_model
from peft.tuners.tuners_utils import cast_adapter_dtype, delete_adapter, set_adapter, set_requires_grad
from peft.utils import get_peft_model_state_dict, set_peft_model_state_dict
__all__ = [
"cast_adapter_dtype",
"delete_adapter",
"get_peft_model_state_dict",
"inject_adapter_in_model",
"set_adapter",
"set_peft_model_state_dict",
"set_requires_grad",
]
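# Illustrative low-level usage sketch (the model name and target modules below are
# placeholders; any transformers model exposing matching linear layers works):
#
#   from transformers import AutoModelForCausalLM
#   from peft import LoraConfig
#   from peft.functional import inject_adapter_in_model, get_peft_model_state_dict
#
#   model = AutoModelForCausalLM.from_pretrained("path/to/base_model")
#   config = LoraConfig(r=8, target_modules=["q_proj", "v_proj"])
#   model = inject_adapter_in_model(config, model)
#   adapter_state_dict = get_peft_model_state_dict(model)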
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/functional.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/arrow_multitask/arrow_phi3_mini.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script provides a simple evaluation pipeline for multiple-choice reasoning datasets
(e.g., BoolQ, HellaSwag, ARC, OpenBookQA, Winogrande) with different composition strategies.
Usage examples:
python arrow_phi3_mini.py --strategy base --ds_name arc-challenge
python arrow_phi3_mini.py --strategy arrow --ds_name boolq
python arrow_phi3_mini.py --strategy gks --ds_name hswag
Key features:
- Supports three strategies:
• "base" → Evaluate the quantized base model directly
• "arrow" → Use Arrow modular routing with task-specific adapters
• "gks" → Use Arrow + GenKnowSub (subtracting general-domain knowledge)
- Loads evaluation datasets from the Hugging Face Hub
- Implements a batched evaluation loop that computes per-option likelihoods and selects
the answer with the lowest average loss
- Reports simple accuracy
Implementation details:
- The base model is quantized to 4-bit using `BitsAndBytesConfig` (nf4, bf16 compute).
- For Arrow and GKS, task-specific adapters are loaded from the Hugging Face Hub:
TahaBa/phi3-mini-clustered-flan/ts_expert_i
- Task-specific adapters were trained on 10 clusters of FLAN tasks.
- The clusters were created using Model-Based Clustering (MBC):
1. Train a LoRA adapter for each individual task.
2. Apply k-means clustering to group tasks based on these adapters.
3. Train a LoRA adapter for each resulting cluster.
For more details, see the Arrow paper: https://huggingface.co/papers/2405.11157
- For GKS, general adapters are loaded from:
TahaBa/phi3-mini-general-adapters/...
- These adapters were trained on English, French, and German Wikipedia data
using a causal language modeling objective with (507-token context → 5-token completion) pairs.
- This setup encodes general knowledge into the LoRA space, which can then be
subtracted from task-specific adapters during inference to isolate and purify them.
For more details, see the GenKnowSub paper: https://huggingface.co/papers/2505.10939
- `evaluate_on_multi_choice_batched` handles tokenization, masking context tokens,
and computing per-choice log-likelihoods for fair comparison.
- Accuracy is printed at the end for the selected dataset.
This script is mainly meant for demonstration purposes and lightweight evaluation,
not full-scale benchmarking (batch size / max length can be tuned).
=======================================================================================
Results (evaluated with microsoft/Phi-3-mini-4k-instruct, 4-bit quantization):
| Dataset | Base Acc. | Arrow Acc. | Arrow+GKS Acc. |
|--------------|-----------|------------|----------------|
| ARC-Challenge| 0.4515 | 0.5418 | 0.5585 |
| ARC-Easy | 0.6894 | 0.8404 | 0.8473 |
| Winogrande | 0.5769 | 0.6550 | 0.6724 |
| BoolQ | 0.8146 | 0.8030 | 0.8247 |
| OpenBookQA | 0.43 | 0.448 | 0.472 |
| HellaSwag | 0.7318 | 0.7150 | 0.7376 |
Observations:
- Arrow generally improves over the base model by routing tokens to the most relevant task adapters.
- Applying GKS (general knowledge subtraction) consistently gives further gains compared to Arrow and Base.
These numbers are not meant as leaderboard results, but as a sanity check
to verify that the implementation works as expected and demonstrates
the benefits of Arrow and GenKnowSub.
"""
import argparse
import random
import numpy as np
import torch
from datasets import load_dataset
from sklearn.metrics import accuracy_score
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import ArrowConfig, create_arrow_model
MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"
MODEL_MAX_LEN = 2048
def parse_args():
parser = argparse.ArgumentParser(description="Training script with strategy selection")
parser.add_argument(
"--strategy",
type=str,
choices=["base", "arrow", "gks"],
default="base",
help="Training strategy to use: base, arrow, or gks",
)
parser.add_argument(
"--ds_name",
type=str,
choices=["boolq", "hswag", "arc-easy", "arc-challenge", "oqa", "wg"],
default="arc-challenge",
help="Dataset to use: boolq, hswag, arc-easy, arc-challenge, oqa, wg",
)
return parser.parse_args()
def read_test_dataset(ds_name):
if ds_name == "boolq":
ds = load_dataset("google/boolq", split="validation", trust_remote_code=True)
elif ds_name == "hswag":
ds = load_dataset("Rowan/hellaswag", split="validation", trust_remote_code=True)
elif ds_name == "arc-challenge":
ds = load_dataset("allenai/ai2_arc", "ARC-Challenge", split="validation", trust_remote_code=True)
elif ds_name == "arc-easy":
ds = load_dataset("allenai/ai2_arc", "ARC-Easy", split="validation", trust_remote_code=True)
elif ds_name == "oqa":
ds = load_dataset("allenai/openbookqa", split="validation", trust_remote_code=True)
elif ds_name == "wg":
ds = load_dataset("allenai/winogrande", "winogrande_xl", split="validation", trust_remote_code=True)
else:
        raise ValueError(f"Dataset {ds_name} is not supported yet.")
return ds
def extract_input_content(ds_name, row):
if ds_name == "boolq":
return f"[passage]{row['passage']}[question]{row['question']}"
if ds_name == "hswag":
return row["ctx"]
if (ds_name == "arc-challenge") or (ds_name == "arc-easy"):
return row["question"]
if ds_name == "oqa":
return row["question_stem"]
if ds_name == "wg":
return row["sentence"]
def create_multi_choice_options(row, ds_name):
options_texts = []
content = extract_input_content(ds_name, row)
if ds_name == "boolq":
choices = ["true", "false"]
if ds_name == "hswag":
choices = row["endings"]
if (ds_name == "arc-challenge") or (ds_name == "arc-easy"):
choices = row["choices"]["text"]
if ds_name == "wg":
choices = [row["option1"], row["option2"]]
if ds_name == "oqa":
choices = row["choices"]["text"]
for choice in choices:
options_texts.append(f"<|user|>\n{content}<|end|>\n<|assistant|>{choice}<|end|>\n")
return options_texts
def extract_multi_choice_target_index(row, ds_name):
if ds_name == "boolq":
return 0 if row["answer"] is True else 1
if ds_name == "hswag":
return int(row["label"])
if (ds_name == "arc-challenge") or (ds_name == "arc-easy"):
return row["choices"]["label"].index(row["answerKey"])
if ds_name == "wg":
return int(row["answer"]) - 1
if ds_name == "oqa":
return row["choices"]["label"].index(row["answerKey"])
def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
elif hasattr(torch, "xpu") and torch.xpu.is_available():
torch.xpu.manual_seed_all(seed)
def compute_loglike_loss(logits, labels, reduction="none"):
bs = logits.size(0)
vocab_size = logits.size(-1)
labels = labels.squeeze(-1)
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = torch.nn.CrossEntropyLoss(reduction=reduction)
shift_logits = shift_logits.view(-1, vocab_size)
shift_labels = shift_labels.view(-1)
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
# reshape back
if reduction == "none":
loss = loss.view((bs, -1))
non_zero_loss = (loss != 0).sum(dim=-1)
non_zero_loss[non_zero_loss == 0] = 1
loss = loss.sum(dim=-1) / non_zero_loss
return loss.float() # Convert to float32 before returning
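# Shape walk-through for compute_loglike_loss (illustrative numbers, not taken from
# the datasets below):
#   logits: (B=4, T=128, V), labels: (4, 128) with -100 on context and padding positions
#   shift -> (4, 127, V) vs (4, 127), flattened for CrossEntropyLoss(reduction="none")
#   per-token losses are reshaped to (4, 127); ignored (-100) positions contribute exactly 0
#   per-option score = sum / count(non-zero tokens) -> shape (4,); the lowest-loss option wins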
def evaluate_on_multi_choice_batched(
eval_dataset, model, tokenizer, ds_name, labels, predictions, args, batch_size=32, max_length=512, device="auto"
):
model.eval()
if device == "auto":
device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
else:
device = torch.device(device)
for start in tqdm(
range(0, len(eval_dataset), batch_size), total=(len(eval_dataset) + batch_size - 1) // batch_size
):
rows = [eval_dataset[i] for i in range(start, min(start + batch_size, len(eval_dataset)))]
# Build the flattened option texts for this batch
all_texts = []
options_per_sample = [] # number of options for each sample
ctx_lens_per_option = [] # context length replicated per option
for row in rows:
# options: ["<|user|>...<|assistant|>choiceA<|end|>", ...]
options = create_multi_choice_options(row, ds_name)
options_per_sample.append(len(options))
# compute context length once per sample (the -1 accounts for the one-token label shift in the loss)
content = extract_input_content(ds_name, row)
context_prompt = f"<|user|>\n{content}<|end|>\n<|assistant|>"
ctx_len = len(tokenizer.encode(context_prompt)) - 1
all_texts.extend(options)
ctx_lens_per_option.extend([ctx_len] * len(options))
# collect gold label
labels.append(extract_multi_choice_target_index(row, ds_name))
# Tokenize all options in one go
tokenized = tokenizer(
all_texts,
return_tensors="pt",
padding=True,
truncation=True,
max_length=max_length,
)
tokenized = {k: v.to(device) for k, v in tokenized.items()}
# Create masked labels: ignore context and padding
masked_labels = tokenized["input_ids"].clone()
for i, ctx_len in enumerate(ctx_lens_per_option):
masked_labels[i, :ctx_len] = -100
masked_labels[tokenized["attention_mask"] == 0] = -100
with torch.no_grad():
logits = model(input_ids=tokenized["input_ids"], attention_mask=tokenized["attention_mask"]).logits
# per-sequence losses
losses = compute_loglike_loss(logits, masked_labels, reduction="none").detach().cpu()
# Reduce per sample (argmin across its options)
idx = 0
for n_opt in options_per_sample:
pred = torch.argmin(losses[idx : idx + n_opt]).item()
predictions.append(pred)
idx += n_opt
print(
f"Accuracy for dataset {args.ds_name} and strategy {args.strategy} is: {accuracy_score(labels, predictions)}"
)
if __name__ == "__main__":
args = parse_args()
print(f"Selected strategy: {args.strategy}")
print(f"Dataset name: {args.ds_name}")
# Loading the tokeniser
tokenizer = AutoTokenizer.from_pretrained(
MODEL_NAME,
use_fast=True,
padding_side="right",
model_max_length=MODEL_MAX_LEN,
)
# Quantisation config
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_use_double_quant=False,
)
# Loading the model
base_model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
dtype=torch.bfloat16,
device_map="auto",
quantization_config=bnb_config,
)
# Loading the test dataset
test_dataset = read_test_dataset(args.ds_name)
print(f"{args.ds_name} is loaded with size: {len(test_dataset)}.")
labels, predictions = [], []
if args.strategy == "base":
# Batch-wise inference
with torch.no_grad():
evaluate_on_multi_choice_batched(
test_dataset,
base_model,
tokenizer,
args.ds_name,
labels,
predictions,
args,
batch_size=64, # tune this
max_length=512, # tune if options are long
device="auto",
)
else:
general_adapter_paths = []
if args.strategy == "gks":
arrow_config = ArrowConfig(
top_k=3,
router_temperature=1.0,
use_gks=True,
)
# General adapter paths from the hub
general_adapter_paths = [
"TahaBa/phi3-mini-general-adapters/cluster0_batch16_prop1.0_langen/checkpoint-17",
"TahaBa/phi3-mini-general-adapters/cluster0_batch16_prop1.0_langfr/checkpoint-35",
"TahaBa/phi3-mini-general-adapters/cluster0_batch16_prop1.0_langger/checkpoint-17",
]
else:
arrow_config = ArrowConfig(
top_k=3,
router_temperature=1.0,
)
# Task-specific adapter paths from the hub
task_specific_adapter_paths = [f"TahaBa/phi3-mini-clustered-flan/ts_expert_{i}" for i in range(10)]
# Creating the Arrow model
model = create_arrow_model(
base_model=base_model,
task_specific_adapter_paths=task_specific_adapter_paths,
general_adapter_paths=general_adapter_paths,
arrow_config=arrow_config,
)
# Batch-wise inference
with torch.no_grad():
evaluate_on_multi_choice_batched(
test_dataset,
model,
tokenizer,
args.ds_name,
labels,
predictions,
args,
batch_size=32, # tune this
max_length=512, # tune if options are long
device="auto",
)
| {
"repo_id": "huggingface/peft",
"file_path": "examples/arrow_multitask/arrow_phi3_mini.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/lora/arrow.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
from typing import Any
import torch
from torch import nn
from transformers import PreTrainedModel
from .config import ArrowConfig
TASK_ADAPTER_PREFIX = "task_"
GKS_ADAPTER_PREFIX = "gks_"
class ArrowLoraLinearLayer(nn.Module):
"""
This class implements the main logic of the Arrow routing algorithm for linear layers.
"""
def __init__(self, in_features, arrow_config):
super().__init__()
# extra parameters needed for arrow
self.in_features = in_features
self._protos_ready = False
self.top_k = arrow_config.top_k
self.temperature = arrow_config.router_temperature
self.rng_seed = arrow_config.rng_seed
self.task_adapter_names = (
arrow_config.task_adapter_names.copy()
) # Set in create_arrow_model() with this format: task_0, task_1, ...
self.gks_adapter_names = (
arrow_config.gks_adapter_names
) # Set in create_arrow_model() with this format: gks_0, gks_1, ...
self.use_gks = arrow_config.use_gks
self.gks_done = False
self.gks_added_adapter_names = []
self.in_features = in_features
self.cast_input_dtype_enabled = True
@torch.no_grad()
def on_adapter_change(self, lora_A, lora_B):
"""
Called when adapters are added/removed/renamed so Arrow can refresh its internal state before the next forward
pass.
"""
all_ts_adapter_names = [
k
for k in lora_A.keys()
if k in lora_B and k != "arrow_router" and not (k.startswith("gks_") and k[len("gks_") :].isdigit())
]
if sorted(self.task_adapter_names) == sorted(all_ts_adapter_names): # No changes in the ts_adapters
return
# Getting the name(s) of added adapter(s)
if len(self.task_adapter_names) < len(all_ts_adapter_names): # Adapter(s) are added.
self.gks_added_adapter_names = [x for x in all_ts_adapter_names if x not in self.task_adapter_names]
# Updating the task_adapter_names
self.task_adapter_names = all_ts_adapter_names.copy()
# Invalidate caches so they’ll be rebuilt lazily on next forward()
self._protos_ready = False
# GKS will be handled by self.gks_added_adapter_names
def top_right_singular_vec_from_BA(self, A, B, iters=15, eps=1e-8):
"""
Computes the top *right* singular vector of ΔW = B @ A without forming ΔW.
Theory:
For any matrix M, the right singular vectors are the eigenvectors of Mᵀ M. If ΔW = B @ A (with A ∈
ℝ^{r×in}, B ∈ ℝ^{out×r}), then
ΔWᵀ ΔW = (B @ A)ᵀ (B @ A) = Aᵀ (Bᵀ B) A ∈ ℝ^{in×in}.
Therefore, the dominant right singular vector of ΔW is the dominant eigenvector of M := Aᵀ (Bᵀ B) A. We
find it by *power iteration* on the linear operator
v ↦ Aᵀ (Bᵀ B) (A v),
which avoids materializing ΔW (out×in) or M (in×in). The result lives in the input/token space (size =
in_features), which is exactly what Arrow needs. (Right singular vectors ≡ eigenvectors of MᵀM; power
iteration converges to the dominant eigenvector under mild conditions.)
Practical notes:
- All iteration is performed in float32 for numerical stability; the result is cast back
to the LoRA dtype/device before the prototype is stored/used.
- The iteration runs for a fixed number of steps (`iters`); `eps` only guards against
division by zero when normalizing.
- The returned vector is unique up to sign (±), as with any singular vector.
Downstream code should be sign-invariant.
"""
# A: (r, in), B: (out, r)
A32 = A.to(torch.float32)
B32 = B.to(torch.float32)
C = B32.T @ B32 # (r, r)
# Private RNG on A's device
gen = None
if self.rng_seed is not None:
gen = torch.Generator(device=A32.device.type)
gen.manual_seed(int(self.rng_seed))
# init vector in input space
v = torch.randn(A32.size(1), dtype=A32.dtype, device=A32.device, generator=gen)
v = v / (v.norm() + eps)
for _ in range(iters):
# w = (ΔWᵀΔW) v = Aᵀ (BᵀB) (A v)
w = A32.T @ (C @ (A32 @ v))
v = w / (w.norm() + eps)
return v # fp32
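# Hedged sanity-check sketch (illustrative only, not part of the layer API): for a small random LoRA pair,
# the power-iteration result should match the top right singular vector from a full SVD of ΔW = B @ A,
# up to sign. `layer` is assumed to be an ArrowLoraLinearLayer instance; shapes are illustrative.
#
#   A = torch.randn(8, 64)                                      # (r, in_features)
#   B = torch.randn(128, 8)                                     # (out_features, r)
#   v_pi = layer.top_right_singular_vec_from_BA(A, B, iters=100)
#   v_svd = torch.linalg.svd(B @ A, full_matrices=False).Vh[0]
#   assert min((v_pi - v_svd).norm(), (v_pi + v_svd).norm()) < 1e-3  # sign-invariant comparison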
@torch.no_grad()
def build_prototypes(self, lora_A, lora_B):
"""
Computes a prototype vector for each LoRA expert in this layer by extracting the top right singular
vector of its update ΔW = B @ A (via power iteration, without materializing ΔW).
These prototypes are later used to calculate the cosine similarity between each input token and each expert.
The resulting similarity scores serve as coefficients to compute a weighted average of the corresponding LoRA
modules, effectively routing each token through its most relevant experts.
**This prototype computation is done once for all experts and is re-done on newly added adapters.**
Args:
lora_A : Matrices A in the LoRA layer.
lora_B : Matrices B in the LoRA layer.
"""
if self._protos_ready:
return
protos = []
for name in self.task_adapter_names:
A = lora_A[name].weight # (r, in_features)
B = lora_B[name].weight # (out_features, r)
# Efficiently compute the top right singular vector of ΔW = B @ A
proto32 = self.top_right_singular_vec_from_BA(A, B)
proto = proto32.to(dtype=A.dtype, device=A.device)
protos.append(proto)
proto_stack = torch.stack(protos, dim=0) # (E, in_features)
# Register the prototypes buffer with correct dtype/device consistent with A and B weights
self.register_buffer("prototypes", proto_stack, persistent=False)
self._protos_ready = True
@torch.no_grad()
def gen_know_sub(self, lora_A, lora_B):
"""
This function performs General Knowledge Subtraction (GenKnowSub). It takes the average of the provided
general adapters and subtracts it from each task adapter. This subtraction purifies the task adapters,
following the "forgetting-via-negation" principle, a task-arithmetic operation explained in:
https://huggingface.co/papers/2212.04089. The task adapters become more focused and isolated, which
enhances performance on new tasks.
Args:
lora_A : Matrices A in the LoRA layer.
lora_B : Matrices B in the LoRA layer.
"""
if not self.use_gks:
return
elif self.gks_done and not self.gks_added_adapter_names:
return
else:
# 1) compute average A/B over gks_adapter_names
avg_A = torch.stack([lora_A[n].weight for n in self.gks_adapter_names], dim=0).mean(
0
) # shape (r, in_features)
avg_B = torch.stack([lora_B[n].weight for n in self.gks_adapter_names], dim=0).mean(
0
) # shape (out_features, r)
# 2) Subtract the average from task-specific experts
if self.gks_done is False: # GKS is done for all the experts, since it hasn't been done yet.
for name in self.task_adapter_names:
lora_A[name].weight.data.sub_(avg_A)
lora_B[name].weight.data.sub_(avg_B)
else: # GKS is only done on new added experts, since GKS has been done previously.
for name in self.gks_added_adapter_names:
lora_A[name].weight.data.sub_(avg_A)
lora_B[name].weight.data.sub_(avg_B)
# 3) Set gks_done flag as true, so we won't do it again in ArrowLinearVariant.forward().
self.gks_done = True
# Clearing the self.gks_added_adapter_names
self.gks_added_adapter_names = []
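# Hedged toy illustration of the subtraction above (names are illustrative): with two general adapters,
# the per-matrix average is removed once from every task adapter, e.g.
#
#   avg_A = (gks_A0 + gks_A1) / 2
#   task_A0 -= avg_A   # done in-place via .sub_() above
#
# leaving each task adapter with only the knowledge that is not shared with the general adapters.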
def _cast_input_dtype(self, x, dtype: torch.dtype):
"""
Cast the dtype of the forward input `x` to the given dtype, if casting is enabled.
Usually we want this enabled to align the input dtype with the dtype of the weight, but it can be disabled
if necessary by setting `layer.cast_input_dtype_enabled = False`.
Enabling or disabling can be managed via the peft.helpers.disable_lora_input_dtype_casting context manager.
"""
if x is None: # useful e.g. if x is the bias, which can be None
return None
cast_input_dtype_enabled = getattr(self, "cast_input_dtype_enabled", True)
if (not cast_input_dtype_enabled) or (x.dtype == dtype):
return x
return x.to(dtype=dtype)
def forward(self, x, lora_A, lora_B, dropout, scaling):
"""
Applies Arrow routing inside a LoRA layer.
Steps:
1. Compute cosine similarity between each token representation and all adapter prototypes.
2. Select the top-k experts per token and normalize their scores with a softmax.
3. Project tokens into each selected expert’s low-rank space (A weights).
4. Map back to the output space (B weights).
5. Aggregate expert outputs via the weighted sum of their contributions.
6. Apply dropout, scaling, and return the reshaped delta.
- Conceptually, this is a Mixture-of-Experts (MoE) over LoRA adapters,
where coefficients are derived from prototype similarity.
Returns:
delta: LoRA output adjustment computed by Arrow routing.
"""
x = self._cast_input_dtype(x, lora_A[self.task_adapter_names[0]].weight.dtype)
B, *rest, F_in = x.shape
tok = x.view(-1, F_in) # (t, F_in)
t, E = tok.size(0), self.prototypes.size(0)
# We now turn scaling, which is a dict, to tensors in order to use them later
scales_tens = torch.tensor(
[scaling[n] for n in self.task_adapter_names],
device=tok.device,
dtype=tok.dtype,
) # shape (E,)
# 1) similarity — sign-agnostic
sim = torch.abs(tok @ self.prototypes.T) # (t, E)
# 2) top-k + softmax over full E (non-top-k = -inf)
top_v, idx = torch.topk(sim, self.top_k, dim=1)
full_score = tok.new_full((t, E), float("-inf"))
full_score.scatter_(1, idx, top_v)
coeff = torch.softmax(full_score / self.temperature, dim=1) # (t, E)
# 3) stack all A and B weights once
# A_stack: (E, r, in_features), B_stack: (E, out_features, r)
A_stack = torch.stack([lora_A[n].weight for n in self.task_adapter_names], dim=0)
B_stack = torch.stack([lora_B[n].weight for n in self.task_adapter_names], dim=0)
# 4) project tokens into each expert’s low‑rank space:
# z[e] = tok @ A_e.T → shape (t, E, r)
z = torch.einsum("tf, erf -> ter", tok, A_stack)
# 5) lift back each expert’s output:
# y[e] = z[e] @ B_e.T → shape (t, E, out_features)
y = torch.einsum("ter, eor -> teo", z, B_stack)
# 6) apply per-expert scaling before the weighted sum
# y_scaled[t, e, o] = scales[e] * y[t, e, o]
y = y * scales_tens.view(1, -1, 1)
# 7) weighted sum over experts:
# delta_flat[t,o] = Σ_e coeff[t,e] * y[t,e,o]
delta_flat = torch.einsum("te, teo -> to", coeff, y) # (t, out_features)
# 8) dropout and reshape
delta = dropout(delta_flat)
out_dim = delta_flat.size(-1)
return delta.view(B, *rest, out_dim)
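# Hedged toy sketch of steps 1-2 above (shapes are illustrative assumptions, not library API):
#
#   tok = torch.randn(5, 16)                         # 5 tokens, in_features = 16
#   protos = torch.randn(3, 16)                      # 3 expert prototypes
#   sim = (tok @ protos.T).abs()                     # sign-agnostic similarity, shape (5, 3)
#   top_v, idx = torch.topk(sim, k=2, dim=1)
#   scores = tok.new_full(sim.shape, float("-inf")).scatter_(1, idx, top_v)
#   coeff = torch.softmax(scores, dim=1)             # non-selected experts get weight 0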
def check_loaded_lora_compatibility_arrow(model, adapter_names: list[str]):
"""
After loading all adapters into `model`, check they share:
- the same LoRA rank (r)
- identical weight shapes
- identical sets of target_modules
Returns (sorted list of target module names, agreed rank r).
"""
reference = None # {'r':…, 'shapes':(Ashape,Bshape), 'modules':set([...])}
for name in adapter_names:
curr_modules = set()
curr_r = None
curr_shapes = None
for full_name, module in model.named_modules():
if hasattr(module, "lora_A") and name in module.lora_A:
A = module.lora_A[name].weight
B = module.lora_B[name].weight
mod_name = full_name.split(".")[-1]
curr_modules.add(mod_name)
# A has shape (r, in_features); B has shape (out_features, r)
curr_r = A.shape[0]
curr_shapes = (A.shape, B.shape)
if reference is None:
reference = {"r": curr_r, "shapes": curr_shapes, "modules": curr_modules}
else:
if curr_r != reference["r"]:
raise ValueError(f"[{name}] rank mismatch: {curr_r} != {reference['r']}")
if curr_shapes != reference["shapes"]:
raise ValueError(f"[{name}] shape mismatch: {curr_shapes} != {reference['shapes']}")
if curr_modules != reference["modules"]:
raise ValueError(
f"[{name}] target_modules mismatch:\n"
f" this adapter -> {sorted(curr_modules)}\n"
f" reference -> {sorted(reference['modules'])}"
)
agreed_modules = sorted(reference["modules"])
return agreed_modules, int(reference["r"])
def ensure_adapters_target_linear_layers_only(model, adapter_names: list[str]):
"""
Validate that every module holding LoRA weights for any of `adapter_names` is Linear-like: nn.Linear,
bitsandbytes.nn.Linear4bit, nn.Conv1d, or transformers.models.gpt2.modeling_gpt2.Conv1D. If not, raise.
"""
import torch.nn as nn
Linear4bit = None
try:
import bitsandbytes as bnb # type: ignore
Linear4bit = bnb.nn.Linear4bit
except ImportError:
pass
HFConv1D = None
try:
from transformers.models.gpt2.modeling_gpt2 import Conv1D as HFConv1D # type: ignore
except ImportError:
pass
allowed_types = (nn.Linear, nn.Conv1d)
if Linear4bit is not None:
allowed_types = allowed_types + (Linear4bit,)
if HFConv1D is not None:
allowed_types = allowed_types + (HFConv1D,)
offenders = []
for full_name, module in model.named_modules():
if hasattr(module, "lora_A"):
for name in adapter_names:
if name in getattr(module, "lora_A", {}):
base = getattr(module, "base_layer", None) or getattr(module, "original_module", None)
layer_to_check = base if base is not None else module
if not isinstance(layer_to_check, allowed_types):
offenders.append((name, full_name, type(layer_to_check).__name__))
if offenders:
lines = [
"LoRA adapters must only target Linear-like layers "
"(nn.Linear, nn.Conv1d, HF Conv1D, or bitsandbytes.nn.Linear4bit). Found:"
]
for name, full_name, tname in offenders:
lines.append(f" - adapter '{name}' on module '{full_name}' of type {tname}")
raise TypeError("\n".join(lines))
def _resolve_adapter_source(path: str) -> tuple[str, str | None]:
"""
Resolve a user-provided adapter `path` into (model_id, subfolder).
Supports:
- Local path to a folder that contains `adapter_config.json`
- Hub path with subfolder, e.g. "user/repo/ts_expert_0[/more/...]", which becomes:
model_id="user/repo", subfolder="ts_expert_0[/more/...]"
- Plain Hub repo id "user/repo" (no subfolder)
"""
if os.path.isdir(path):
if not os.path.isfile(os.path.join(path, "adapter_config.json")):
raise ValueError(f"Local adapter path '{path}' does not contain 'adapter_config.json'.")
return path, None
parts = path.strip("/").split("/")
if len(parts) >= 2:
model_id = "/".join(parts[:2])
if len(parts) > 2:
subfolder = "/".join(parts[2:])
return model_id, subfolder
return model_id, None
return path, None
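# Example resolutions (paths are illustrative):
#   _resolve_adapter_source("user/repo")              -> ("user/repo", None)
#   _resolve_adapter_source("user/repo/ts_expert_0")  -> ("user/repo", "ts_expert_0")
#   _resolve_adapter_source("/local/adapter_dir")     -> ("/local/adapter_dir", None)  # must contain adapter_config.json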
def create_arrow_model(
base_model: PreTrainedModel,
task_specific_adapter_paths: list[str],
arrow_config: ArrowConfig,
general_adapter_paths: list[str] | None = None,
**adapter_kwargs: Any,
):
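"""
Load the task-specific adapters (named `task_0`, `task_1`, ...) onto `base_model` and, when
`arrow_config.use_gks` is set, the general adapters (`gks_0`, `gks_1`, ...). All adapters are validated to
share the same rank, shapes and target modules and to target Linear-like layers only. Finally an
"arrow_router" adapter is added and activated, which performs Arrow routing over the task adapters.
"""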
if task_specific_adapter_paths is None or len(task_specific_adapter_paths) == 0:
raise ValueError("`task_specific_adapter_paths` should contain at least one adapter path")
from peft import LoraConfig, PeftModel
model_id0, sub0 = _resolve_adapter_source(task_specific_adapter_paths[0])
initial_ts_expert_name = f"{TASK_ADAPTER_PREFIX}0"
first_kwargs = dict(adapter_kwargs)
if sub0 is not None and "subfolder" not in first_kwargs:
first_kwargs["subfolder"] = sub0
model = PeftModel.from_pretrained(
base_model,
model_id=model_id0,
adapter_name=initial_ts_expert_name,
**first_kwargs,
)
for i in range(1, len(task_specific_adapter_paths)):
ts_expert_name = f"{TASK_ADAPTER_PREFIX}{i}"
mid, sub = _resolve_adapter_source(task_specific_adapter_paths[i])
more_kwargs = dict(adapter_kwargs)
if sub is not None and "subfolder" not in more_kwargs:
more_kwargs["subfolder"] = sub
model.load_adapter(
model_id=mid,
adapter_name=ts_expert_name,
**more_kwargs,
)
arrow_config.task_adapter_names = [f"{TASK_ADAPTER_PREFIX}{i}" for i in range(len(task_specific_adapter_paths))]
if arrow_config.use_gks:
if general_adapter_paths is None or len(general_adapter_paths) == 0:
raise ValueError("You should provide general LoRA paths if you want to use GenKnowSub.")
for i in range(len(general_adapter_paths)):
gen_expert_name = f"{GKS_ADAPTER_PREFIX}{i}"
mid, sub = _resolve_adapter_source(general_adapter_paths[i])
gks_kwargs = dict(adapter_kwargs)
if sub is not None and "subfolder" not in gks_kwargs:
gks_kwargs["subfolder"] = sub
model.load_adapter(
model_id=mid,
adapter_name=gen_expert_name,
**gks_kwargs,
)
arrow_config.gks_adapter_names = [f"{GKS_ADAPTER_PREFIX}{i}" for i in range(len(general_adapter_paths))]
else:
arrow_config.gks_adapter_names = []
target_modules, r = check_loaded_lora_compatibility_arrow(
model, adapter_names=arrow_config.task_adapter_names + arrow_config.gks_adapter_names
)
ensure_adapters_target_linear_layers_only(
model, adapter_names=arrow_config.task_adapter_names + arrow_config.gks_adapter_names
)
router_cfg = LoraConfig(
arrow_config=arrow_config,
target_modules=target_modules,
r=r,
)
model.add_adapter(adapter_name="arrow_router", peft_config=router_cfg)
model.set_adapter("arrow_router")
return model
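# Minimal usage sketch (the repo ids below are placeholders, not real adapters):
#
#   from transformers import AutoModelForCausalLM
#   from peft.tuners.lora import ArrowConfig, create_arrow_model
#
#   base = AutoModelForCausalLM.from_pretrained("some-org/some-base-model")
#   model = create_arrow_model(
#       base_model=base,
#       task_specific_adapter_paths=["some-org/adapters/ts_expert_0", "some-org/adapters/ts_expert_1"],
#       arrow_config=ArrowConfig(top_k=2),
#   )
#   # "arrow_router" is already the active adapter; forward passes now use Arrow routing.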
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/lora/arrow.py",
"license": "Apache License 2.0",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_arrow.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import torch
from transformers import AutoModelForCausalLM, AutoModelForImageClassification
from peft import LoraConfig, get_peft_model
from peft.tuners.lora import ArrowConfig, create_arrow_model
from peft.tuners.lora.arrow import _resolve_adapter_source
from tests.testing_utils import hub_online_once
# ─── Fixtures ──────────────────────────────────────────────────────────
@pytest.fixture(scope="module")
def workdir(tmp_path_factory):
"""
Create a temp directory and chdir into it for the duration of the module.
"""
wd = tmp_path_factory.mktemp("arrow_workdir")
old_cwd = os.getcwd()
os.chdir(wd)
yield Path(wd)
os.chdir(old_cwd)
# (pytest will auto-delete wd)
def _create_and_save_adapter(out_dir: Path, rank: int = 4):
"""Helper: build a LoRA adapter around `model` and save into `out_dir`."""
# fan_in_fan_out is set to True because the GPT2 model we use stores its weights in transposed
# (Conv1D) layers; setting it avoids a warning
cfg = LoraConfig(r=rank, target_modules=["c_attn"], fan_in_fan_out=True, init_lora_weights=False)
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
model = AutoModelForCausalLM.from_pretrained(model_id)
peft_model = get_peft_model(model, cfg)
peft_model.save_pretrained(out_dir)
@pytest.fixture(scope="module")
def ts_adapters(workdir: Path):
"""
Build 3 task-specific adapters and return their absolute paths
"""
abs_paths = []
for i in range(3):
sub = f"{workdir}/ts{i}"
_create_and_save_adapter(sub)
abs_paths.append(sub)
return abs_paths
@pytest.fixture(scope="module")
def gen_adapter(workdir: Path):
"""Build 1 general-knowledge adapter and return its absolute path list."""
sub = f"{workdir}/gen0"
_create_and_save_adapter(sub)
return [sub] # list because create_arrow_model expects list
class TestArrowRouting:
def test_incompatible_rank_raises(self, workdir: Path):
"""
Adding adapters with different ranks must raise a ValueError.
"""
# Create two adapters with different ranks targeting the same modules
sub_r4 = workdir / "rank4"
sub_r8 = workdir / "rank8"
_create_and_save_adapter(sub_r4, rank=4)
_create_and_save_adapter(sub_r8, rank=8)
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base = AutoModelForCausalLM.from_pretrained(model_id)
# Expect create_arrow_model to raise due to rank mismatch
with pytest.raises(ValueError, match=r"rank mismatch"):
_ = create_arrow_model(
base_model=base,
task_specific_adapter_paths=[str(sub_r4), str(sub_r8)],
arrow_config=ArrowConfig(top_k=1),
)
def test_arrow_differs_with_extra_expert(self, ts_adapters):
"""
Arrow with 2 experts vs Arrow with 3 experts must produce different logits.
"""
# Arrow over first 2 experts
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base_model_1 = AutoModelForCausalLM.from_pretrained(model_id)
base_model_2 = copy.deepcopy(base_model_1)
cfg_small = ArrowConfig(top_k=2)
m_small = create_arrow_model(
base_model=base_model_1,
task_specific_adapter_paths=ts_adapters[:2],
arrow_config=cfg_small,
).eval()
# Arrow over all 3 experts
cfg_big = ArrowConfig(top_k=2)
m_big = create_arrow_model(
base_model=base_model_2,
task_specific_adapter_paths=ts_adapters,
arrow_config=cfg_big,
).eval()
x = torch.ones(1, 4, dtype=torch.long)
assert not torch.allclose(m_small(x).logits, m_big(x).logits)
def test_arrow_gks_with_load_adapter_later_with_forward(self, ts_adapters, gen_adapter):
"""
Loading the last expert after creating the arrow model should produce the same result as loading all the
experts at once in create_arrow_model(), when forward path is called before adding the new adapter.
"""
# Arrow over all three experts
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base_model_1 = AutoModelForCausalLM.from_pretrained(model_id)
base_model_2 = copy.deepcopy(base_model_1)
cfg_big = ArrowConfig(top_k=2, use_gks=True, rng_seed=42)
m_big = create_arrow_model(
base_model=base_model_1,
task_specific_adapter_paths=ts_adapters,
general_adapter_paths=gen_adapter,
arrow_config=cfg_big,
).eval()
# Arrow over all 2 experts + loading the third expert later
cfg_small_later_big = ArrowConfig(top_k=2, use_gks=True, rng_seed=42)
m_small_later_big = create_arrow_model(
base_model=base_model_2,
task_specific_adapter_paths=ts_adapters[:2],
general_adapter_paths=gen_adapter,
arrow_config=cfg_small_later_big,
)
# Ensuring that the prototypes and gks are done one time by running a forward path
x = torch.ones(1, 4, dtype=torch.long)
m_small_later_big(x)
# Now loading the third expert
m_small_later_big.load_adapter(
model_id=ts_adapters[-1],
adapter_name="new_added_ts_expert",
)
# Activating the new adapter and run forward path on it
m_small_later_big.set_adapter("new_added_ts_expert")
x = torch.ones(3, 5, dtype=torch.long)
m_small_later_big(x)
# Now we switch back to the arrow_router
m_small_later_big.set_adapter("arrow_router")
m_small_later_big.eval()
x = torch.ones(1, 4, dtype=torch.long)
assert torch.allclose(m_big(x).logits, m_small_later_big(x).logits)
def test_arrow_with_load_adapter_later_with_forward_activate_new(self, ts_adapters, gen_adapter):
"""
Loading the last expert after creating the arrow model and activate it should produce different result compared
to the case where arrow_router is activate, and the model's using arrow.
"""
# Arrow over all three experts
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base_model_1 = AutoModelForCausalLM.from_pretrained(model_id)
base_model_2 = copy.deepcopy(base_model_1)
cfg_big = ArrowConfig(top_k=2, use_gks=True, rng_seed=42)
m_big = create_arrow_model(
base_model=base_model_1,
task_specific_adapter_paths=ts_adapters,
general_adapter_paths=gen_adapter,
arrow_config=cfg_big,
).eval()
# Arrow over all 2 experts + loading the third expert later
cfg_small_later_big = ArrowConfig(top_k=2, use_gks=True, rng_seed=42)
m_small_later_big = create_arrow_model(
base_model=base_model_2,
task_specific_adapter_paths=ts_adapters[:2],
general_adapter_paths=gen_adapter,
arrow_config=cfg_small_later_big,
)
# Ensuring that the prototypes and gks are done one time by running a forward path
x = torch.ones(1, 4, dtype=torch.long)
m_small_later_big(x)
# Now loading the third expert
m_small_later_big.load_adapter(
model_id=ts_adapters[-1],
adapter_name="new_added_ts_expert",
)
# The new adapter is activated
m_small_later_big.set_adapter("new_added_ts_expert")
m_small_later_big.eval()
x = torch.ones(1, 4, dtype=torch.long)
assert not torch.allclose(m_big(x).logits, m_small_later_big(x).logits)
def test_arrow_gks_with_load_adapter_later_without_forward(self, ts_adapters, gen_adapter):
"""
Loading the last expert after creating the arrow model should produce the same result as loading all the
experts at once in create_arrow_model()
"""
# Arrow over all three experts
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base_model_1 = AutoModelForCausalLM.from_pretrained(model_id)
base_model_2 = copy.deepcopy(base_model_1)
cfg_big = ArrowConfig(top_k=2, use_gks=True, rng_seed=42)
m_big = create_arrow_model(
base_model=base_model_1,
task_specific_adapter_paths=ts_adapters,
general_adapter_paths=gen_adapter,
arrow_config=cfg_big,
).eval()
# Arrow over all 2 experts + loading the third expert later
cfg_small_later_big = ArrowConfig(top_k=2, use_gks=True, rng_seed=42)
m_small_later_big = create_arrow_model(
base_model=base_model_2,
task_specific_adapter_paths=ts_adapters[:2],
general_adapter_paths=gen_adapter,
arrow_config=cfg_small_later_big,
)
# Now loading the third expert
m_small_later_big.load_adapter(
model_id=ts_adapters[-1],
adapter_name="new_added_ts_expert",
)
m_small_later_big.eval()
x = torch.ones(1, 4, dtype=torch.long)
assert torch.allclose(m_big(x).logits, m_small_later_big(x).logits)
def test_genknowsub_changes_output(self, ts_adapters, gen_adapter):
"""
Arrow+GenKnowSub vs plain Arrow must change logits.
"""
# Plain Arrow
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base_model_1 = AutoModelForCausalLM.from_pretrained(model_id)
base_model_2 = copy.deepcopy(base_model_1)
cfg_plain = ArrowConfig(top_k=2)
m_plain = create_arrow_model(
base_model=base_model_1,
task_specific_adapter_paths=ts_adapters,
arrow_config=cfg_plain,
).eval()
# Arrow + GenKnowSub
cfg_gks = ArrowConfig(top_k=2, use_gks=True)
m_gks = create_arrow_model(
base_model=base_model_2,
task_specific_adapter_paths=ts_adapters,
general_adapter_paths=gen_adapter,
arrow_config=cfg_gks,
).eval()
x = torch.ones(1, 4, dtype=torch.long)
assert not torch.allclose(m_plain(x).logits, m_gks(x).logits)
def test_merging_adapters_raise_error_in_arrow(self, ts_adapters):
"""
Merging/unmerging is not allowed while an ArrowLinearLayer is loaded on the model and active.
"""
# Arrow over first 2 experts
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base_model = AutoModelForCausalLM.from_pretrained(model_id)
cfg_small = ArrowConfig(top_k=2)
m_small = create_arrow_model(
base_model=base_model,
task_specific_adapter_paths=ts_adapters[:2],
arrow_config=cfg_small,
).eval()
with pytest.raises(RuntimeError, match=r"Cannot merge an active Arrow router adapter"):
m_small.merge_and_unload()
def test_conv2d_targets_raise_typeerror_in_arrow(self, workdir):
"""
Adapters applied to Conv2d must be rejected by create_arrow_model() which enforces Linear/Linear4bit-only
targets.
"""
model_id = "peft-internal-testing/tiny-random-ResNetForImageClassification"
with hub_online_once(model_id):
base = AutoModelForImageClassification.from_pretrained(model_id)
# Build a LoRA adapter targeting a Conv2d
cfg = LoraConfig(r=4, target_modules=["convolution"], init_lora_weights=False)
peft_model = get_peft_model(copy.deepcopy(base), cfg)
conv_dir = workdir / "cv0"
peft_model.save_pretrained(conv_dir)
# Expect create_arrow_model to raise TypeError
with pytest.raises(TypeError, match=r"LoRA adapters must only target Linear"):
_ = create_arrow_model(
base_model=base,
task_specific_adapter_paths=[str(conv_dir)],
arrow_config=ArrowConfig(top_k=1),
)
def test_arrow_forward_float16_no_autocast_with_merging(self, ts_adapters):
"""
Run Arrow in float16 with autocast disabled; forward should work, while merge/unmerge operations must raise for
Arrow models.
"""
import platform
try:
_ = torch.zeros(1, dtype=torch.float16)
except Exception:
pytest.skip(reason="Test requires float16 support")
if platform.system() == "Darwin":
pytest.skip(reason="MacOS does not support multiple ops in float16")
model_id = "peft-internal-testing/tiny-random-gpt2"
# Create base in fp16 (no manual assignment to .dtype)
with hub_online_once(model_id):
base = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16)
cfg = ArrowConfig(top_k=2)
# Build Arrow model and disable adapter dtype autocast
model = create_arrow_model(
base_model=base,
task_specific_adapter_paths=ts_adapters,
arrow_config=cfg,
autocast_adapter_dtype=False,
dtype=torch.float16,
).eval()
X = {
"input_ids": torch.ones(1, 4, dtype=torch.long),
"attention_mask": torch.ones(1, 4, dtype=torch.long),
}
# Forward should work in fp16
_ = model(**X)
# Merge must fail on Arrow models
with pytest.raises(RuntimeError, match=r"Cannot merge an active Arrow router adapter"):
model.merge_adapter(safe_merge=False)
with pytest.raises(RuntimeError, match=r"Cannot merge an active Arrow router adapter"):
_ = model.merge_and_unload()
def test_prototypes_not_recomputed_on_repeated_forward(self, ts_adapters):
"""
Repeated calls to forward should not recompute prototypes. We verify by spying on
ArrowLoraLinearLayer.top_right_singular_vec_from_BA(), which is only called when prototypes are (re)built.
"""
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base = AutoModelForCausalLM.from_pretrained(model_id)
cfg = ArrowConfig(top_k=2)
model = create_arrow_model(
base_model=base,
task_specific_adapter_paths=ts_adapters,
arrow_config=cfg,
).eval()
# Find one Arrow layer instance on the model
arrow_layer = None
for _, module in model.named_modules():
if hasattr(module, "lora_arrow") and "arrow_router" in module.lora_arrow:
arrow_layer = module.lora_arrow["arrow_router"]
break
assert arrow_layer is not None, "Arrow router layer not found on model"
x = torch.ones(1, 4, dtype=torch.long)
# Spy on the internal proto computation; should run once (E calls for E experts)
with patch.object(
arrow_layer,
"top_right_singular_vec_from_BA",
wraps=arrow_layer.top_right_singular_vec_from_BA,
) as spy:
_ = model(x)
first_calls = spy.call_count
assert first_calls == len(arrow_layer.task_adapter_names)
# Call forward again; prototypes should be cached, so no extra calls
_ = model(x)
assert spy.call_count == first_calls
def test_training_updates_when_task_adapter_active(ts_adapters):
"""
Ensure a simple training step works: compute a dummy loss, backward, and take an optimizer step. Verify that
task-adapter parameters update.
"""
model_id = "peft-internal-testing/tiny-random-gpt2"
with hub_online_once(model_id):
base = AutoModelForCausalLM.from_pretrained(model_id)
# Build Arrow model over two experts
cfg = ArrowConfig(top_k=2)
model = create_arrow_model(
base_model=base,
task_specific_adapter_paths=ts_adapters[:2],
arrow_config=cfg,
)
model.train()
# Switch to a specific task adapter for training (vanilla LoRA)
model.set_adapter("task_0")
# Choose a representative parameter to check updates (task_0 A weight)
rep_name = None
for n, _ in model.named_parameters():
if ".lora_A.task_0.weight" in n:
rep_name = n
break
assert rep_name is not None, "task_0 LoRA A weight not found"
rep_param = dict(model.named_parameters())[rep_name]
before = rep_param.detach().clone()
# Optimizer over trainable params (task_0 now active and trainable)
opt = torch.optim.SGD([p for p in model.parameters() if p.requires_grad], lr=1e-2)
# Dummy batch
vocab = model.config.vocab_size
input_ids = torch.randint(0, vocab, (2, 8))
attention_mask = torch.ones_like(input_ids)
# Compute loss and update
opt.zero_grad()
out = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids)
assert hasattr(out, "loss") and out.loss is not None
out.loss.backward()
opt.step()
after = rep_param.detach().clone()
assert not torch.allclose(before, after), "Active task adapter parameters did not update after optimizer step"
@pytest.mark.parametrize(
"case",
[
"local_root",
"local_nested",
"hub_repo",
"hub_with_sub",
],
)
def test_resolve_adapter_source_variants(tmp_path: Path, case: str):
"""
Ensure `_resolve_adapter_source` correctly handles:
- Local dir (containing adapter_config.json)
- Local nested subfolder
- Hub repo id "user/repo"
- Hub repo with subfolder "user/repo/sub/folder"
"""
if case == "local_root":
d = tmp_path / "adapter_local_root"
d.mkdir(parents=True, exist_ok=True)
(d / "adapter_config.json").write_text("{}")
model_id, sub = _resolve_adapter_source(str(d))
assert model_id == str(d)
assert sub is None
elif case == "local_nested":
d = tmp_path / "repo_like" / "sub" / "folder"
d.mkdir(parents=True, exist_ok=True)
(d / "adapter_config.json").write_text("{}")
model_id, sub = _resolve_adapter_source(str(d))
assert model_id == str(d)
assert sub is None
elif case == "hub_repo":
model_id, sub = _resolve_adapter_source("user/repo")
assert model_id == "user/repo"
assert sub is None
elif case == "hub_with_sub":
model_id, sub = _resolve_adapter_source("user/repo/sub/folder")
assert model_id == "user/repo"
assert sub == "sub/folder"
else:
raise AssertionError(f"unknown case: {case}")
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_arrow.py",
"license": "Apache License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/alora_finetuning/alora_finetuning.py | import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
def train_model(
base_model: str,
data_path: str,
output_dir: str,
batch_size: int,
num_epochs: int,
learning_rate: float,
cutoff_len: int,
val_set_size: int,
invocation_string: str,
quantize: bool,
eval_step: int,
save_step: int,
device: str,
lora_r: int,
lora_alpha: int,
lora_dropout: float,
lora_target_modules: str,
hub_model_id: str,
push_to_hub: bool,
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
hf_token = os.getenv("HF_TOKEN")
if device == "auto":
device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
else:
device = torch.device(device)
print(f"Using device: {device}")
tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
tokenizer.pad_token = tokenizer.unk_token
invocation_tokens = tokenizer.encode(invocation_string, add_special_tokens=False)
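# The invocation tokens mark where the aLoRA adapter becomes active: only tokens after this sequence are
# adapted, which is what lets the adapter reuse the base model's KV cache for the preceding context
# (see the paper link in `model_inference` below).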
if quantize:
if (torch.cuda.is_available() and torch.cuda.is_bf16_supported()) or torch.xpu.is_available():
bnb_4bit_compute_dtype = torch.bfloat16
else:
bnb_4bit_compute_dtype = torch.float16
model = AutoModelForCausalLM.from_pretrained(
base_model,
token=hf_token,
quantization_config=BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=bnb_4bit_compute_dtype,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
),
)
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
else:
model = AutoModelForCausalLM.from_pretrained(base_model, token=hf_token)
lora_config = LoraConfig(
task_type="CAUSAL_LM",
alora_invocation_tokens=invocation_tokens,
r=lora_r,
lora_alpha=lora_alpha,
target_modules=(lora_target_modules.split(",") if lora_target_modules else ["q_proj", "k_proj", "v_proj"]),
lora_dropout=lora_dropout,
bias="none",
)
model = get_peft_model(model, lora_config)
model.to(device)
tokenizer.pad_token = tokenizer.eos_token
dataset = load_dataset(data_path)
def tokenize_function(examples):
formatted_texts = [
tokenizer.apply_chat_template(
[
{"role": "user", "content": user_msg},
{"role": "assistant", "content": assistant_msg},
],
tokenize=False, # get plain text first
add_generation_prompt=False,
)
for user_msg, assistant_msg in zip(examples["input"], examples["output"])
]
# 2) Tokenize those texts
model_inputs = tokenizer(
formatted_texts,
padding="max_length",
truncation=True,
max_length=cutoff_len,
)
labels = []
for ids in model_inputs["input_ids"]:
labels.append([(token_id if token_id != tokenizer.pad_token_id else -100) for token_id in ids])
model_inputs["labels"] = labels
return model_inputs
# Tokenize the dataset and prepare for training
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
# Data collator to dynamically pad the batched examples
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_steps=100,
weight_decay=0.01,
logging_dir="./logs",
logging_steps=eval_step,
save_steps=save_step,
save_total_limit=2,
push_to_hub=push_to_hub,
hub_model_id=hub_model_id,
gradient_accumulation_steps=16,
fp16=True,
learning_rate=learning_rate,
hub_token=hf_token,
)
if torch.cuda.is_available():
torch.cuda.empty_cache()
elif torch.xpu.is_available():
torch.xpu.empty_cache()
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
)
trainer.train()
if push_to_hub:
trainer.push_to_hub(commit_message="Fine-tuned model")
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
def model_inference(model_path: str, adapter_path: str, prompt: str = None, data_path: str = None):
"""
Simple inference with the tuned aLoRA adapter. Note that the aLoRA adapter can (but does not need to)
reuse KV cache created by the base model, for example during a prior generation turn.
Purely for demonstration purposes. See the [paper](https://huggingface.co/papers/2504.12397)
for realistic multiturn cache reuse examples.
"""
if prompt is None:
# Use first row of test data
dataset = load_dataset(data_path)
prompt = dataset["test"][0]["input"]
tokenizer = AutoTokenizer.from_pretrained(model_path)
base_model = AutoModelForCausalLM.from_pretrained(model_path)
alora_model = PeftModel.from_pretrained(base_model, adapter_path)
chat = [{"role": "user", "content": prompt}]
text = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(text, return_tensors="pt").to(base_model.device)
# Generate answer with adapter
output_dict = alora_model.generate(**inputs, return_dict_in_generate=True, max_new_tokens=20)
alora_outputs = output_dict.sequences
# Print results
print(f"Prompt: {text}")
response = tokenizer.decode(alora_outputs[0][inputs["input_ids"].shape[1] :], skip_special_tokens=True)
print(f"Trained adapter response: {response}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Fine-tune Mistral with Activated LoRA")
parser.add_argument(
"--base_model", type=str, default="mistralai/Mistral-7B-Instruct-v0.3", help="Base model path or name"
)
parser.add_argument(
"--data_path",
type=str,
default="Lots-of-LoRAs/task1660_super_glue_question_generation",
help="Dataset path or name",
)
parser.add_argument(
"--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model"
)
parser.add_argument("--batch_size", type=int, default=2, help="Batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate")
parser.add_argument("--cutoff_len", type=int, default=2048, help="Cutoff length for tokenization")
parser.add_argument("--val_set_size", type=int, default=500, help="Validation set size")
parser.add_argument(
"--invocation_string",
type=str,
default="[/INST]",
help="String that activates the aLoRA adapter. Model dependent.",
)
parser.add_argument("--quantize", action="store_true", help="Use quantization")
parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval")
parser.add_argument("--save_step", type=int, default=100, help="Save step interval")
parser.add_argument("--device", type=str, default="auto", help="Device to use for training")
parser.add_argument("--lora_r", type=int, default=32, help="LoRA rank")
parser.add_argument("--lora_alpha", type=int, default=32, help="LoRA alpha")
parser.add_argument("--lora_dropout", type=float, default=0.05, help="LoRA dropout rate")
parser.add_argument(
"--lora_target_modules", type=str, default=None, help="Comma-separated list of target modules for LoRA"
)
parser.add_argument(
"--hub_model_id",
type=str,
default="path/to/repo",
help="Repository name to push the model on the Hugging Face Hub",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
args = parser.parse_args()
train_model(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
invocation_string=args.invocation_string,
quantize=args.quantize,
eval_step=args.eval_step,
save_step=args.save_step,
device=args.device,
lora_r=args.lora_r,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
lora_target_modules=args.lora_target_modules,
hub_model_id=args.hub_model_id,
push_to_hub=args.push_to_hub,
)
print("Model trained. Running test inference.")
model_inference(model_path=args.base_model, adapter_path=args.output_dir, data_path=args.data_path)
| {
"repo_id": "huggingface/peft",
"file_path": "examples/alora_finetuning/alora_finetuning.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:examples/road_finetuning/road_finetuning.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import RoadConfig, get_peft_model, prepare_model_for_kbit_training
def train_model(
base_model: str,
data_path: str,
output_dir: str,
batch_size: int,
num_epochs: int,
learning_rate: float,
cutoff_len: int,
val_set_size: int,
quantize: bool,
eval_step: int,
save_step: int,
device: str,
variant: str,
road_target_modules: str,
hub_model_id: str,
push_to_hub: bool,
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
hf_token = os.getenv("HF_TOKEN")
# Setup device
device = torch.device(device)
print(f"Using device: {device}")
# load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
# IF YOU WANNA QUANTIZE THE MODEL
if quantize:
model = AutoModelForCausalLM.from_pretrained(
base_model,
token=hf_token,
quantization_config=BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=(
torch.bfloat16 if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else torch.float16
),
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
),
)
# setup for quantized training
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
else:
model = AutoModelForCausalLM.from_pretrained(base_model, token=hf_token, device_map="auto")
# RoAd config for the PEFT model
road_config = RoadConfig(
variant=variant,  # RoAd variant: road_1, road_2, or road_4
target_modules=(
road_target_modules.split(",")
if road_target_modules
else ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
),
)
# get the peft model with RoAd config
model = get_peft_model(model, road_config)
model.to(device) # MODEL TO GPU/CUDA
tokenizer.pad_token = tokenizer.eos_token
# Load the dataset
dataset = load_dataset(data_path)
def tokenize_function(examples):
inputs = tokenizer(examples["text"], padding="max_length", truncation=True, max_length=cutoff_len)
inputs["labels"] = inputs["input_ids"].copy() # setting labels for a language modeling task
return inputs
# Tokenize the dataset and prepare for training
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
# Data collator to dynamically pad the batched examples
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
# Define training arguments
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_steps=100,
weight_decay=0.01,
logging_dir="./logs",
logging_steps=eval_step,
save_steps=save_step,
save_total_limit=2,
push_to_hub=push_to_hub,
hub_model_id=hub_model_id,
gradient_accumulation_steps=16,
fp16=True,
learning_rate=learning_rate,
hub_token=hf_token,
)
# Clear CUDA cache to free memory
torch.cuda.empty_cache()
# Initialize the Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
)
# Start model training
trainer.train()
# Save and push the trained model and tokenizer
if push_to_hub:
# Push the main model to the hub
trainer.push_to_hub(commit_message="Fine-tuned model")
# Save the model and tokenizer locally
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Fine-tune LLaMA with DoRA and PEFT")
parser.add_argument("--base_model", type=str, default="huggyllama/llama-7b", help="Base model path or name")
parser.add_argument(
"--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name"
)
parser.add_argument(
"--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model"
)
parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=3e-3, help="Learning rate")
parser.add_argument("--cutoff_len", type=int, default=512, help="Cutoff length for tokenization")
parser.add_argument("--val_set_size", type=int, default=500, help="Validation set size")
parser.add_argument("--quantize", action="store_true", help="Use quantization")
parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval")
parser.add_argument("--save_step", type=int, default=100, help="Save step interval")
parser.add_argument("--device", type=str, default="cuda:0", help="Device to use for training")
parser.add_argument(
"--variant", type=str, default="road_1", choices=["road_1", "road_2", "road_4"], help="RoAD variant"
)
parser.add_argument(
"--road_target_modules", type=str, default=None, help="Comma-separated list of target modules for RoAd"
)
parser.add_argument(
"--hub_model_id",
type=str,
default="path/to/repo",
help="Repository name to push the model on the Hugging Face Hub",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
args = parser.parse_args()
train_model(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
quantize=args.quantize,
eval_step=args.eval_step,
save_step=args.save_step,
device=args.device,
variant=args.variant,
road_target_modules=args.road_target_modules,
hub_model_id=args.hub_model_id,
push_to_hub=args.push_to_hub,
)
| {
"repo_id": "huggingface/peft",
"file_path": "examples/road_finetuning/road_finetuning.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/road/bnb.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Any, Optional
import bitsandbytes as bnb
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.integrations import dequantize_bnb_weight
from .config import RoadVariant
from .layer import RoadLayer, _apply_road, _get_delta_weight
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, RoadLayer):
# Road implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
variant: RoadVariant = "road_1",
group_size: int = 64,
init_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
RoadLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
variant=variant,
group_size=group_size,
init_weights=init_weights,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self._available_adapters:
warnings.warn(
"Merge road module to 8-bit linear may get different generations due to rounding errors."
)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
# Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8
# dequantization directly
output = dequantize_bnb_weight(weight, state=state)
road_R = _get_delta_weight(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter].data,
self.road_alpha[active_adapter].data,
)
w_data = torch.matmul(road_R, output.to(road_R.dtype))
w_data = w_data.to(road_R.dtype).to(road_R.device).contiguous()
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
if self.get_base_layer().bias is not None:
bias = self.get_base_layer().bias
orig_dtype = bias.dtype
bias_data = bias.data
new_bias = torch.matmul(road_R, bias_data.to(road_R.dtype))
bias.data = new_bias.to(orig_dtype)
state.reset_grads()
self.merged_adapters.append(active_adapter)
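# In effect, the merge above bakes the RoAd transform into the quantized base layer: the stored int8
# weight W is replaced by road_R @ W (and the bias by road_R @ b), so the adapted projection is applied
# without keeping the adapter parameters around at inference time.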
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self._available_adapters:
warnings.warn(
"Unmerge road module to 8-bit linear may get different generations due to rounding errors."
)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state=state)
road_R = _get_delta_weight(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter].data,
self.road_alpha[active_adapter].data,
)
inv_road_R = torch.linalg.inv(road_R.to(torch.float32)).to(road_R.dtype)
w_data = torch.matmul(inv_road_R, output.to(road_R.dtype))
w_data = w_data.to(road_R.dtype).to(road_R.device).contiguous()
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
if self.get_base_layer().bias is not None:
bias = self.get_base_layer().bias
orig_dtype = bias.dtype
bias_data = bias.data
                        new_bias = torch.matmul(inv_road_R, bias_data.to(road_R.dtype))
bias.data = new_bias.to(orig_dtype)
state.reset_grads()
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
result = self._cast_input_dtype(result, self.road_theta[active_adapter].dtype)
result = _apply_road(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter],
self.road_alpha[active_adapter],
result,
)
if requires_conversion:
                        result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "road." + rep
def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_8bit = kwargs.get("loaded_in_8bit", False)
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
eightbit_kwargs = kwargs.copy()
eightbit_kwargs.update(
{
"has_fp16_weights": target.state.has_fp16_weights,
"threshold": target.state.threshold,
"index": target.index,
}
)
new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs)
return new_module
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, RoadLayer):
        # Road implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
variant: RoadVariant = "road_1",
group_size: int = 64,
init_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
RoadLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
variant=variant,
group_size=group_size,
init_weights=init_weights,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self._available_adapters:
warnings.warn(
"Merge oft module to 4-bit linear may get different generations due to rounding errors."
)
# Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930
weight = self.get_base_layer().weight
kwargs = weight.__dict__
output = dequantize_bnb_weight(weight, state=weight.quant_state)
road_R = _get_delta_weight(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter].data,
self.road_alpha[active_adapter].data,
)
w_data = torch.matmul(road_R, output.to(road_R.dtype))
w_data = w_data.to(road_R.dtype).to(road_R.device)
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
if "bnb_quantized" in kwargs:
kwargs["bnb_quantized"] = False
kwargs["requires_grad"] = False
kwargs.pop("data", None)
# torch.compile can introduce attributes preceded by '_', remove them
kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device)
if self.get_base_layer().bias is not None:
bias = self.get_base_layer().bias
orig_dtype = bias.dtype
bias_data = bias.data
new_bias = torch.matmul(road_R, bias_data.to(road_R.dtype))
bias.data = new_bias.to(orig_dtype)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self._available_adapters:
warnings.warn(
"Unmerge oft module to 4-bit linear may get different generations due to rounding errors."
)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
output = dequantize_bnb_weight(weight, state=weight.quant_state)
road_R = _get_delta_weight(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter].data,
self.road_alpha[active_adapter].data,
)
inv_road_R = torch.linalg.inv(road_R.to(torch.float32)).to(road_R.dtype)
w_data = torch.matmul(inv_road_R, output.to(road_R.dtype))
w_data = w_data.to(road_R.dtype).to(road_R.device)
if "bnb_quantized" in kwargs:
kwargs["bnb_quantized"] = False
kwargs["requires_grad"] = False
kwargs.pop("data", None)
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device)
if self.get_base_layer().bias is not None:
bias = self.get_base_layer().bias
orig_dtype = bias.dtype
bias_data = bias.data
                        new_bias = torch.matmul(inv_road_R, bias_data.to(road_R.dtype))
bias.data = new_bias.to(orig_dtype)
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
                # As per Tim Dettmers, for 4bit a defensive clone may be needed here, because in some cases backprop
                # does not work on a manipulated view. The RoAd path below reassigns `result` rather than modifying
                # it in place, so the clone is currently left commented out.
                # result = result.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
result = self._cast_input_dtype(result, self.road_theta[active_adapter].dtype)
result = _apply_road(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter],
self.road_alpha[active_adapter],
result,
)
if requires_conversion:
                        result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_4bit = kwargs.get("loaded_in_4bit", False)
if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update(
{
"compute_dtype": target_base_layer.compute_dtype,
"compress_statistics": target_base_layer.weight.compress_statistics,
"quant_type": target_base_layer.weight.quant_type,
}
)
new_module = Linear4bit(target, adapter_name, **fourbit_kwargs)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/road/bnb.py",
"license": "Apache License 2.0",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/road/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
RoadVariant = Literal["road_1", "road_2", "road_4"]
@dataclass
class RoadConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`RoadModel`]. RoAd adapter is proposed in
https://huggingface.co/papers/2409.00119.
Args:
variant (Union[`RoadVariant`, `str`]):
The variant of the Road model to use. It can be one of road_1, road_2, or road_4. Refer to the paper for
more details.
            - road_1: Uses one scale and one angle shared by both elements of each 2D pair.
              This variant has the lowest number of parameters: it stores a number of parameters equal to the output
              hidden size for each layer that RoAd is applied to.
            - road_2: Uses a separate scale and angle for each element.
              This variant has 2x the number of parameters compared to road_1.
            - road_4: Uses two separate scales and angles for each element.
              This variant has 4x the number of parameters compared to road_1.
group_size (`int`):
Group size defines how elements are grouped together into 2D vectors for rotation. Within each group
element 0 is paired with element group_size/2, then element 1 is paired with element group_size/2+1 and so
on. This has no effect on the model performance, since elements are unordered, however it has some effect
on inference speed when used in e.g. VLLM. For best speed group size of at least 32 or 64 (the default) is
recommended. Note that model hidden size (or hidden size per partition when used with tensor parallelism)
must be divisible by group_size, so for very small models you might need to reduce this parameter.
init_weights (`bool`):
Whether to perform initialization of RoAd weights.
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen (if
the model is a PreTrainedModel, the output layer excluded). If this is not specified, modules will be
chosen according to the model architecture. If the architecture is not known, an error will be raised -- in
this case, you should specify the target modules manually.
modules_to_save (`List[str]`):
List of modules apart from Road layers to be set as trainable and saved in the final checkpoint.
"""
variant: Union[str, RoadVariant] = field(
default="road_1",
metadata={"help": ("Variant of the Road model to use.")},
)
group_size: int = field(
default=64,
metadata={
"help": (
"Group size defines how elements are grouped together into 2D vectors for rotation. "
"Within each group element 0 is paired with element group_size/2, "
"then element 1 is paired with element group_size/2+1 and so on. "
"This has no effect on the model performance, since elements are unordered, "
"however it has some effect on inference speed when used in e.g. VLLM. "
"For best speed group size of at least 64 is recommended. "
"Note that model hidden size (or hidden size per partition when used with tensor parallelism) "
"must be divisible by group_size, so for very small models you might need to reduce this parameter."
)
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the RoAd layers with their default initialization. Don't change "
"this setting, except if you know exactly what you're doing."
),
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with Road."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'."
"This can also be a wildcard 'all-linear' which matches all linear/Conv1D "
"(if the model is a PreTrainedModel, the output layer excluded)."
"If not specified, modules will be chosen according to the model architecture, If the architecture is "
"not known, an error will be raised -- in this case, you should specify the target modules manually."
),
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules apart from RoAd layers to be set as trainable and saved in the final checkpoint. For"
" example, in Sequence Classification or Token Classification tasks, the final layer"
" `classifier/score` are randomly initialized and as such need to be trainable and saved."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.ROAD
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
if self.variant not in ["road_1", "road_2", "road_4"]:
raise ValueError(f"Invalid variant {self.variant} specified. Please choose from road_1, road_2 or road_4")
if self.group_size <= 0 or self.group_size % 2 != 0:
raise ValueError(f"The group_size must be divisible by 2 when using RoadLayer, but got {self.group_size}.")
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/road/config.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/road/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from .config import RoadConfig, RoadVariant
class RoadLayer(BaseTunerLayer):
"""
Road layer.
    Generally the idea of RoAd is to split the input vector into many 2D vectors and rotate each 2D vector with its
    own 2D rotation matrix. For additional flexibility, each rotation matrix is multiplied by a trainable scale.
    When applied to a vector as R @ x, each pair of elements of x is transformed like this: `y₀ = x₀ * α * cosθ - xₙ *
    α * sinθ` and `yₙ = x₀ * α * sinθ + xₙ * α * cosθ`.
    The scales α and angles θ are learned for each pair of elements and, moreover, each of the 4 entries of the
    rotation matrix may be different (when using variant road_2 or road_4).
    Note that instead of using two consecutive elements x₀ and x₁, we first split the whole vector into groups and
    pair elements from the first half of a group with the corresponding elements of its second half, which allows for
    a more efficient inference implementation.
    The adapter only needs to store the angles θ and scales α, rather than the full matrix R, and the inference
    implementation only needs to do elementwise vector multiplications.
For merging the weights, we make use of the following formula: R @ (W @ x + b) = (R @ W) @ x + R @ b. The lhs part
is how it is used in unmerged state (using efficient elementwise implementation instead of matrix multiplication)
and the rhs part is how it is used in merged state where (R @ W) becomes the new weight matrix and R @ b becomes
the new bias.
"""
adapter_layer_names: tuple[str, ...] = ("road_theta", "road_alpha")
other_param_names: tuple[str, ...] = ("variant", "group_size")
def __init__(self, base_layer: nn.Module, ephemeral_gpu_offload: bool = False, **kwargs) -> None:
self.base_layer = base_layer
self.variant = {}
self.group_size = {}
self.road_theta = nn.ParameterDict({})
self.road_alpha = nn.ParameterDict({})
self._disable_adapters = False
self.merged_adapters = []
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
else:
raise ValueError(f"Unsupported layer type '{type(base_layer)}' encountered, cannot apply RoAd adapter.")
self.in_features = in_features
self.out_features = out_features
@property
def _available_adapters(self) -> set[str]:
return {*self.road_theta}
def update_layer(
self,
adapter_name,
variant,
group_size,
init_weights,
inference_mode: bool = False,
):
self.variant[adapter_name] = variant
self.group_size[adapter_name] = group_size
if self.out_features % group_size != 0:
raise ValueError(
f"The out_features of the base layer must be divisible by group_size ({group_size}) when using RoadLayer."
)
# Actual trainable parameters
if variant == "road_1":
size = self.out_features // 2
elif variant == "road_2":
size = self.out_features
elif variant == "road_4":
size = self.out_features * 2
else:
raise ValueError(
f"Unsupported variant {variant} for RoadLayer. Supported variants are road_1, road_2, and road_4."
)
self.road_theta[adapter_name] = nn.Parameter(torch.empty(size))
self.road_alpha[adapter_name] = nn.Parameter(torch.empty(size))
self.reset_parameters(adapter_name, init_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters, inference_mode=inference_mode)
def reset_parameters(self, adapter_name, init_weights):
if init_weights is False:
nn.init.normal_(self.road_theta[adapter_name].data, mean=0.0, std=0.5)
nn.init.normal_(self.road_alpha[adapter_name].data, mean=1.0, std=0.5)
return
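        # Default initialization: θ = 0 and α = 1 make each 2D rotation an identity transform, so the adapted layer
        # initially behaves exactly like the base layer.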
nn.init.zeros_(self.road_theta[adapter_name].data)
nn.init.ones_(self.road_alpha[adapter_name].data)
class Linear(nn.Module, RoadLayer):
# Road implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
variant: RoadVariant = "road_1",
group_size: int = 64,
init_weights: Union[bool, str] = True,
**kwargs,
) -> None:
super().__init__()
RoadLayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
variant,
group_size,
init_weights=init_weights,
)
def _check_forward_args(self, x, *args, **kwargs):
"""Check if the arguments are compatible with the configs and state of the model"""
adapter_names = kwargs.get("adapter_names", None)
if adapter_names is None:
return
if len(x) != len(adapter_names):
msg = (
"Length of `adapter_names` should be the same as the number of inputs, but got "
f"{len(adapter_names)} and {len(x)} respectively."
)
raise ValueError(msg)
if self.merged:
# It is unclear what would be the right thing to do if users pass adapter_names and there are merged
# adapters. Therefore, it is better to raise an error in this case.
msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first."
raise ValueError(msg)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
elif adapter_names is not None:
result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
torch_result_dtype = result.dtype
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
result = self._cast_input_dtype(result, self.road_theta[active_adapter].dtype)
result = _apply_road(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter],
self.road_alpha[active_adapter],
result,
)
result = result.to(torch_result_dtype)
return result
def _mixed_batch_forward(
self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any
) -> torch.Tensor:
# This is a special method that handles the case when users pass the argument `adapter_names`. This is an
# extra argument that allows mixing different adapters in the same batch at inference time.
result = self.base_layer(x, *args, **kwargs)
unique_adapters = set(adapter_names)
sub_batch_indices_list = []
for adapter in unique_adapters:
sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter])
for i, active_adapter in enumerate(unique_adapters):
if active_adapter == "__base__":
continue
if active_adapter not in self._available_adapters:
continue
dtype = self.road_theta[active_adapter].data.dtype
# getting the sub-batch, passing it to Road layers and updating the corresponding indices of the linear
# layer output
sub_batch = result[sub_batch_indices_list[i]].to(dtype)
result[sub_batch_indices_list[i]] = _apply_road(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter],
self.road_alpha[active_adapter],
sub_batch,
)
return result
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If `None`, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self._available_adapters:
base_layer = self.get_base_layer()
orig_dtype = base_layer.weight.dtype
road_R = _get_delta_weight(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter].data,
self.road_alpha[active_adapter].data,
)
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
orig_weight = torch.matmul(road_R.to(orig_dtype), orig_weight)
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weight.contiguous().to(orig_dtype)
if base_layer.bias is not None:
orig_bias = base_layer.bias.clone()
orig_bias = torch.matmul(road_R.to(orig_dtype), orig_bias)
if not torch.isfinite(orig_bias).all():
raise ValueError(
f"NaNs detected in the merged bias. The adapter {active_adapter} seems to be broken"
)
base_layer.bias.data = orig_bias.contiguous().to(orig_dtype)
else:
orig_weight = base_layer.weight.data
orig_weight = torch.matmul(road_R.to(orig_dtype), orig_weight)
base_layer.weight.data = orig_weight.contiguous().to(orig_dtype)
if base_layer.bias is not None:
orig_bias = base_layer.bias.data
orig_bias = torch.matmul(road_R.to(orig_dtype), orig_bias)
base_layer.bias.data = orig_bias.contiguous().to(orig_dtype)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
# Going in reverse order
active_adapter = self.merged_adapters.pop()
if active_adapter in self._available_adapters:
weight = self.get_base_layer().weight
orig_dtype = weight.dtype
road_R = _get_delta_weight(
self.variant[active_adapter],
self.group_size[active_adapter],
self.road_theta[active_adapter].data,
self.road_alpha[active_adapter].data,
)
                # Since our matrices are not necessarily orthogonal, we need the inverse instead of the transpose.
                # In practice we expect this to basically always work, since we start from a block-diagonal
                # rotation matrix.
inv_road_R = torch.linalg.inv(road_R.to(torch.float32)).to(orig_dtype)
orig_weight = torch.matmul(inv_road_R, weight.data)
weight.data = orig_weight.contiguous()
if self.get_base_layer().bias is not None:
orig_bias = torch.matmul(inv_road_R, self.get_base_layer().bias.data)
self.get_base_layer().bias.data = orig_bias.contiguous()
def __repr__(self) -> str:
rep = super().__repr__()
return "road." + rep
def _get_delta_weight(variant: RoadVariant, group_size: int, road_theta: torch.Tensor, road_alpha: torch.Tensor):
first_col, second_col = _prepare_cols(variant, group_size, road_theta, road_alpha)
    # To help understand the logic below, consider how RoPE embeddings work;
    # here it is similar, but done in groups.
# https://discuss.huggingface.co/t/is-llama-rotary-embedding-implementation-correct/44509/3
# First column is simply put on the main diagonal
output_tensor = torch.diag(first_col)
# For second column we need to swap each half groups and add minus sign
size = second_col.shape[0]
swapped_second_col = second_col.reshape(-1, 2, group_size // 2)[:, [1, 0], :].flatten()
rotated_diag_second_col = torch.diag(swapped_second_col).reshape(-1, 2, group_size // 2, size)[:, [1, 0], :, :]
rotated_diag_second_col[:, 0, :, :] *= -1
rotated_diag_second_col = rotated_diag_second_col.reshape(size, size)
output_tensor += rotated_diag_second_col
return output_tensor
def _prepare_cols(
variant: RoadVariant, group_size: int, road_theta: torch.Tensor, road_alpha: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
# In inference mode, this can be cached
if variant == "road_1":
# In each group there are only group_size // 2 parameters that are reused
road_theta = road_theta.reshape(-1, group_size // 2).repeat_interleave(2, dim=0).flatten()
road_alpha = road_alpha.reshape(-1, group_size // 2).repeat_interleave(2, dim=0).flatten()
theta_cos = road_theta.cos()
theta_sin = road_theta.sin()
first_col = road_alpha * theta_cos
second_col = road_alpha * theta_sin
elif variant == "road_2":
# Each group has exactly group_size parameters
theta_cos = road_theta.cos()
theta_sin = road_theta.sin()
first_col = road_alpha * theta_cos
second_col = road_alpha * theta_sin
elif variant == "road_4":
# Each group has 2*group_size parameters, first half used for first column, second half for second column
road_theta = road_theta.reshape(-1, 2, group_size)
theta_cos = road_theta[:, 0, :].cos().flatten()
theta_sin = road_theta[:, 1, :].sin().flatten()
road_alpha = road_alpha.reshape(-1, 2, group_size)
alpha_1 = road_alpha[:, 0, :].flatten()
alpha_2 = road_alpha[:, 1, :].flatten()
first_col = alpha_1 * theta_cos
second_col = alpha_2 * theta_sin
else:
raise ValueError(
f"Unsupported variant {variant} for RoadLayer. Supported variants are road_1, road_2, and road_4."
)
return first_col, second_col
def _apply_road(
variant: RoadVariant, group_size: int, road_theta: torch.Tensor, road_alpha: torch.Tensor, x: torch.Tensor
):
first_col, second_col = _prepare_cols(variant, group_size, road_theta, road_alpha)
    # Split each group into two halves and join back
    # See equation 4 in the RoAd paper
x_grouped = x.reshape(-1, 2, group_size // 2)
x1 = x_grouped[:, 0, :]
x2 = x_grouped[:, 1, :]
rotate_half_x = torch.stack((-x2, x1), dim=1).reshape(x.shape)
result = x * first_col + rotate_half_x * second_col
return result
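# Editorial sketch (not part of the original module): `_apply_road` is the elementwise fast path and is expected to
# be equivalent to multiplying by the dense block-diagonal matrix built in `_get_delta_weight`, which is what the
# merge/unmerge code relies on. The sizes below are illustrative assumptions.
def _example_apply_road_matches_dense_matrix() -> None:
    theta = torch.randn(8)
    alpha = torch.randn(8)
    x = torch.randn(3, 8)
    # Dense rotation-and-scale matrix R for variant road_2 with two groups of size 4
    road_R = _get_delta_weight("road_2", 4, theta, alpha)
    # Fast elementwise application of the same transformation
    fast = _apply_road("road_2", 4, theta, alpha, x)
    # Applying R to each row of x is x @ R.T
    dense = x @ road_R.T
    torch.testing.assert_close(fast, dense)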
def dispatch_default(
target: torch.nn.Module,
adapter_name: str,
road_config: RoadConfig,
**kwargs,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
new_module = Linear(target, adapter_name, **kwargs)
return new_module
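# Editorial sketch (not part of the original module): merging relies on R @ (W @ x + b) == (R @ W) @ x + R @ b, so a
# RoAd-wrapped linear layer should produce (numerically close to) the same output before and after `merge()`. The
# layer sizes, adapter name, and parameter ranges below are assumptions chosen only for illustration.
def _example_merge_round_trip() -> None:
    torch.manual_seed(0)
    base = nn.Linear(16, 16)
    layer = Linear(base, "default", variant="road_2", group_size=4)
    # Give the adapter a non-trivial but well-conditioned rotation so that merge/unmerge stays numerically stable
    layer.road_theta["default"].data.uniform_(-0.3, 0.3)
    layer.road_alpha["default"].data.uniform_(0.8, 1.2)
    layer.eval()
    x = torch.randn(2, 16)
    out_unmerged = layer(x)
    layer.merge()
    out_merged = layer(x)
    torch.testing.assert_close(out_merged, out_unmerged, atol=1e-5, rtol=1e-4)
    layer.unmerge()
    torch.testing.assert_close(layer(x), out_unmerged, atol=1e-5, rtol=1e-4)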
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/road/layer.py",
"license": "Apache License 2.0",
"lines": 342,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/road/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import operator
from contextlib import contextmanager
from functools import partial
from torch import nn
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.road.config import RoadConfig
from peft.tuners.tuners_utils import (
BaseTuner,
get_device_map,
)
from peft.utils import TRANSFORMERS_MODELS_TO_ROAD_TARGET_MODULES_MAPPING
from .layer import RoadLayer, dispatch_default
def _adapter_names_pre_forward_hook(target, args, kwargs, adapter_names):
# pre-forward hook to inject the adapter_names argument when using mixed adapter batches inference
kwargs["adapter_names"] = adapter_names
return args, kwargs
class RoadModel(BaseTuner):
""" """
prefix: str = "road_"
tuner_layer_cls = RoadLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_ROAD_TARGET_MODULES_MAPPING
def _create_and_replace(
self,
road_config: RoadConfig,
adapter_name: str,
target: nn.Module,
target_name: str,
parent: nn.Module,
current_key,
) -> None:
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
# Regexp matching - Find key which matches current target_name in patterns provided
variant = road_config.variant
group_size = road_config.group_size
kwargs = {
"variant": variant,
"group_size": group_size,
"init_weights": road_config.init_weights,
"loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False),
"loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False),
}
# for torchao merging, we need the get_apply_tensor_subclass from the quantization config
try:
kwargs["get_apply_tensor_subclass"] = operator.attrgetter(
"hf_quantizer.quantization_config.get_apply_tensor_subclass"
)(self.model)
except AttributeError:
pass
if isinstance(target, RoadLayer):
target.update_layer(
adapter_name,
variant,
group_size,
init_weights=road_config.init_weights,
)
else:
device_map = get_device_map(self.model)
new_module = self._create_new_module(road_config, adapter_name, target, device_map=device_map, **kwargs)
if adapter_name not in self.active_adapters:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _create_new_module(road_config: RoadConfig, adapter_name, target, **kwargs):
dispatchers = []
# avoid eager bnb import
if is_bnb_available():
from .bnb import dispatch_bnb_8bit
dispatchers.append(dispatch_bnb_8bit)
if is_bnb_4bit_available():
from .bnb import dispatch_bnb_4bit
dispatchers.append(dispatch_bnb_4bit)
dispatchers.extend(
[
dispatch_default,
]
)
new_module = None
for dispatcher in dispatchers:
new_module = dispatcher(target, adapter_name, road_config=road_config, **kwargs)
if new_module is not None: # first match wins
break
if new_module is None:
# no module could be matched
raise ValueError(
f"Target module {target} is not supported. Currently, only the following modules are supported: "
"`torch.nn.Linear`."
)
return new_module
@contextmanager
def _enable_peft_forward_hooks(self, *args, **kwargs):
# If adapter_names is passed as an argument, we inject it into the forward arguments.
adapter_names = kwargs.pop("adapter_names", None)
if adapter_names is None:
# nothing to do
yield
return
if self.training:
raise ValueError("Cannot pass `adapter_names` when the model is in training mode.")
# Check that users only passed actually existing adapters.
        # Note: We cannot do this on the layer level, as each individual layer may not have each adapter. Still, we
        # want to check that there is at least one layer with the given adapter name, or else typos can easily slip
        # through.
expected_adapters = set()
for layer in self.modules():
if isinstance(layer, RoadLayer):
expected_adapters |= layer.road_theta.keys()
unique_adapters = {name for name in adapter_names if name != "__base__"}
unexpected_adapters = unique_adapters - expected_adapters
if unexpected_adapters:
raise ValueError(f"Trying to infer with non-existing adapter(s): {', '.join(sorted(unexpected_adapters))}")
hook_handles = []
for module in self.modules():
if isinstance(module, RoadLayer):
pre_forward = partial(_adapter_names_pre_forward_hook, adapter_names=adapter_names)
handle = module.register_forward_pre_hook(pre_forward, with_kwargs=True)
hook_handles.append(handle)
# TODO LoRA also has hooks for beam search, ignore this for now
yield
for handle in hook_handles:
handle.remove()
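# Editorial sketch (not part of the original module): the pre-forward hooks above enable mixed adapter batches, where
# each row of a batch can be routed to a different adapter (or to the base weights via the reserved name "__base__").
# The base model checkpoint, adapter name, and target modules below are assumptions for illustration only, and
# RoadConfig is assumed to be exported at the `peft` package level.
def _example_mixed_adapter_batch() -> None:
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import RoadConfig, get_peft_model
    model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    config = RoadConfig(target_modules=["q_proj", "v_proj"])
    peft_model = get_peft_model(model, config, adapter_name="adapter_a")
    peft_model.eval()  # adapter_names cannot be used in training mode
    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
    inputs = tokenizer(["first example", "second example"], return_tensors="pt", padding=True)
    # Row 0 is routed through "adapter_a", row 1 bypasses all adapters and uses the base weights
    outputs = peft_model(**inputs, adapter_names=["adapter_a", "__base__"])
    print(outputs.logits.shape)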
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/road/model.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/utils/warning.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PeftWarning(UserWarning):
"""Base PEFT warning"""
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/utils/warning.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:method_comparison/text_generation_benchmark/data.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data handling utilities for PEFT benchmarking.
"""
import json
import os
from typing import Optional
from transformers import PreTrainedTokenizer
from utils import BenchmarkConfig
DEFAULT_PROMPTS_PATH = os.path.join(os.path.dirname(__file__), "configs", "prompts.json")
def load_test_prompts(config: BenchmarkConfig) -> dict[str, list[str]]:
    """
    Load prompts from a JSON file.
    Args:
        config: Benchmark configuration, optionally providing a `prompts_file` attribute with the path to the file
    Returns:
        Dictionary with prompts by category
"""
prompts_file = getattr(config, "prompts_file", DEFAULT_PROMPTS_PATH)
with open(prompts_file) as f:
prompts = json.load(f)
return prompts
def truncate_prompt_for_model(
prompt: str,
tokenizer: PreTrainedTokenizer,
max_length: Optional[int] = None,
reserve_output_tokens: int = 50,
) -> str:
"""
Truncate a prompt to fit within the model's context window.
Args:
prompt: Input prompt
tokenizer: Model tokenizer
max_length: Maximum sequence length (if None, uses model's max_length)
reserve_output_tokens: Number of tokens to reserve for response
Returns:
Truncated prompt
"""
if max_length is None:
if hasattr(tokenizer, "model_max_length"):
max_length = tokenizer.model_max_length
else:
max_length = 2048
max_prompt_length = max_length - reserve_output_tokens
input_ids = tokenizer.encode(prompt, return_tensors="pt")[0]
if len(input_ids) <= max_prompt_length:
return prompt
truncated_ids = input_ids[:max_prompt_length]
truncated_prompt = tokenizer.decode(truncated_ids, skip_special_tokens=True)
return truncated_prompt
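# Editorial usage sketch (not part of the original module): truncating an over-long prompt so that the model still
# has room to generate `reserve_output_tokens` new tokens within the given budget. The tokenizer checkpoint is an
# assumption for illustration only.
def _example_truncate_prompt() -> None:
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    long_prompt = "benchmark " * 2000
    # With max_length=256 and reserve_output_tokens=56, the prompt is cut to at most 200 tokens
    truncated = truncate_prompt_for_model(long_prompt, tokenizer, max_length=256, reserve_output_tokens=56)
    print(len(tokenizer.encode(long_prompt)), len(tokenizer.encode(truncated)))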
def prepare_benchmark_prompts(
config: BenchmarkConfig,
tokenizer: PreTrainedTokenizer,
max_input_length: Optional[int] = None,
seed: int = 42,
) -> dict[str, list[str]]:
"""
Prepare prompts for benchmarking, ensuring appropriate length and variety.
Always returns all prompt categories for consistent benchmarking.
Args:
config: Benchmark configuration
tokenizer: Model tokenizer
max_input_length: Maximum input length (overrides model default if provided)
seed: Random seed (kept for backwards compatibility)
Returns:
Dictionary with processed prompts by category (all categories included)
"""
all_prompts = load_test_prompts(config)
processed_prompts = {}
for category, prompts in all_prompts.items():
truncated_prompts = [
truncate_prompt_for_model(
prompt,
tokenizer,
max_length=max_input_length,
reserve_output_tokens=getattr(config, "reserve_output_tokens", 50),
)
for prompt in prompts
]
processed_prompts[category] = truncated_prompts
return processed_prompts
| {
"repo_id": "huggingface/peft",
"file_path": "method_comparison/text_generation_benchmark/data.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:method_comparison/text_generation_benchmark/run.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Main entry point to run the experiments. Contains the general setup and the actual inference code.
"""
import argparse
import gc
import json
import os
import sys
import time
from typing import Optional
import bitsandbytes
import torch
import transformers
from data import prepare_benchmark_prompts
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, set_seed
from utils import (
BenchmarkConfig,
BenchmarkResult,
BenchmarkStatus,
get_memory_usage,
init_accelerator,
log_results,
validate_experiment_path,
)
import peft
from peft import PeftConfig, get_peft_model
def load_base_results(model_id: str) -> Optional[dict]:
"""Load base model results if they exist."""
base_results_dir = os.path.join(os.path.dirname(__file__), "base_results")
model_name = model_id.replace("/", "_").replace("-", "_")
filename = f"base_{model_name}.json"
filepath = os.path.join(base_results_dir, filename)
if os.path.exists(filepath):
with open(filepath) as f:
return json.load(f)
return None
def measure_inference_time(model, tokenizer, prompts, max_new_tokens, num_runs, print_fn, category_generation_params):
"""Measure inference time for each prompt category."""
inference_times = {}
time_per_token = {}
generated_tokens = {}
individual_samples = {}
for category, category_prompts in prompts.items():
print_fn(f"\nMeasuring inference time for {category} prompts...")
category_times = []
category_tokens = []
category_time_per_token = []
category_samples = []
for prompt in category_prompts:
prompt_times = []
prompt_tokens = []
prompt_time_per_token = []
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
cat_max_new_tokens = category_generation_params.get(category, {}).get("max_new_tokens", max_new_tokens)
for _ in range(num_runs):
start_time = time.perf_counter()
outputs = model.generate(
**inputs,
max_new_tokens=cat_max_new_tokens,
min_new_tokens=cat_max_new_tokens,
pad_token_id=tokenizer.pad_token_id,
)
end_time = time.perf_counter()
# Calculate metrics
inference_time = end_time - start_time
num_tokens = len(outputs[0]) - len(inputs["input_ids"][0])
time_per_token_val = inference_time / num_tokens if num_tokens > 0 else 0
prompt_times.append(inference_time)
prompt_tokens.append(num_tokens)
prompt_time_per_token.append(time_per_token_val)
# Calculate averages for this prompt
avg_time = sum(prompt_times) / len(prompt_times)
avg_tokens = sum(prompt_tokens) / len(prompt_tokens)
avg_time_per_token = sum(prompt_time_per_token) / len(prompt_time_per_token)
sample_result = {
"inference_time": avg_time,
"generated_tokens": avg_tokens,
"time_per_token": avg_time_per_token,
"individual_runs": [
{"inference_time": t, "generated_tokens": tok, "time_per_token": tpt}
for t, tok, tpt in zip(prompt_times, prompt_tokens, prompt_time_per_token)
],
}
category_samples.append(sample_result)
category_times.append(avg_time)
category_tokens.append(avg_tokens)
category_time_per_token.append(avg_time_per_token)
if category_times:
avg_category_time = sum(category_times) / len(category_times)
avg_category_tokens = sum(category_tokens) / len(category_tokens)
avg_category_time_per_token = sum(category_time_per_token) / len(category_time_per_token)
inference_times[category] = avg_category_time
generated_tokens[category] = avg_category_tokens
time_per_token[category] = avg_category_time_per_token
individual_samples[category] = category_samples
return {
"inference_times": inference_times,
"time_per_token": time_per_token,
"generated_tokens": generated_tokens,
"individual_samples": individual_samples,
}
def run_benchmark(
benchmark_config: BenchmarkConfig, experiment_name: str, experiment_path: str, print_fn=print
) -> BenchmarkResult:
"""Run benchmarks for the specified PEFT method configuration."""
result = BenchmarkResult(
experiment_name=experiment_name,
status=BenchmarkStatus.RUNNING,
model_id=benchmark_config.model_id,
)
result.save()
start_time = time.perf_counter()
e_main_benchmark: Optional[Exception] = None
try:
print_fn("Initializing accelerator...")
accelerator_allocated_init, accelerator_reserved_init = init_accelerator()
set_seed(benchmark_config.seed)
print_fn(f"Loading base model: {benchmark_config.model_id}")
tokenizer = AutoTokenizer.from_pretrained(benchmark_config.model_id)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
model_kwargs = {
"device_map": "auto" if (torch.cuda.is_available() or torch.xpu.is_available()) else None,
}
if benchmark_config.dtype == "float32":
model_kwargs["torch_dtype"] = torch.float32
elif benchmark_config.dtype == "float16":
model_kwargs["torch_dtype"] = torch.float16
elif benchmark_config.dtype == "bfloat16":
model_kwargs["torch_dtype"] = torch.bfloat16
else:
raise ValueError(f"Unsupported dtype: {benchmark_config.dtype}")
if benchmark_config.use_8bit:
model_kwargs["quantization_config"] = BitsAndBytesConfig(
load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
)
elif benchmark_config.use_4bit:
model_kwargs["quantization_config"] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=model_kwargs.get("torch_dtype", torch.float16),
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
)
base_model = AutoModelForCausalLM.from_pretrained(benchmark_config.model_id, **model_kwargs)
base_results = load_base_results(benchmark_config.model_id)
print_fn("Preparing benchmark prompts...")
prompts = prepare_benchmark_prompts(
config=benchmark_config,
tokenizer=tokenizer,
max_input_length=None,
seed=benchmark_config.seed,
)
if base_results:
print_fn("Using cached base model results...")
base_inference_times = base_results["inference_results"]
else:
raise FileNotFoundError(
"No cached base results found. Please run `python run_base.py` first to generate base model results."
)
try:
print_fn(f"Loading PEFT config from {experiment_path}")
peft_config = PeftConfig.from_pretrained(experiment_path)
print_fn(f"Loaded PEFT config: {peft_config.peft_type}, with parameters: {vars(peft_config)}")
model = get_peft_model(base_model, peft_config)
except Exception as exc:
error_msg = f"Error loading PEFT config: {str(exc)}"
print_fn(error_msg)
del base_model
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
elif torch.xpu.is_available():
                torch.xpu.empty_cache()
            # re-raise so that the outer handler records the failure instead of continuing with an undefined model
            raise
ram, accelerator_allocated, accelerator_reserved = get_memory_usage()
result.add_memory_log("peft_model_loaded", ram, accelerator_allocated, accelerator_reserved)
# Calculate PEFT model metrics
trainable_params = model.get_nb_trainable_parameters()[0]
total_params = sum(p.numel() for p in model.parameters())
base_params = sum(p.numel() for p in model.base_model.parameters())
dtype_bytes = 2 if benchmark_config.dtype in ["float16", "bfloat16"] else 4
adapter_size_mb = trainable_params * dtype_bytes / (1024 * 1024)
base_model_size_mb = base_params * dtype_bytes / (1024 * 1024)
param_ratio = trainable_params / total_params if total_params > 0 else 0
result.update_meta_info(
param_counts={
"base_params": base_params,
"trainable_params": trainable_params,
"total_params": total_params,
"param_ratio": param_ratio,
},
size_info={"base_model_size_mb": base_model_size_mb, "adapter_size_mb": adapter_size_mb},
package_info={
"transformers-version": transformers.__version__,
"peft-version": peft.__version__,
"bitsandbytes-version": bitsandbytes.__version__ if hasattr(bitsandbytes, "__version__") else None,
},
)
print_fn("Measuring PEFT model inference times...")
peft_inference_times = measure_inference_time(
model,
tokenizer,
prompts,
max_new_tokens=benchmark_config.max_new_tokens,
num_runs=benchmark_config.num_inference_runs,
print_fn=print_fn,
category_generation_params=benchmark_config.category_generation_params,
)
# Calculate inference overhead for each category
inference_overhead = {
k: (peft_inference_times["inference_times"][k] - base_inference_times["inference_times"][k])
/ base_inference_times["inference_times"][k]
* 100
for k in base_inference_times["inference_times"]
}
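        # For example, with a hypothetical base time of 2.0s and a PEFT time of 2.3s for a category, the overhead is
        # (2.3 - 2.0) / 2.0 * 100 = 15%.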
for category in prompts:
category_metrics = {
"inference_time": peft_inference_times["inference_times"][category],
"base_inference_time": base_inference_times["inference_times"][category],
"inference_overhead_pct": inference_overhead[category],
"time_per_token": peft_inference_times["time_per_token"][category],
"generated_tokens": peft_inference_times["generated_tokens"][category],
}
result.add_metrics_for_category(
category, category_metrics, individual_samples=peft_inference_times["individual_samples"][category]
)
result.update_generation_info(
memory_data={
"peak_accelerator_memory_mb": max(
(log["accelerator_allocated_mb"] for log in result.generation_info["memory"]["memory_logs"]), default=0
),
"peak_ram_memory_mb": max(
(log["ram_mb"] for log in result.generation_info["memory"]["memory_logs"]), default=0
),
}
)
ram, accelerator_allocated, accelerator_reserved = get_memory_usage()
result.add_memory_log("benchmark_complete", ram, accelerator_allocated, accelerator_reserved)
result.status = BenchmarkStatus.SUCCESS
except Exception as exc:
print_fn(f"Benchmark failed with error: {exc}")
result.status = BenchmarkStatus.FAILED
e_main_benchmark = exc
end_time = time.perf_counter()
error_message = str(e_main_benchmark) if e_main_benchmark is not None else None
peft_config_dict = peft_config.to_dict() if "peft_config" in locals() else None
if peft_config_dict:
for key, value in peft_config_dict.items():
if isinstance(value, set):
peft_config_dict[key] = list(value)
result.update_run_info(
duration=end_time - start_time,
status=result.status,
error=error_message,
peft_config=peft_config_dict,
benchmark_config=benchmark_config.to_dict(),
)
return result
def main() -> Optional[int]:
"""Main entry point for the benchmark runner."""
parser = argparse.ArgumentParser(description="Run PEFT method benchmarks")
parser.add_argument("experiment_path", help="Path to experiment directory")
parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output")
args = parser.parse_args()
print_fn = print if args.verbose else lambda *args, **kwargs: None
experiment_path = args.experiment_path
allowed_root = os.path.abspath(os.path.join(os.path.dirname(__file__)))
abs_experiment_path = os.path.abspath(experiment_path)
if not abs_experiment_path.startswith(allowed_root):
print(f"Experiment path must be inside {allowed_root}, got: {abs_experiment_path}. Skipping execution.")
return 0
if not os.path.exists(abs_experiment_path):
print(f"Experiment path not found: {abs_experiment_path}. Skipping execution.")
return 0
experiment_path = abs_experiment_path
experiment_name, benchmark_config = validate_experiment_path(experiment_path)
print_fn(f"Running benchmark for experiment: {experiment_name}")
result = run_benchmark(
benchmark_config=benchmark_config,
experiment_name=experiment_name,
experiment_path=experiment_path,
print_fn=print_fn,
)
log_results(experiment_name, result, print_fn=print)
if __name__ == "__main__":
sys.exit(main())
| {
"repo_id": "huggingface/peft",
"file_path": "method_comparison/text_generation_benchmark/run.py",
"license": "Apache License 2.0",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:method_comparison/text_generation_benchmark/run_base.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
import time
import torch
from data import prepare_benchmark_prompts
from run import measure_inference_time
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, set_seed
from utils import (
BenchmarkConfig,
get_memory_usage,
init_accelerator,
)
def run_base_model_benchmark(benchmark_config: BenchmarkConfig, print_fn=print) -> dict:
"""Run benchmark for base model only and return results."""
print_fn(f"Running base model benchmark for: {benchmark_config.model_id}")
print_fn("Initializing accelerator...")
init_accelerator()
set_seed(benchmark_config.seed)
print_fn(f"Loading base model: {benchmark_config.model_id}")
tokenizer = AutoTokenizer.from_pretrained(benchmark_config.model_id)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
model_kwargs = {
"device_map": "auto" if (torch.cuda.is_available() or torch.xpu.is_available()) else None,
}
if benchmark_config.dtype == "float32":
model_kwargs["torch_dtype"] = torch.float32
elif benchmark_config.dtype == "float16":
model_kwargs["torch_dtype"] = torch.float16
elif benchmark_config.dtype == "bfloat16":
model_kwargs["torch_dtype"] = torch.bfloat16
if benchmark_config.use_8bit:
model_kwargs["quantization_config"] = BitsAndBytesConfig(
load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
)
elif benchmark_config.use_4bit:
model_kwargs["quantization_config"] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=model_kwargs.get("torch_dtype", torch.float16),
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
)
model = AutoModelForCausalLM.from_pretrained(benchmark_config.model_id, **model_kwargs)
ram, accelerator_allocated, accelerator_reserved = get_memory_usage()
print_fn(f"Memory after model load - RAM: {ram:.2f}MB, {model.device.type.upper()}: {accelerator_allocated:.2f}MB")
print_fn("Preparing benchmark prompts...")
prompts = prepare_benchmark_prompts(
        config=benchmark_config,
tokenizer=tokenizer,
max_input_length=None,
seed=benchmark_config.seed,
)
# Measure base model inference for each prompt category
print_fn("Measuring base model inference times...")
base_inference_results = measure_inference_time(
model,
tokenizer,
prompts,
max_new_tokens=benchmark_config.max_new_tokens,
num_runs=benchmark_config.num_inference_runs,
print_fn=print_fn,
category_generation_params=benchmark_config.category_generation_params,
)
result = {
"model_id": benchmark_config.model_id,
"benchmark_config": benchmark_config.to_dict(),
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
"inference_results": base_inference_results,
"memory_info": {
"ram_mb": ram,
"accelerator_allocated_mb": accelerator_allocated,
"accelerator_reserved_mb": accelerator_reserved,
},
}
return result
def save_base_results(result: dict, model_id: str) -> str:
"""Save base model results with a filename based on model and config."""
base_results_dir = os.path.join(os.path.dirname(__file__), "base_results")
os.makedirs(base_results_dir, exist_ok=True)
model_name = model_id.replace("/", "_").replace("-", "_")
filename = f"base_{model_name}.json"
filepath = os.path.join(base_results_dir, filename)
with open(filepath, "w") as f:
json.dump(result, f, indent=2)
return filepath
def main():
"""Main entry point for the base model benchmark runner."""
parser = argparse.ArgumentParser(description="Run base model benchmarks")
parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output")
parser.add_argument("--force", "-f", action="store_true", help="Force re-run even if results exist")
args = parser.parse_args()
print_fn = print if args.verbose else lambda *args, **kwargs: None
default_config_path = os.path.join(os.path.dirname(__file__), "default_benchmark_params.json")
benchmark_config = BenchmarkConfig.from_json(default_config_path)
model_name = benchmark_config.model_id.replace("/", "_").replace("-", "_")
base_results_dir = os.path.join(os.path.dirname(__file__), "base_results")
filename = f"base_{model_name}.json"
filepath = os.path.join(base_results_dir, filename)
if os.path.exists(filepath) and not args.force:
print(f"Base results already exist at: {filepath}")
print("Use --force to re-run the benchmark")
return 0
print_fn(f"Running base model benchmark for: {benchmark_config.model_id}")
result = run_base_model_benchmark(benchmark_config, print_fn=print_fn)
saved_path = save_base_results(result, benchmark_config.model_id)
device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
print(f"Base model results saved to: {saved_path}")
print("\nBase Model Benchmark Summary:")
print(f"Model: {result['model_id']}")
print(
f"Memory Usage - RAM: {result['memory_info']['ram_mb']:.2f}MB, {device_type.upper()}: {result['memory_info']['accelerator_allocated_mb']:.2f}MB"
)
print("\nInference Times by Category:")
for category, time_val in result["inference_results"]["inference_times"].items():
time_per_token = result["inference_results"]["time_per_token"][category]
tokens = result["inference_results"]["generated_tokens"][category]
print(f" {category}: {time_val:.4f}s ({time_per_token:.6f}s/token, {tokens:.1f} tokens)")
return 0
if __name__ == "__main__":
sys.exit(main())
| {
"repo_id": "huggingface/peft",
"file_path": "method_comparison/text_generation_benchmark/run_base.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:method_comparison/text_generation_benchmark/utils.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for PEFT benchmarking.
"""
import datetime
import json
import os
import platform
import subprocess
from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Any, Callable, Optional
import psutil
import torch
from peft.utils import infer_device
FILE_NAME_BENCHMARK_PARAMS = "benchmark_params.json"
FILE_NAME_DEFAULT_CONFIG = "default_benchmark_params.json"
RESULT_PATH = os.path.join(os.path.dirname(__file__), "results")
RESULT_PATH_TEMP = os.path.join(os.path.dirname(__file__), "temporary_results")
RESULT_PATH_CANCELLED = os.path.join(os.path.dirname(__file__), "cancelled_results")
class BenchmarkStatus(Enum):
"""Status of a benchmark run."""
SUCCESS = "success"
FAILED = "failed"
CANCELLED = "cancelled"
RUNNING = "running"
@dataclass
class BenchmarkResult:
"""Container for benchmark results."""
experiment_name: str
status: BenchmarkStatus
model_id: str
run_info: dict = field(default_factory=dict)
generation_info: dict = field(default_factory=dict)
meta_info: dict = field(default_factory=dict)
def __post_init__(self):
"""Initialize structured data format."""
device = infer_device()
torch_accelerator_module = getattr(torch, device, torch.cuda)
self.run_info = {
"timestamp": datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
"duration": 0.0,
"status": self.status.value,
"hardware": {
"num_accelerators": torch_accelerator_module.device_count() if torch_accelerator_module.is_available() else 0,
"accelerator_type": torch_accelerator_module.get_device_name(0) if torch_accelerator_module.is_available() else "N/A",
"cuda_version": torch.version.cuda if torch.cuda.is_available() else "N/A",
"pytorch_version": torch.__version__,
},
}
self.meta_info = {
"model_id": self.model_id,
"parameters": {
"base_params": 0,
"trainable_params": 0,
"total_params": 0,
"param_ratio": 0.0,
},
"model_size": {
"base_model_size_mb": 0.0,
"adapter_size_mb": 0.0,
},
"package_info": {
"transformers-version": None,
"transformers-commit-hash": None,
"peft-version": None,
"peft-commit-hash": None,
"datasets-version": None,
"datasets-commit-hash": None,
"bitsandbytes-version": None,
"bitsandbytes-commit-hash": None,
"torch-version": torch.__version__,
"torch-commit-hash": None,
},
"system_info": {
"system": platform.system(),
"release": platform.release(),
"version": platform.version(),
"machine": platform.machine(),
"processor": platform.processor(),
"accelerator": torch_accelerator_module.get_device_name(0) if torch_accelerator_module.is_available() else "N/A",
},
}
self.generation_info = {
"memory": {
"peak_accelerator_memory_mb": 0.0,
"peak_ram_memory_mb": 0.0,
"memory_logs": [],
},
"by_category": {},
"overall": {},
}
def update_meta_info(self, param_counts: dict, size_info: dict, package_info: Optional[dict] = None):
"""Update model metadata information."""
self.meta_info["parameters"].update(param_counts)
self.meta_info["model_size"].update(size_info)
if package_info:
self.meta_info["package_info"].update(package_info)
def update_generation_info(self, memory_data: Optional[dict] = None, performance_metrics: Optional[dict] = None):
"""Update generation performance information, primarily for memory and high-level performance."""
if memory_data:
self.generation_info["memory"].update(memory_data)
if performance_metrics: # For things like overall tokens/sec if calculated
self.generation_info.update(performance_metrics)
def add_memory_log(self, stage: str, ram_mb: float, accelerator_allocated_mb: float, accelerator_reserved_mb: float):
"""Add a memory usage log entry to generation_info."""
self.generation_info["memory"]["memory_logs"].append(
{
"stage": stage,
"ram_mb": ram_mb,
"accelerator_allocated_mb": accelerator_allocated_mb,
"accelerator_reserved_mb": accelerator_reserved_mb,
}
)
def add_metrics_for_category(self, category: str, metrics: dict, individual_samples: Optional[list] = None):
"""Add metrics for a specific prompt category under generation_info."""
category_data = {"metrics": metrics, "samples": individual_samples if individual_samples is not None else []}
self.generation_info["by_category"][category] = category_data
def update_run_info(
self,
duration: float,
status: BenchmarkStatus,
error: Optional[str] = None,
peft_config: Optional[dict] = None,
benchmark_config: Optional[dict] = None,
):
"""Update run information."""
self.run_info["duration"] = duration
self.run_info["status"] = status.value
if error:
self.run_info["error"] = error
if peft_config:
self.run_info["peft_config"] = peft_config
if benchmark_config:
self.run_info["benchmark_config"] = benchmark_config
def compute_overall_metrics(self):
"""Compute overall metrics across all categories within generation_info."""
if not self.generation_info["by_category"]:
return
categories = self.generation_info["by_category"]
key_metrics = [
"inference_time",
"base_inference_time",
"inference_overhead_pct",
"time_per_token",
"generated_tokens",
]
for metric in key_metrics:
values = []
for category_data in categories.values():
if "metrics" in category_data and metric in category_data["metrics"]:
values.append(category_data["metrics"][metric])
if values:
self.generation_info["overall"][metric] = sum(values) / len(values)
def to_dict(self) -> dict[str, Any]:
"""Convert result to dictionary."""
self.compute_overall_metrics()
return {
"run_info": self.run_info,
"generation_info": self.generation_info,
"meta_info": self.meta_info,
}
def save(self, path: Optional[str] = None):
"""Save result to JSON file."""
if path is None:
peft_branch = get_peft_branch()
if self.status == BenchmarkStatus.CANCELLED:
base_path = RESULT_PATH_CANCELLED
elif peft_branch != "main":
base_path = RESULT_PATH_TEMP
elif self.status == BenchmarkStatus.SUCCESS:
base_path = RESULT_PATH
elif self.status == BenchmarkStatus.FAILED:
base_path = RESULT_PATH_CANCELLED
else:
base_path = RESULT_PATH_TEMP
filename = f"{self.experiment_name}.json"
path = os.path.join(base_path, filename)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as f:
json.dump(self.to_dict(), f, indent=2)
return path
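# The JSON written by save() has three top-level sections, mirroring to_dict():
#   run_info        -- timestamp, duration, status, hardware info and, when set, error/peft_config/benchmark_config
#   generation_info -- memory logs plus per-category and overall generation metrics
#   meta_info       -- model id, parameter counts, model sizes, package and system info
# The target directory depends on the run status and on whether the current PEFT branch is "main".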
@dataclass
class BenchmarkConfig:
"""Configuration for benchmarking PEFT methods."""
model_id: str
seed: int
num_inference_runs: int
max_new_tokens: int
dtype: str = "float16"
use_4bit: bool = False
use_8bit: bool = False
category_generation_params: Optional[dict] = None
def __post_init__(self) -> None:
"""Validate configuration."""
if not isinstance(self.model_id, str):
raise ValueError(f"Invalid model_id: {self.model_id}")
if self.seed < 0:
raise ValueError(f"Invalid seed: {self.seed}")
if self.num_inference_runs <= 0:
raise ValueError(f"Invalid num_inference_runs: {self.num_inference_runs}")
if self.max_new_tokens <= 0:
raise ValueError(f"Invalid max_new_tokens: {self.max_new_tokens}")
@classmethod
def from_dict(cls, config_dict: dict) -> "BenchmarkConfig":
"""Create config from dictionary."""
valid_keys = set(cls.__dataclass_fields__.keys())
filtered_dict = {k: v for k, v in config_dict.items() if k in valid_keys}
return cls(**filtered_dict)
@classmethod
def from_json(cls, json_path: str) -> "BenchmarkConfig":
"""Load config from JSON file."""
with open(json_path) as f:
config_dict = json.load(f)
return cls.from_dict(config_dict)
def to_dict(self) -> dict[str, Any]:
"""Convert config to dictionary."""
result = asdict(self)
return result
def save(self, path: str) -> None:
"""Save config to JSON file."""
with open(path, "w") as f:
json.dump(self.to_dict(), f, indent=2)
def merge_from_dict(self, config_dict: dict) -> None:
"""Merge settings from a dictionary into this config object.
Keys in config_dict will override existing attributes.
"""
for key, value in config_dict.items():
if hasattr(self, key):
setattr(self, key, value)
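# A minimal sketch of how BenchmarkConfig is meant to be composed (file names below are assumptions,
# matching the defaults used elsewhere in this module): load the shared defaults, then overlay
# experiment-specific overrides:
#
#   config = BenchmarkConfig.from_json("default_benchmark_params.json")
#   config.merge_from_dict({"max_new_tokens": 64})  # e.g. values read from benchmark_params.json
#   config.save("merged_benchmark_params.json")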
def validate_experiment_path(path: str) -> tuple[str, "BenchmarkConfig"]:
"""Validate experiment path, load and merge configs, and return them."""
if not os.path.exists(path):
raise FileNotFoundError(f"Experiment path not found: {path}")
path_parts = os.path.normpath(path).split(os.sep)
try:
experiments_idx = path_parts.index("experiments")
except ValueError:
experiment_name = os.path.basename(path.rstrip(os.sep))
else:
if experiments_idx + 1 < len(path_parts):
method_name = path_parts[experiments_idx + 1]
remaining_parts = path_parts[experiments_idx + 2 :]
if remaining_parts:
remaining_name = "-".join(remaining_parts)
experiment_name = f"{method_name}--{remaining_name}"
else:
experiment_name = method_name
else:
experiment_name = os.path.basename(path.rstrip(os.sep))
default_config_path = os.path.join(os.path.dirname(__file__), FILE_NAME_DEFAULT_CONFIG)
experiment_benchmark_params_path = os.path.join(path, FILE_NAME_BENCHMARK_PARAMS)
if not os.path.exists(default_config_path):
raise FileNotFoundError(f"Default configuration file not found: {default_config_path}. This is required.")
benchmark_config = BenchmarkConfig.from_json(default_config_path)
print(f"Loaded default configuration from {default_config_path}")
if os.path.exists(experiment_benchmark_params_path):
with open(experiment_benchmark_params_path) as f:
experiment_specific_params = json.load(f)
benchmark_config.merge_from_dict(experiment_specific_params)
print(f"Loaded and merged experiment-specific parameters from {experiment_benchmark_params_path}")
else:
print(f"No {FILE_NAME_BENCHMARK_PARAMS} found in {path}. Using only default configuration.")
return experiment_name, benchmark_config
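# Illustrative example of the experiment-name derivation above (path is hypothetical): for
# "experiments/lora/llama-3-8b/rank16", the method name is "lora", the remaining parts are joined with
# "-", and the resulting experiment name is "lora--llama-3-8b-rank16". For a path without an
# "experiments" component, the basename of the path is used instead.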
def get_memory_usage() -> tuple[float, float, float]:
"""Get current memory usage (RAM and accelerator)."""
process = psutil.Process(os.getpid())
ram_usage_bytes = process.memory_info().rss
ram_usage_mb = ram_usage_bytes / (1024 * 1024)
if torch.cuda.is_available():
accelerator_allocated = torch.cuda.memory_allocated()
accelerator_reserved = torch.cuda.memory_reserved()
accelerator_allocated_mb = accelerator_allocated / (1024 * 1024)
accelerator_reserved_mb = accelerator_reserved / (1024 * 1024)
elif torch.xpu.is_available():
accelerator_allocated = torch.xpu.memory_allocated()
accelerator_reserved = torch.xpu.memory_reserved()
accelerator_allocated_mb = accelerator_allocated / (1024 * 1024)
accelerator_reserved_mb = accelerator_reserved / (1024 * 1024)
else:
accelerator_allocated_mb = 0.0
accelerator_reserved_mb = 0.0
return ram_usage_mb, accelerator_allocated_mb, accelerator_reserved_mb
def init_accelerator() -> tuple[float, float]:
"""Initialize accelerator and return initial memory usage."""
if torch.cuda.is_available():
torch.cuda.init()
torch.cuda.empty_cache()
_, accelerator_allocated, accelerator_reserved = get_memory_usage()
elif torch.xpu.is_available():
torch.xpu.init()
torch.xpu.empty_cache()
_, accelerator_allocated, accelerator_reserved = get_memory_usage()
else:
accelerator_allocated = 0.0
accelerator_reserved = 0.0
return accelerator_allocated, accelerator_reserved
def get_model_size_mb(model: torch.nn.Module, dtype_bytes: int = 4) -> float:
"""Calculate model size in MB."""
return sum(p.numel() * dtype_bytes for p in model.parameters()) / (1024 * 1024)
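# Worked example for the size estimate above (numbers are illustrative): a model with 1e9 parameters at
# the default 4 bytes per parameter comes out to 4e9 / (1024 * 1024) ~= 3815 MB.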
def get_peft_branch() -> str:
repo_root = os.path.dirname(__file__)
return subprocess.check_output("git rev-parse --abbrev-ref HEAD".split(), cwd=repo_root).decode().strip()
def log_results(
experiment_name: str,
benchmark_result: BenchmarkResult,
print_fn: Callable = print,
) -> None:
"""Log benchmark results to console."""
print_fn("\n" + "=" * 50)
print_fn(f"Benchmark Results: {experiment_name}")
print_fn("=" * 50)
print_fn(f"Status: {benchmark_result.run_info.get('status', 'N/A')}")
print_fn(f"Duration: {benchmark_result.run_info.get('duration', 0):.2f} seconds")
if benchmark_result.run_info.get("status") != BenchmarkStatus.SUCCESS.value:
print_fn(f"Error: {benchmark_result.run_info.get('error', 'Unknown error')}")
print_fn("=" * 50)
return
print_fn("\nModel Information:")
print_fn(f" Base Model: {benchmark_result.meta_info.get('model_id', 'N/A')}")
print_fn("\nParameter Counts:")
params = benchmark_result.meta_info.get("parameters", {})
print_fn(f" Base Parameters: {params.get('base_params', 0):,}")
print_fn(f" Trainable Parameters: {params.get('trainable_params', 0):,}")
print_fn(f" Parameter Ratio: {params.get('param_ratio', 0):.5%}")
print_fn("\nModel Size:")
size_info = benchmark_result.meta_info.get("model_size", {})
print_fn(f" Base Model: {size_info.get('base_model_size_mb', 0):.2f} MB")
print_fn(f" Adapter: {size_info.get('adapter_size_mb', 0):.2f} MB")
print_fn("\nMemory Usage (from generation_info):")
memory_data = benchmark_result.generation_info.get("memory", {})
print_fn(f" Peak Accelerator Memory: {memory_data.get('peak_accelerator_memory_mb', 0):.2f} MB")
print_fn(f" Peak RAM Memory: {memory_data.get('peak_ram_memory_mb', 0):.2f} MB")
print_fn("\nDetailed Metrics (from generation_info.by_category):")
if benchmark_result.generation_info.get("by_category"):
for category, cat_data in benchmark_result.generation_info["by_category"].items():
print_fn(f" Category: {category}")
metrics = cat_data.get("metrics", {})
print_fn(f" Inference Time: {metrics.get('inference_time', 0):.4f} seconds")
print_fn(f" Base Inference Time: {metrics.get('base_inference_time', 0):.4f} seconds")
print_fn(f" Inference Overhead: {metrics.get('inference_overhead_pct', 0):.2f}%")
print_fn(f" Time Per Token: {metrics.get('time_per_token', 0):.6f} seconds/token")
print_fn(f" Generated Tokens: {metrics.get('generated_tokens', 0):.1f}")
samples = cat_data.get("samples", [])
if samples:
print_fn(f" Number of Samples: {len(samples)}")
print_fn(
f" Average Generated Tokens: {sum(s.get('generated_tokens', 0) for s in samples) / len(samples):.1f}"
)
else:
print_fn(" No per-category metrics available.")
benchmark_result.compute_overall_metrics()
print_fn("\nOverall Metrics (from generation_info.overall):")
overall = benchmark_result.generation_info.get("overall")
if overall:
print_fn(f" Inference Time: {overall.get('inference_time', 0):.4f} seconds")
print_fn(f" Base Inference Time: {overall.get('base_inference_time', 0):.4f} seconds")
print_fn(f" Inference Overhead: {overall.get('inference_overhead_pct', 0):.2f}%")
print_fn(f" Time Per Token: {overall.get('time_per_token', 0):.6f} seconds/token")
print_fn(f" Generated Tokens: {overall.get('generated_tokens', 0):.1f}")
else:
print_fn(" No overall metrics computed.")
print_fn("\nSaved results to:", benchmark_result.save())
print_fn("=" * 50)
| {
"repo_id": "huggingface/peft",
"file_path": "method_comparison/text_generation_benchmark/utils.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/miss_finetuning/miss_finetuning.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from typing import Literal, Optional
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
from trl import SFTConfig, SFTTrainer
from peft import MissConfig, get_peft_model
@dataclass
class ScriptArguments(SFTConfig):
# model configs
base_model_name_or_path: Optional[str] = field(
default=None, metadata={"help": "The name or path of the fp32/16 base model."}
)
bits: str = field(default="bf16", metadata={"help": "(`['bf16', 'fp16', 'fp32']`)"})
init_weights: Literal[True, "bat", "mini"] = field(
default=True,
metadata={
"help": (
"True -> MiSS efficience and balance; `bat` -> Bat, `mini` -> smaller MiSS efficience and balance"
),
},
)
miss_r: int = field(default=16)
merge_and_save: bool = field(default=False)
# dataset configs
data_path: str = field(default="imdb", metadata={"help": "Path to the training data."})
dataset_split: str = field(default="train[:1%]", metadata={"help": "Dataset split to load, e.g. 'train', 'test' or 'eval'; slicing such as 'train[:1%]' is supported."})
dataset_field: Optional[list[str]] = field(default=None, metadata={"help": "Fields of dataset input and output."})
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
print(script_args)
print(f"Load pre-processed residual model in {script_args.bits} bits.")
if script_args.bits in ["nf4", "fp4", "int8"]:
print("MiSS currently does not support quantization.")
elif script_args.base_model_name_or_path is not None:
print(f"No available pre-processed model, manually initialize a MiSS using {script_args.base_model_name_or_path}.")
model = AutoModelForCausalLM.from_pretrained(
script_args.base_model_name_or_path,
dtype=(
torch.float16
if script_args.bits == "fp16"
else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
),
device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
tokenizer.pad_token_id = tokenizer.eos_token_id
miss_config = MissConfig(
r=script_args.miss_r,
target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
bias="none",
task_type="CAUSAL_LM",
init_weights=script_args.init_weights,
)
peft_model = get_peft_model(model, miss_config)
print(peft_model)
peft_model.print_trainable_parameters()
print(f"Training MiSS with trl on the {script_args.data_path}[{script_args.dataset_split}] dataset.")
dataset = load_dataset(script_args.data_path, split=script_args.dataset_split)
dataset = dataset.map(
lambda example: {
"text": f"### USER: {example[script_args.dataset_field[0]]}\n### ASSISTANT: {example[script_args.dataset_field[1]]}"
}
)
trainer = SFTTrainer(
model=peft_model,
args=script_args,
train_dataset=dataset,
processing_class=tokenizer,
)
trainer.train()
trainer.save_state()
peft_model.save_pretrained(
os.path.join(script_args.output_dir, "miss_ft"),
)
if script_args.merge_and_save:
model = peft_model.merge_and_unload()
model.save_pretrained(os.path.join(script_args.output_dir, "miss_merged"))
tokenizer.save_pretrained(os.path.join(script_args.output_dir, "miss_merged"))
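# A hedged invocation sketch (model id, dataset name and field names below are placeholders, not values
# tested with this script):
#
#   python miss_finetuning.py \
#       --base_model_name_or_path <base-model-id> \
#       --data_path <dataset-name> \
#       --dataset_field <input_column> <output_column> \
#       --output_dir ./miss_output \
#       --miss_r 16 --init_weights bat --merge_and_save True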
| {
"repo_id": "huggingface/peft",
"file_path": "examples/miss_finetuning/miss_finetuning.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:scripts/convert-bone-to-miss.py | #!/usr/bin/env python3
# Copyright (c) 2025 Your Organization/Project. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Bone checkpoint to MiSS format."""
import argparse
import json
import os
from pathlib import Path
from safetensors import safe_open
from safetensors.torch import save_file
from peft.utils import CONFIG_NAME, SAFETENSORS_WEIGHTS_NAME
def convert_bone_to_miss(bone_dir: Path, miss_dir: Path) -> None:
"""Convert Bone checkpoint files to MiSS format."""
bone_config_path = bone_dir / CONFIG_NAME
miss_config_path = miss_dir / CONFIG_NAME
if not os.path.exists(miss_dir):
os.makedirs(miss_dir, exist_ok=True)
with open(bone_config_path, encoding="utf-8") as f:
config = json.load(f)
config["peft_type"] = "MISS"
with open(miss_config_path, "w", encoding="utf-8") as f:
json.dump(config, f, indent=2, ensure_ascii=False)
bone_weight_path = bone_dir / SAFETENSORS_WEIGHTS_NAME
miss_weight_path = miss_dir / SAFETENSORS_WEIGHTS_NAME
new_data = {}
with safe_open(bone_weight_path, framework="pt") as f:
for old_key in f.keys():
tensor = f.get_tensor(old_key)
new_key = old_key.replace(".bone_", ".miss_")
new_data[new_key] = tensor
save_file(new_data, miss_weight_path)
print(f"Converted checkpoint saved at {miss_weight_path}")
def main() -> None:
parser = argparse.ArgumentParser(description="Convert Bone checkpoint to MiSS format.")
parser.add_argument("bone_dir", type=Path, help="Directory containing Bone checkpoint files")
parser.add_argument("miss_dir", type=Path, help="Directory to save MiSS checkpoint files")
args = parser.parse_args()
args.miss_dir.mkdir(parents=True, exist_ok=True)
convert_bone_to_miss(args.bone_dir, args.miss_dir)
if __name__ == "__main__":
main()
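# Minimal usage sketch (directory names are placeholders): the script takes the Bone checkpoint directory
# and the target MiSS directory as positional arguments,
#
#   python convert-bone-to-miss.py ./bone_checkpoint ./miss_checkpoint
#
# which rewrites "peft_type" to "MISS" in the adapter config and renames weight keys from ".bone_" to ".miss_".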
| {
"repo_id": "huggingface/peft",
"file_path": "scripts/convert-bone-to-miss.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/miss/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class MissConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`MiSSModel`].
Args:
r (`int`):
The rank of MiSS across different layers. It is best to set 'r' to an even number; otherwise, the default
initialization method will not work. The rank of MiSS corresponds to a low-rank decomposition along the
in_features dimension.
miss_dropout (`float`):
The dropout probability for MiSS layers.
mini_r (`int`):
The rank of MiSS corresponds to a low-rank decomposition along the out_features dimension. When you set
`init_weights=mini`, you need to set `mini_r`. Please make sure that `out_features` is divisible by
`mini_r`.
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding
the output layer. If this is not specified, modules will be chosen according to the model architecture. If
the architecture is not known, an error will be raised -- in this case, you should specify the target
modules manually.
exclude_modules (`Optional[Union[List[str], str]]`):
The names of the modules to not apply the adapter. When passing a string, a regex match will be performed.
When passing a list of strings, either an exact match will be performed or it is checked if the name of the
module ends with any of the passed strings.
init_weights (bool | Literal["bat", "mini"]):
Different initializations correspond to different MiSS variants. By default (balance), the most efficient
and general method in MiSS will be used. 'bat': In this mode, you can enable nonlinear updates across
different shards. 'mini': In this mode, you can set a smaller rank to use fewer trainable parameters, but
it is recommended to keep `out_features % mini_r == 0`.
layers_to_transform (`Union[List[int], int]`):
The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
that are specified in this list. If a single integer is passed, it will apply the transformations on the
layer at this index.
layers_pattern (`str`):
The layer pattern name, used only if `layers_to_transform` is different from `None`.
modules_to_save (`List[str]`):
List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
"""
r: int = field(
default=64,
metadata={
"help": "The rank of MiSS corresponds to a low-rank decomposition along the in_features dimension.",
"note": "It is best to set 'r' to an even number; otherwise, the default initialization method will not work.",
},
)
miss_dropout: float = field(default=0.0, metadata={"help": "MiSS dropout"})
mini_r: int = field(
default=1,
metadata={
"help": "The rank of MiSS corresponds to a low-rank decomposition along the out_features dimension.",
"note": "It is recommended that mini_r be divisible by out_features. When mini_r == out_features, the mini method is equivalent to the default efficient MiSS.",
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": "List of module names or regex expression of the module names to replace with MiSS.",
"example": "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' ",
},
)
exclude_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={"help": "List of module names or regex expression of the module names to exclude from MiSS."},
)
init_weights: bool | Literal["bat", "mini"] = field(
default=True,
metadata={
"help": (
"True -> MiSS balance; `bat` -> Bat; `mini` -> smaller rank and efficiency"
"Whether to initialize the weights of the MiSS layers with their default initialization. Don't change "
"this setting, except if you know exactly what you're doing."
),
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
},
)
layers_pattern: Optional[str] = field(
default=None,
metadata={
"help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
},
)
bias: str = field(default="none", metadata={"help": "Bias type for MiSS. Can be 'none', 'all' or 'MiSS_only'"})
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": "List of modules apart from MiSS layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.MISS
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
self.exclude_modules = (
set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
)
# if target_modules is a regex expression, then layers_to_transform should be None
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
# if target_modules is a regex expression, then layers_pattern should be None
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/miss/config.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/miss/layer.py | # Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import warnings
from typing import Any, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class MissLayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("miss_block",)
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("miss_r", "miss_dropout", "miss_mini_r")
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.miss_r = {}
self.miss_dropout = nn.ModuleDict({})
self.miss_mini_r = {}
self.miss_block = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
# flag to enable/disable casting of input to weight dtype during forward call
self.cast_input_dtype_enabled = True
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
def update_layer(
self,
adapter_name: str,
r: int,
mini_r: int,
miss_dropout,
init_weights: bool | str,
inference_mode: bool = False,
**kwargs,
) -> None:
"""Internal function to create miss adapter
Args:
adapter_name (`str`): Name for the adapter to add.
r (`int`): Rank for the added adapter.
init_weights (`bool`): Whether to initialize weights.
"""
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.miss_r[adapter_name] = r
self.miss_mini_r[adapter_name] = mini_r
if miss_dropout > 0.0:
miss_dropout_layer = nn.Dropout(p=miss_dropout)
else:
miss_dropout_layer = nn.Identity()
self.miss_dropout[adapter_name] = miss_dropout_layer
# Determine shape of MiSS weights
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.miss_block[adapter_name] = nn.Parameter(torch.zeros(r, self.out_features), requires_grad=True)
else:
raise TypeError(f"MiSS is not implemented for base layers of type {type(base_layer).__name__}")
# Initialize weights
if init_weights == "bat":
if self.in_features % r != 0 or self.out_features % r != 0:
raise ValueError("The weight matrix must be fully divisible into [r, r] blocks.")
self.reset_bat_parameters(adapter_name, r)
elif init_weights == "mini":
if self.out_features % mini_r != 0:
raise ValueError(
"mini_r is divided along the out_features dimension. For optimal performance and implementation simplicity,"
"it is recommended that out_features be divisible by mini_r."
"Error: {self.out_features} % mini_r != 0"
)
self.reset_mini_parameters(adapter_name, r, mini_r)
elif init_weights:
self.reset_miss_parameters(adapter_name, r)
else:
self.reset_miss_parameters_random(adapter_name)
# Move new weights to device
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters, inference_mode=inference_mode)
def reset_miss_parameters(self, adapter_name: str, r):
self.miss_block[adapter_name] = nn.Parameter(torch.zeros(r, self.out_features), requires_grad=True)
def reset_bat_parameters(self, adapter_name: str, r):
self.miss_block[adapter_name] = nn.Parameter(torch.zeros(self.out_features // r, r, r), requires_grad=True)
def reset_mini_parameters(self, adapter_name: str, r, mini_r):
self.miss_block[adapter_name] = nn.Parameter(torch.zeros(r, mini_r), requires_grad=True)
def reset_miss_parameters_random(self, adapter_name: str):
nn.init.kaiming_uniform_(self.miss_block[adapter_name], a=math.sqrt(5))
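# Shape summary for the miss_block parameter created by update_layer and the reset_* methods above:
#   default ("balance"): (r, out_features)
#   "bat":               (out_features // r, r, r)
#   "mini":              (r, mini_r)
# Random initialization reuses the (r, out_features) block created in update_layer and fills it with
# kaiming-uniform values instead of zeros.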
def scale_layer(self, scale: float) -> None:
if scale == 1:
return
for active_adapter in self.active_adapters:
if active_adapter not in self.miss_block.keys():
continue
warnings.warn("Scaling operation for MiSS not supported! Automatically set scale to 1.")
def unscale_layer(self, scale=None) -> None:
for active_adapter in self.active_adapters:
if active_adapter not in self.miss_block.keys():
continue
warnings.warn("Unscaling operation for MiSS not supported! Keeping scale at 1.")
class MissLinear(nn.Module, MissLayer):
"""
MiSS implemented in a dense layer.
"""
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
mini_r: int = 0,
miss_dropout: float = 0.0,
init_weights: Union[bool, str] = True,
**kwargs,
) -> None:
super().__init__()
MissLayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, mini_r, miss_dropout, init_weights, **kwargs)
self.miss_fn = init_weights
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If `None`, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.miss_block.keys():
base_layer = self.get_base_layer()
orig_dtype = base_layer.weight.dtype
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weight = base_layer.weight.data.clone()
if self.miss_fn == "bat":
delta_weight = self.get_delta_weight(active_adapter, orig_weight)
orig_weight += delta_weight
elif self.miss_fn == "mini":
delta_weight = self.get_delta_weight_miss(active_adapter, self.base_layer.weight.data)
orig_weight = delta_weight
else:
delta_weight = self.get_delta_weight_miss(active_adapter, self.base_layer.weight.data)
orig_weight = delta_weight
if not torch.isfinite(orig_weight).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weight.to(orig_dtype)
else:
if self.miss_fn == "bat":
delta_weight = self.get_delta_weight(active_adapter, self.base_layer.weight.data)
base_layer.weight.data += delta_weight.to(orig_dtype)
elif self.miss_fn == "mini":
delta_weight = self.get_delta_weight_miss(active_adapter, self.base_layer.weight.data)
base_layer.weight.data = delta_weight.to(orig_dtype)
else:
delta_weight = self.get_delta_weight_miss(active_adapter, self.base_layer.weight.data)
base_layer.weight.data = delta_weight.to(orig_dtype)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
base_layer = self.get_base_layer()
orig_dtype = base_layer.weight.dtype
if active_adapter in self.miss_block.keys():
orig_weight = self.get_base_layer().weight.data.clone()
if self.miss_fn == "bat":
delta_weight = self.get_delta_weight(active_adapter, orig_weight, re=True)
elif self.miss_fn == "mini":
delta_weight = self.get_delta_weight_miss(active_adapter, orig_weight, re=True)
else:
delta_weight = self.get_delta_weight_miss(active_adapter, orig_weight, re=True)
base_layer.weight.data = delta_weight.to(orig_dtype)
def get_delta_weight(self, adapter, orig_weight, re: bool = False) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.miss_block[adapter].device
dtype = self.miss_block[adapter].dtype
# In case users want to merge the adapter weights that are in
# (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
# (b)float16 because some CPUs have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_miss = self.miss_block[adapter]
if cast_to_fp32:
weight_miss = weight_miss.float()
orig_weight = orig_weight.to(weight_miss.dtype)
r = weight_miss.size(-1)
if re:
o = orig_weight.reshape(orig_weight.size(0) // r, r, orig_weight.size(1) // r, r).permute(2, 0, 1, 3)
one = torch.eye(weight_miss.size(-1)).to(weight_miss.device)
# inverse must be in float32, after that the dtype can be adjusted if needed
inv_I_plus_b = torch.inverse(one + weight_miss)
inv_I_plus_b = inv_I_plus_b.to(weight_miss.dtype)
w = (o - weight_miss) @ inv_I_plus_b
output_tensor = w.permute(1, 2, 0, 3).reshape(*orig_weight.shape)
else:
w = (
orig_weight.reshape(orig_weight.size(0) // r, r, orig_weight.size(1) // r, r).permute(2, 0, 1, 3)
@ weight_miss
+ weight_miss
)
output_tensor = w.permute(1, 2, 0, 3).reshape(*orig_weight.shape)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.miss_block[adapter].data = weight_miss.to(dtype)
return output_tensor
def get_delta_weight_miss(self, adapter, orig_weight, re: bool = False) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
device = self.miss_block[adapter].device
dtype = self.miss_block[adapter].dtype
# In case users want to merge the adapter weights that are in
# (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to
# (b)float16 because some CPUs have slow bf16/fp16 matmuls.
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16)
weight_miss = self.miss_block[adapter]
if cast_to_fp32:
weight_miss = weight_miss.float()
in_features = orig_weight.size(-1)
out_features = orig_weight.size(0)
r = weight_miss.size(0)
if self.miss_fn == "mini":
weight_miss = weight_miss.repeat(1, out_features // self.miss_mini_r[adapter])
if in_features % r != 0:
last_size = in_features % r
n_block = in_features // r
n_block_size = n_block * r
if re:
orig_weight[:, :n_block_size] = (
(orig_weight[:, :n_block_size].reshape(-1, n_block, r).permute(1, 2, 0) - weight_miss)
.permute(2, 0, 1)
.reshape(*orig_weight[:, :n_block_size].shape)
)
orig_weight[:, n_block_size:] = (
orig_weight[:, n_block_size:] - (weight_miss.transpose(0, 1))[:, :last_size]
)
else:
orig_weight[:, :n_block_size] = (
(orig_weight[:, :n_block_size].reshape(-1, n_block, r).permute(1, 2, 0) + weight_miss)
.permute(2, 0, 1)
.reshape(*orig_weight[:, :n_block_size].shape)
)
orig_weight[:, n_block_size:] = (
orig_weight[:, n_block_size:] + (weight_miss.transpose(0, 1))[:, :last_size]
)
output_tensor = orig_weight
else:
if re:
w = orig_weight.reshape(-1, orig_weight.size(1) // r, r).permute(1, 2, 0) - weight_miss
output_tensor = w.permute(2, 0, 1).reshape(*orig_weight.shape)
else:
w = orig_weight.reshape(-1, orig_weight.size(1) // r, r).permute(1, 2, 0) + weight_miss
output_tensor = w.permute(2, 0, 1).reshape(*orig_weight.shape)
if cast_to_fp32:
output_tensor = output_tensor.to(dtype=dtype)
# cast back the weights
self.miss_block[adapter].data = weight_miss.to(dtype)
return output_tensor
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
if self.miss_fn == "bat":
orig_weight = self.base_layer.weight.data.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.miss_block.keys():
continue
delta_weight = self.get_delta_weight(active_adapter, orig_weight)
orig_weight = orig_weight + delta_weight
x = self._cast_input_dtype(x, orig_weight.dtype)
bias = self._cast_input_dtype(self.base_layer.bias, orig_weight.dtype)
result = F.linear(input=x, weight=orig_weight, bias=bias)
else:
result = self.base_layer(x, *args, **kwargs)
for active_adapter in self.active_adapters:
if active_adapter not in self.miss_block.keys():
continue
miss = self.miss_block[active_adapter]
if self.miss_fn == "mini":
miss = miss.repeat(1, self.base_layer.out_features // self.miss_mini_r[active_adapter])
dropout = self.miss_dropout[active_adapter]
r = miss.size(0)
if x.size(-1) % r != 0:
padding_size = (r - x.size(-1) % r) % r
x = F.pad(x, (0, padding_size))
x = self._cast_input_dtype(x, miss.dtype)
result = result + torch.sum(dropout(x).reshape(*x.shape[:-1], x.size(-1) // r, r), dim=-2) @ miss
result = result.to(previous_dtype)
return result
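# Walk-through of the default (non-"bat") adapter path above for an input x of shape (..., in_features):
# x is zero-padded so its last dimension is a multiple of r, reshaped into shards of size r, summed over
# the shard dimension to (..., r), and multiplied by the (r, out_features) miss_block, so the adapter
# update added to the base output has shape (..., out_features). In the "mini" case the (r, mini_r) block
# is first tiled along the output dimension to (r, out_features).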
def supports_lora_conversion(self, adapter_name: str = "default") -> bool:
# only 'bat' can be converted in a straightforward way
return self.miss_fn == "bat"
def __repr__(self) -> str:
rep = super().__repr__()
return "miss." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/miss/layer.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/miss/model.py | # Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer
from peft.utils import TRANSFORMERS_MODELS_TO_MISS_TARGET_MODULES_MAPPING
from .layer import MissLayer, MissLinear
class MissModel(BaseTuner):
"""
Creates a MiSS model from a pretrained model. The method is described in
https://huggingface.co/papers/2409.15371
Args:
model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached.
config ([`MissConfig`]): The configuration of the MiSS model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
Create empty adapter weights on meta device. Useful to speed up the loading process.
Returns:
`torch.nn.Module`: The MiSS model.
Example:
```py
>>> from diffusers import StableDiffusionPipeline
>>> from peft import MissModel, MissConfig
>>> config_te = MissConfig(
... r=8,
... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
... init_weights=True,
... )
>>> config_unet = MissConfig(
... r=8,
... target_modules=[
... "proj_in",
... "proj_out",
... "to_k",
... "to_q",
... "to_v",
... "to_out.0",
... "ff.net.0.proj",
... "ff.net.2",
... ],
... init_weights=True,
... )
>>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
>>> model.text_encoder = MissModel(model.text_encoder, config_te, "default")
>>> model.unet = MissModel(model.unet, config_unet, "default")
```
**Attributes**:
- **model** ([`~torch.nn.Module`]) -- The model to be adapted.
- **peft_config** ([`MissConfig`]): The configuration of the MiSS model.
"""
prefix: str = "miss_"
tuner_layer_cls = MissLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_MISS_TARGET_MODULES_MAPPING
def _create_and_replace(
self,
miss_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
bias = hasattr(target, "bias") and target.bias is not None
kwargs = {
"r": miss_config.r,
"mini_r": miss_config.mini_r,
"miss_dropout": miss_config.miss_dropout,
"init_weights": miss_config.init_weights,
}
kwargs["bias"] = bias
# If it is not a MissLayer, create a new module, else update it with new adapters
if not isinstance(target, MissLayer):
new_module = self._create_new_module(miss_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapters:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
else:
target.update_layer(
adapter_name,
r=miss_config.r,
init_weights=miss_config.init_weights,
miss_dropout=miss_config.miss_dropout,
mini_r=miss_config.mini_r,
)
@staticmethod
def _create_new_module(miss_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
new_module = MissLinear(target, adapter_name, **kwargs)
else:
raise ValueError(
f"Target module {target} is not supported. Currently, only `torch.nn.Linear` is supported."
)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/miss/model.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_target_parameters.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from transformers import AutoModelForCausalLM
import peft
from peft import LoraConfig, TaskType, get_peft_model
from peft.tuners.lora.layer import ParamWrapper
from .testing_common import PeftCommonTester
from .testing_utils import hub_online_once, set_init_weights_false
ALL_CONFIGS = [
##########
# Llama4 #
##########
# target down_proj
(
"trl-internal-testing/tiny-Llama4ForCausalLM",
LoraConfig,
{
"task_type": TaskType.CAUSAL_LM,
"target_modules": [],
"lora_dropout": 0.0,
"target_parameters": [
"feed_forward.experts.down_proj",
],
},
),
# target gate_up_proj and down_proj, but not on the same module
(
"trl-internal-testing/tiny-Llama4ForCausalLM",
LoraConfig,
{
"task_type": TaskType.CAUSAL_LM,
"target_modules": [],
"lora_dropout": 0.0,
"target_parameters": [
"0.feed_forward.experts.gate_up_proj",
"1.feed_forward.experts.down_proj",
],
},
),
# target down_proj and gate_up_proj on the same module
(
"trl-internal-testing/tiny-Llama4ForCausalLM",
LoraConfig,
{
"task_type": "CAUSAL_LM",
"r": 8,
"lora_alpha": 32,
"target_modules": None,
"lora_dropout": 0.0,
"bias": "none",
"target_parameters": [
"feed_forward.experts.down_proj",
"feed_forward.experts.gate_up_proj",
],
},
),
# target q_proj, v_proj as modules, and down_proj as parameter
(
"trl-internal-testing/tiny-Llama4ForCausalLM",
LoraConfig,
{
"task_type": TaskType.CAUSAL_LM,
"target_modules": ["q_proj", "v_proj"],
"lora_dropout": 0.0,
"target_parameters": [
"feed_forward.experts.down_proj",
],
},
),
###########
# gpt-oss #
###########
# target down_proj
(
"trl-internal-testing/tiny-GptOssForCausalLM",
LoraConfig,
{
"task_type": TaskType.CAUSAL_LM,
"target_modules": [],
"lora_dropout": 0.0,
"target_parameters": [
"mlp.experts.down_proj",
],
},
),
# target gate_up_proj and down_proj, but not on the same module
(
"trl-internal-testing/tiny-GptOssForCausalLM",
LoraConfig,
{
"task_type": TaskType.CAUSAL_LM,
"target_modules": [],
"lora_dropout": 0.0,
"target_parameters": [
"0.mlp.experts.gate_up_proj",
"1.mlp.experts.down_proj",
],
},
),
# target down_proj and gate_up_proj on the same module
(
"trl-internal-testing/tiny-GptOssForCausalLM",
LoraConfig,
{
"task_type": "CAUSAL_LM",
"r": 8,
"lora_alpha": 32,
"target_modules": None,
"lora_dropout": 0.0,
"bias": "none",
"target_parameters": [
"mlp.experts.down_proj",
"mlp.experts.gate_up_proj",
],
},
),
# target q_proj, v_proj as modules, and down_proj as parameter
(
"trl-internal-testing/tiny-GptOssForCausalLM",
LoraConfig,
{
"task_type": TaskType.CAUSAL_LM,
"target_modules": ["q_proj", "v_proj"],
"lora_dropout": 0.0,
"target_parameters": [
"mlp.experts.down_proj",
],
},
),
]
class MyAutoModelForCausalLM(AutoModelForCausalLM):
@classmethod
def from_pretrained(cls, *args, **kwargs):
torch.manual_seed(0)
model = AutoModelForCausalLM.from_pretrained(*args, **kwargs)
# check that we load the original model, not, say, a trained checkpoint
if args[0] == "trl-internal-testing/tiny-Llama4ForCausalLM":
# model contains weights with values ~1e36 or nan, so we need to reinitialize with sane values
with torch.no_grad():
for param in model.parameters():
param.data = torch.randn(param.shape)
return model
def test_rank_pattern_for_moe_target_parameters(tmp_path):
model_id = "trl-internal-testing/tiny-Llama4ForCausalLM"
with hub_online_once(model_id):
model = MyAutoModelForCausalLM.from_pretrained(model_id)
num_experts = getattr(model.config, "num_local_experts", None) or getattr(model.config, "num_experts", None)
assert num_experts is not None
r = 8
effective_r = max(1, r // num_experts)
config = LoraConfig(
r=r,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
target_parameters=["feed_forward.experts.gate_up_proj"],
rank_pattern={
"experts.gate_up_proj": effective_r,
},
init_lora_weights=False,
)
model = get_peft_model(model, config)
wrappers = [
module
for module in model.modules()
if isinstance(module, ParamWrapper) and module.parameter_name == "gate_up_proj"
]
assert wrappers, "Expected to find ParamWrapper for gate_up_proj."
lora_module = wrappers[0]
assert lora_module.r["default"] == effective_r
assert lora_module.lora_A["default"].weight.shape[0] == effective_r * num_experts
assert lora_module.scaling["default"] == config.lora_alpha / effective_r
assert config.r == r
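# Worked example of the effective-rank arithmetic above (numbers are illustrative, not tied to the tiny
# test model): with r = 8 and num_experts = 4, effective_r = max(1, 8 // 4) = 2, so the rank_pattern entry
# requests rank 2 per expert, lora_A ends up with 2 * 4 = 8 rows, and config.r itself stays at 8.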
class TestDecoderModelsTargetParameters(PeftCommonTester):
# This is more or less a copy of TestDecoderModels at the time of the PR being added. Unnecessary code is removed,
# like code required for testing non-LoRA methods. The tests being included are not selected to test specific
# functionality of targeting nn.Parameters; they (together with the tests in test_custom_models.py) just ensure
# that, generally, nothing is broken.
transformers_class = MyAutoModelForCausalLM
def prepare_inputs_for_testing(self):
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_attributes_parametrized(self, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_adapter_name(self, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_prepare_for_training_parametrized(self, model_id, config_cls, config_kwargs):
self._test_prepare_for_training(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_save_pretrained(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_save_pretrained(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_save_pretrained_pickle(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_save_pretrained(model_id, config_cls, config_kwargs.copy(), safe_serialization=False)
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs.copy())
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_save_pretrained_selected_adapters_pickle(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_save_pretrained_selected_adapters(
model_id, config_cls, config_kwargs.copy(), safe_serialization=False
)
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_merge_layers(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_merge_layers(model_id, config_cls, config_kwargs.copy())
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_merge_layers_multi(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_merge_layers_multi(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_merge_layers_nan(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_merge_layers_nan(model_id, config_cls, config_kwargs.copy())
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_mixed_adapter_batches(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
msg = "lora.ParamWrapper does not support mixed adapter batches yet."
with pytest.raises(ValueError, match=msg):
self._test_mixed_adapter_batches(model_id, config_cls, config_kwargs.copy())
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_generate_with_mixed_adapter_batches(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
msg = "lora.ParamWrapper does not support mixed adapter batches yet."
with pytest.raises(ValueError, match=msg):
self._test_generate_with_mixed_adapter_batches_and_beam_search(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_generate(self, model_id, config_cls, config_kwargs):
self._test_generate(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_generate_pos_args(self, model_id, config_cls, config_kwargs):
self._test_generate_pos_args(model_id, config_cls, config_kwargs.copy(), raises_err=False)
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_merge_layers_fp16(self, model_id, config_cls, config_kwargs):
self._test_merge_layers_fp16(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_generate_half_prec(self, model_id, config_cls, config_kwargs):
self._test_generate_half_prec(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_training_decoders(self, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_training_decoders_gradient_checkpointing(self, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_inference_safetensors(self, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_peft_model_device_map(self, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs.copy())
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_delete_adapter(self, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs.copy())
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs):
self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_unload_adapter(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_unload_adapter(model_id, config_cls, config_kwargs.copy())
@pytest.mark.skip(reason="Multiple adapters with target_parameters are not supported yet.")
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
msg = "add_weighted_adapter does not support targeting nn.Parameter"
with pytest.raises(ValueError, match=msg):
self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs):
self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_disable_adapter(self, model_id, config_cls, config_kwargs):
config_kwargs = set_init_weights_false(config_cls, config_kwargs)
self._test_disable_adapter(model_id, config_cls, config_kwargs.copy())
@pytest.mark.parametrize("model_id,config_cls,config_kwargs", ALL_CONFIGS)
def test_passing_input_embeds_works(self, model_id, config_cls, config_kwargs):
self._test_passing_input_embeds_works("", model_id, config_cls, config_kwargs.copy())
class TestTargetParameters:
# Tests specifically designed for target_parameters
def test_targeting_module_and_targeting_param_equivalent(self):
# Test that using LoRA with target_modules vs target_parameters yields identical results.
# note: we purposely target the gate_proj because its weight is not square (unlike q_proj, ...), this makes it
# easier to catch shape errors
torch.manual_seed(0)
model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"
with hub_online_once(model_id):
model0 = AutoModelForCausalLM.from_pretrained(model_id)
x = torch.arange(10).view(2, 5)
with torch.inference_mode():
out_base = model0(x, output_hidden_states=True).hidden_states[-1]
# targeting the module
config0 = LoraConfig(target_modules=["gate_proj"], init_lora_weights=False)
model0 = get_peft_model(model0, config0)
# targeting the parameter
model1 = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
config1 = LoraConfig(target_modules=[], target_parameters=["gate_proj.weight"], init_lora_weights=False)
model1 = get_peft_model(model1, config1)
gate_proj_0_0 = model0.base_model.model.model.layers[0].mlp.gate_proj
gate_proj_0_1 = model0.base_model.model.model.layers[1].mlp.gate_proj
gate_proj_1_0 = model1.base_model.model.model.layers[0].mlp.gate_proj
gate_proj_1_1 = model1.base_model.model.model.layers[1].mlp.gate_proj
# ensure that the randomly initialized LoRA weights are identical
gate_proj_1_0.lora_A.default.weight.data.copy_(gate_proj_0_0.lora_A.default.weight.data)
gate_proj_1_1.lora_A.default.weight.data.copy_(gate_proj_0_1.lora_A.default.weight.data)
gate_proj_1_0.lora_B.default.weight.data.copy_(gate_proj_0_0.lora_B.default.weight.data)
gate_proj_1_1.lora_B.default.weight.data.copy_(gate_proj_0_1.lora_B.default.weight.data)
with torch.inference_mode():
out_lora_0 = model0(x, output_hidden_states=True).hidden_states[-1]
out_lora_1 = model1(x, output_hidden_states=True).hidden_states[-1]
# sanity check: the LoRA output should differ from the base model output
atol, rtol = 1e-6, 1e-6
assert not torch.allclose(out_base, out_lora_0, atol=atol, rtol=rtol)
# LoRA outputs should be the same
assert torch.allclose(out_lora_0, out_lora_1, atol=atol, rtol=rtol)
def test_target_multiple_parameters_on_same_module(self, monkeypatch):
# test that if we target multiple nn.Parameters on the same module, all of them are being used during the
# forward pass
torch.manual_seed(0)
model_id = "trl-internal-testing/tiny-Llama4ForCausalLM"
with hub_online_once(model_id):
x = torch.arange(10).view(2, 5)
model = MyAutoModelForCausalLM.from_pretrained(model_id)
shape_gate_up_proj = model.model.layers[0].feed_forward.experts.gate_up_proj.shape
shape_down_proj = model.model.layers[0].feed_forward.experts.down_proj.shape
num_layers = len(model.model.layers)
target_parameters = ["feed_forward.experts.gate_up_proj", "feed_forward.experts.down_proj"]
num_params = len(target_parameters)
config = LoraConfig(target_parameters=target_parameters, init_lora_weights=False)
model = get_peft_model(model, config)
# CHECK FORWARD CALLS
# log the weights seen during the forward call
weights = []
def mock_forward(self, W):
weights.append(W)
return orig_forward(self, W)
from peft.tuners.lora.layer import _LoraParameterProxy
orig_forward = _LoraParameterProxy.forward
monkeypatch.setattr(_LoraParameterProxy, "forward", mock_forward)
num_steps = 3
with torch.inference_mode():
for _ in range(num_steps):
out_base = model(x, output_hidden_states=True).hidden_states[-1]
actual_call_count = len(weights)
# Note: We call forward twice per step, once to create the parametrization and once for the actual forward
# step. This may be a bit wasteful but it's not clear how to prevent this and overall is probably negligible
num_forward_per_step = 2
# Since https://github.com/huggingface/transformers/pull/39501, one of the parameters is accessed twice per
# forward call, but we cache all calls after the first.
expected_call_count = num_steps * num_layers * num_params * num_forward_per_step
assert actual_call_count == expected_call_count
actual_shapes = {W.shape for W in weights}
expected_shapes = {shape_gate_up_proj, shape_down_proj}
assert actual_shapes == expected_shapes
# CHECK WEIGHT UPDATES
lora_weights_before = {
k: v.clone() for k, v in model.named_parameters() if "lora_A.default" in k or "lora_B.default" in k
}
# sanity check:
assert len(lora_weights_before) == 2 * num_layers * num_params
# train
optim = torch.optim.SGD(model.parameters(), lr=0.01)
for _ in range(10):
optim.zero_grad()
out = model(x)
loss = out.logits.sum()
loss.backward()
optim.step()
lora_weights_after = {
k: v for k, v in model.named_parameters() if "lora_A.default" in k or "lora_B.default" in k
}
assert lora_weights_before.keys() == lora_weights_after.keys()
atol, rtol = 0.1, 0.1
for key in lora_weights_before.keys():
assert not torch.allclose(lora_weights_before[key], lora_weights_after[key], atol=atol, rtol=rtol)
def test_target_parameters_works_with_existing_parametrization(self):
# When a parameter is already parametrized, we want the LoRA parametrization to work with it correctly.
class MyLinear(nn.Linear):
# For testing purposes, define a linear layer with 2 parameters: weight and other_weight.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
nn.init.ones_(self.weight)
self.other_weight = nn.Parameter(torch.ones(self.weight.shape))
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.lin = MyLinear(2, 2, bias=False)
def forward(self, x):
return self.lin(x)
class MyParametrization(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x + 1
# base model
model = MyModule()
x = torch.ones((2, 2))
# sanity check: result should be 1*1 + 1*1 == 2
output_base = model(x)
assert torch.all(output_base == 2)
# add parametrization to the weight
nn.utils.parametrize.register_parametrization(model.lin, "weight", MyParametrization())
# result should be (1+1)*1 + (1+1)*1 == 4
output_parametrized = model(x)
assert torch.all(output_parametrized == 4)
# add LoRA parametrization to the weight
config = LoraConfig(r=2, lora_alpha=6, target_parameters=["lin.weight"], init_lora_weights=False)
model = get_peft_model(model, config)
# manually set LoRA weights to ones
nn.init.ones_(model.base_model.model.lin.lora_A["default"].weight)
nn.init.ones_(model.base_model.model.lin.lora_B["default"].weight)
output_lora = model(x)
# delta_weight should be: (1+1) * lora_scale = (1+1) * (alpha / rank) = 2 * (6 / 2) = 6
# result should be: (1+1+6)*1 + (1+1+6)*1 == 8 + 8 == 16
assert torch.all(output_lora == 16)
# calling twice should yield the same result
output_lora2 = model(x)
assert torch.allclose(output_lora, output_lora2)
# add another LoRA parametrization to other_weight, should have no effect on the output
config = LoraConfig(r=2, lora_alpha=6, target_parameters=["lin.other_weight"], init_lora_weights=False)
model.add_adapter("other", config)
output_other_lora = model(x)
# the new adapter targets other_weight, which is never used in the forward pass (and it is not the active
# adapter), so the result should stay (1+1+6)*1 + (1+1+6)*1 == 8 + 8 == 16
assert torch.all(output_other_lora == output_lora)
# after unloading, the output should be the same as before LoRA was applied
unloaded = model.unload()
output_unloaded = unloaded(x)
assert torch.all(output_unloaded == output_parametrized)
def test_target_parameter_result_caching_works(self, monkeypatch):
# See 2912
# There was an issue with the caching of _LoraParameterProxy not working correctly. This test checks that the
# results returned from the forward call are all identical to ensure they're not recomputed each time.
torch.manual_seed(0)
model_id = "trl-internal-testing/tiny-GptOssForCausalLM"
tensor_storage = []
def store_tensors_deco(fn):
def wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
tensor_storage.append(result)
return result
return wrapper
monkeypatch.setattr(
peft.tuners.lora.layer._LoraParameterProxy,
"forward",
store_tensors_deco(peft.tuners.lora.layer._LoraParameterProxy.forward),
)
with hub_online_once(model_id):
model = AutoModelForCausalLM.from_pretrained(model_id)
config = LoraConfig(
target_modules=[],
# for simplicity, only target a single layer
target_parameters=["0.mlp.experts.gate_up_proj"],
)
model = get_peft_model(model, config)
x = torch.arange(100).view(2, 50) # larger input to hit many experts
# forward is called twice, once at initialization of the parametrization and once during the forward pass,
# after which it is cached; without caching, it would be called 25 times.
output = model(x, output_hidden_states=True)
assert len(set(map(id, tensor_storage))) == 2
# sanity check: a second forward call _does_ trigger a new forward
output = model(x, output_hidden_states=True)
assert len(set(map(id, tensor_storage))) == 4
def test_target_parameter_init_does_not_warn_about_unknown_layer_type(self, recwarn):
# For target parameters, the layer type is not known. This is fine, as the in_features and out_features are
# derived from the targeted parameter shape. But we need to ensure that there is no warning about the unknown
# layer type.
model_id = "trl-internal-testing/tiny-GptOssForCausalLM"
with hub_online_once(model_id):
model0 = AutoModelForCausalLM.from_pretrained(model_id)
config = LoraConfig(
target_modules=[],
target_parameters=["0.mlp.experts.gate_up_proj"],
)
model = get_peft_model(model0, config)
warn_messages = (w.message.args[0] for w in recwarn.list)
msg_start = "Unsupported layer type"
assert not any(msg.startswith(msg_start) for msg in warn_messages)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_target_parameters.py",
"license": "Apache License 2.0",
"lines": 511,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/shira_finetuning/shira_finetuning.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import torch
import transformers
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
from peft import (
PeftModel,
ShiraConfig,
get_peft_model,
)
def train(
base_model: str = "path/to/model",
data_path: str = "yahma/alpaca-cleaned",
output_dir: str = "shira",
batch_size: int = 16,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 256,
val_set_size: int = 16,
eval_step: int = 100,
save_step: int = 100,
device_map: str = "auto",
shira_r: int = 32,
shira_target_modules: Optional[list[str]] = None,
dtype: str = "float16",
seed: Optional[int] = None,
use_custom_random_mask_function_with_custom_kwargs: Optional[bool] = False,
):
# Set device_map to the right place when enabling DDP.
world_size = int(os.environ.get("WORLD_SIZE", 0)) or int(os.environ.get("PMI_SIZE", 0))
if world_size > 1 and device_map != "cpu":
from accelerate import Accelerator
device_map = {"": Accelerator().process_index}
# Set seed
if seed is not None:
set_seed(seed)
model_kwargs = {"dtype": getattr(torch, dtype), "device_map": device_map}
model = AutoModelForCausalLM.from_pretrained(base_model, **model_kwargs)
tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
# For tokenizers with no pad token, like llama
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
def tokenize(prompt, add_eos_token=True):
result = tokenizer(
prompt,
truncation=True,
max_length=cutoff_len,
padding=False,
return_tensors=None,
)
if (
result["input_ids"][-1] != tokenizer.eos_token_id
and len(result["input_ids"]) < cutoff_len
and add_eos_token
):
result["input_ids"].append(tokenizer.eos_token_id)
result["attention_mask"].append(1)
result["labels"] = result["input_ids"].copy()
return result
def generate_and_tokenize_prompt(example):
full_prompt = generate_prompt(example)
tokenized_full_prompt = tokenize(full_prompt)
return tokenized_full_prompt
def custom_random_mask_function_with_custom_kwargs(custom_arg):
def mask_fn(base_layer, r):
"""
This mask function is similar to the random_mask provided in src/peft/tuners/shira/mask_functions.py except the seed is derived from custom_kwargs.
Please use this as an example to create your own custom sparse masks that may use custom_kwargs. Remember, for a pretrained weight with shape m, n,
mask_fn must return only one mask (shape: m, n) which must be binary 0 or 1 with num_shira_parameters = r(m+n) for linear layers. Device and dtype
of mask must be same as base layer's weight's device and dtype.
"""
new_seed = custom_arg
shape = base_layer.weight.shape
num_shira_weights = r * (shape[0] + shape[1])
random_generator = torch.Generator()
random_generator.manual_seed(new_seed)
idx = (torch.randperm(base_layer.weight.numel(), generator=random_generator)[:num_shira_weights]).to(
base_layer.weight.device
)
val = torch.ones_like(idx.type(base_layer.weight.dtype))
mask = torch.zeros_like(base_layer.weight.view(1, -1))
mask = mask.scatter_(1, idx.unsqueeze(0), val.unsqueeze(0)).view(shape)
return mask
return mask_fn
mask_type = "random" if not use_custom_random_mask_function_with_custom_kwargs else "custom"
config = ShiraConfig(
r=shira_r,
mask_type=mask_type,
target_modules=shira_target_modules,
task_type="CAUSAL_LM",
)
if use_custom_random_mask_function_with_custom_kwargs:
custom_arg = 120
custom_mask_fn = custom_random_mask_function_with_custom_kwargs(custom_arg)
config.mask_fn = custom_mask_fn
model = get_peft_model(model, config)
data = load_dataset(data_path)
train_val = data["train"].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
train_data = train_val["train"].shuffle().map(generate_and_tokenize_prompt)
val_data = train_val["test"].shuffle().map(generate_and_tokenize_prompt)
trainer = transformers.Trainer(
model=model,
train_dataset=train_data,
eval_dataset=val_data,
args=transformers.TrainingArguments(
per_device_train_batch_size=batch_size,
warmup_steps=100,
num_train_epochs=num_epochs,
learning_rate=learning_rate,
logging_steps=100,
optim="adamw_torch",
eval_strategy="steps",
save_strategy="steps",
eval_steps=eval_step,
save_steps=save_step,
output_dir=output_dir,
save_total_limit=3,
load_best_model_at_end=True,
ddp_find_unused_parameters=False if world_size > 1 else None,
),
data_collator=transformers.DataCollatorForSeq2Seq(
tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
),
)
trainer.train()
model.save_pretrained(output_dir)
# Delete the model and load it again from the checkpoint.
del model
model = AutoModelForCausalLM.from_pretrained(base_model, **model_kwargs)
model = PeftModel.from_pretrained(model, output_dir)
def generate_prompt(example):
return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{example["instruction"]}
### Response:
{example["output"]}"""
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--base_model", type=str, default="path/to/model")
parser.add_argument("--data_path", type=str, default="yahma/alpaca-cleaned")
parser.add_argument("--output_dir", type=str, default="shira")
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--learning_rate", type=float, default=3e-4)
parser.add_argument("--cutoff_len", type=int, default=256)
parser.add_argument("--val_set_size", type=int, default=16)
parser.add_argument("--eval_step", type=int, default=100)
parser.add_argument("--save_step", type=int, default=100)
parser.add_argument("--device_map", type=str, default="auto")
parser.add_argument("--shira_r", type=int, default=32)
parser.add_argument("--shira_target_modules", type=str, default=None)
parser.add_argument("--dtype", type=str, default="float16")
parser.add_argument("--seed", type=int, default=None)
parser.add_argument("--use_custom_random_mask_function_with_custom_kwargs", action="store_true")
args = parser.parse_args()
train(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
eval_step=args.eval_step,
save_step=args.save_step,
device_map=args.device_map,
shira_r=args.shira_r,
shira_target_modules=args.shira_target_modules,
dtype=args.dtype,
seed=args.seed,
use_custom_random_mask_function_with_custom_kwargs=args.use_custom_random_mask_function_with_custom_kwargs,
)
| {
"repo_id": "huggingface/peft",
"file_path": "examples/shira_finetuning/shira_finetuning.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/shira/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
from .mask_functions import random_mask
@dataclass
class ShiraConfig(PeftConfig):
"""
This is the configuration class to store the configuration of a [`ShiraModel`].
Args:
r (`int`, *optional*, defaults to `32`):
For a given target module, the number of SHiRA parameters is computed as r(m+n), where the original tensor
dimensions are m x n. This means the number of SHiRA parameters is the same as that for a LoRA adapter.
SHiRA is a high rank adapter. Setting this r parameter does not restrict the rank to this value.
mask_type (`str`, defaults to `random`):
Type of mask function. Defaults to a random sparse mask. An optional user-defined mask_fn to compute the
mask value can also be supplied by instantiating `config = ShiraConfig(...)` and then setting
`config.mask_fn = <your custom mask function>`. For a pretrained weight with shape m x n, the custom mask
function must return only one mask (shape: m x n) which must be binary 0 or 1 with num_shira_parameters =
r(m + n) for linear layers. Device and dtype of mask must be same as base layer's weight's device and
dtype. Please see mask_functions.py for more details and to see the default random sparse mask
implementation.
random_seed (`int`, *optional*, defaults to `None`):
random seed for the torch generator for random_mask.
target_modules (`Union[List[str], str]`):
List of module names or regex expression of the module names to replace with SHiRA. For example, ['q', 'v']
or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. Only linear layers are supported.
fan_in_fan_out (`bool`):
Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses
`Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.
init_weights (`bool`, defaults to `True`):
Initialize SHiRA weight to have zero values. If set to False, SHiRA weights are initialized to randn values
instead of zeros and this is used only for testing.
modules_to_save (`List[str]`):
List of modules apart from SHiRA layers to be set as trainable and saved in the final checkpoint.
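Example:
A minimal sketch of supplying a custom mask function, mirroring the pattern shown in mask_functions.py
(`model` and `my_mask_fn` are placeholders, not names defined by this config):
```py
>>> from peft import ShiraConfig, get_peft_model
>>> config = ShiraConfig(r=32, mask_type="my_custom_mask")
>>> config.mask_fn = my_mask_fn # must return a binary 0/1 mask shaped like the target weight
>>> peft_model = get_peft_model(model, config)
```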
"""
r: int = field(
default=32,
metadata={
"help": (
"For a given target module, the number of SHiRA parameters is computed as r(m+n), where the original "
"tensor dimensions are m x n. This means the number of SHiRA parameters is the same as that for a LoRA adapter. "
"SHiRA is a high rank adapter. Setting this r parameter does not restrict the rank to this value."
)
},
)
mask_type: Literal["random"] = field(
default="random",
metadata={
"help": (
"Type of mask function. Defaults to a random sparse mask. "
"An optional user-defined mask_fn to compute the mask value can also be supplied by instantiating `config = ShiraConfig(...)` and then setting "
"`config.mask_fn = <your custom mask function>`. For a pretrained weight with shape m x n, the custom mask function must return only one mask (shape: m x n) "
"which must be binary 0 or 1 with num_shira_parameters = r(m + n) for linear layers. Device and dtype of mask must be same as base layer's weight's device and dtype. "
"Please see mask_functions.py for more details and to see the default random sparse mask implementation."
)
},
)
random_seed: Optional[int] = field(
default=None, metadata={"help": "random seed for the torch generator for random_mask"}
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"List of module names or regex expression of the module names to replace with SHiRA."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. "
"Only linear layers are supported."
)
},
)
fan_in_fan_out: bool = field(
default=False,
metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
)
init_weights: bool = field(
default=True,
metadata={
"help": "Initialize SHiRA weight to have zero values. If set to False, SHiRA weights are initialized to randn values instead of zeros and this is used only for testing."
},
)
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"List of modules apart from SHiRA layers to be set as trainable and saved in the final checkpoint. For"
" example, in Sequence Classification or Token Classification tasks, the final layer"
" `classifier/score` are randomly initialized and as such need to be trainable and saved."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.SHIRA
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
if self.mask_type == "random":
self.mask_fn = random_mask
else:
if not self.inference_mode:
warnings.warn(
f"Argument {self.mask_type=} is not recognized, please supply your own masking function by calling `config.mask_fn = my_mask_fn`."
)
self.mask_fn = None
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/shira/config.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/shira/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import warnings
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
class ShiraLayer(BaseTunerLayer):
# List all names of layers that may contain trainable adapter weights
adapter_layer_names = ("shira_weight",)
# All names of other adapter-related parameters
other_param_names = ("r", "scaling", "shira_indices")
def __init__(self, base_layer: nn.Module, **kwargs):
self.base_layer = base_layer
self.r = {}
self.scaling = {}
self.shira_weight = nn.ParameterDict({})
self.shira_indices = {}
self.weight_shape = base_layer.weight.shape # Assumes SHiRA is on some layer with "weight" parameter
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
in_features, out_features = base_layer.in_features, base_layer.out_features
else:
raise NotImplementedError("Only nn.Linear layers supported currently")
self.in_features = in_features
self.out_features = out_features
self.kwargs = kwargs
def update_layer(
self,
adapter_name,
mask,
r,
init_weights: bool = True,
inference_mode: bool = False,
**kwargs,
):
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.scaling[adapter_name] = (
1.0 # Default scale during training. Can be set to any (non-negative) value during inference.
)
# The number of shira weights in this layer is determined by r such that the total number of weights is the same as a LoRA Layer (for direct comparisons)
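# For example (illustrative numbers): a 1024 x 4096 linear layer with r=32 gets 32 * (1024 + 4096) = 163,840
# trainable SHiRA values, exactly the count a rank-32 LoRA (A: 32 x 1024, B: 4096 x 32) would have.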
num_shira_weight = r * (self.in_features + self.out_features)
if num_shira_weight > self.in_features * self.out_features:
raise ValueError(
f"The set rank {r} results in more shira params than the total number of params in the base layer {self.in_features * self.out_features} and this is not allowed."
)
# Actual trainable parameters
# We have used a vector parameter with fixed indices that we use inside a torch.sparse_coo_tensor in get_delta_weight function.
# Directly using a torch.sparse_coo_tensor as a parameter could have been possible but we ran into some issues similar to:
# https://github.com/pytorch/pytorch/issues/79542.
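# Illustrative sketch of that layout (hypothetical numbers): with shira_indices = [[0, 2], [1, 3]] and
# shira_weight = [a, b], get_delta_weight builds a sparse m x n tensor with delta[0, 1] = a and delta[2, 3] = b,
# so gradients flow only into the dense value vector while the indices stay fixed.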
shira_init_weight = torch.zeros(num_shira_weight) if init_weights else torch.randn(num_shira_weight)
self.shira_weight[adapter_name] = nn.Parameter(
shira_init_weight.to(self.base_layer.weight.dtype).to(self.base_layer.weight.device),
requires_grad=True,
)
if mask is not None:
# Compute the shira_indices from the mask. Make sure the mask is formed using r*(self.in_features + self.out_features) and not some other K.
mask_indices = torch.where(mask == 1.0)
self.shira_indices[adapter_name] = torch.cat(
[mask_indices[0].unsqueeze(0), mask_indices[1].unsqueeze(0)], 0
).to(torch.int)
self.shira_indices[adapter_name] = self.shira_indices[adapter_name].to(self.base_layer.weight.device)
if self.shira_indices[adapter_name].shape[1] != self.shira_weight[adapter_name].shape[0]:
raise ValueError(
f"The SHiRA indices and weights are not the same dimensions for adapter {adapter_name} in layer {self.base_layer}"
)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters, inference_mode=inference_mode)
def reset_shira_parameters(self, adapter_name):
nn.init.zeros_(self.shira_weight[adapter_name])
def set_scale(self, adapter, scale):
if adapter not in self.scaling:
# Ignore the case where the adapter is not in the layer
return
self.scaling[adapter] = scale
class Linear(nn.Module, ShiraLayer):
# SHiRA implemented in a dense layer
def __init__(
self,
base_layer,
mask,
adapter_name: str,
r: int = 0,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stored weight like (fan_in, fan_out)
init_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
ShiraLayer.__init__(self, base_layer, **kwargs)
self.fan_in_fan_out = fan_in_fan_out
if self.base_layer is not self.get_base_layer():
raise ValueError("SHiRA does not support nested base layers")
self._active_adapter = adapter_name
self.update_layer(adapter_name, mask, r, init_weights=init_weights)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`List[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.shira_weight.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
orig_weights += self.get_delta_weight(active_adapter)
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
base_layer.weight.data += self.get_delta_weight(active_adapter)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.shira_weight.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def get_delta_weight(self, adapter) -> torch.Tensor:
"""
Compute the delta weight for the given adapter.
Args:
adapter (str):
The name of the adapter for which the delta weight should be computed.
"""
# In a multi-GPU environment, the indices can end up on the wrong device, so move them to the weight's device here.
self.shira_indices[adapter] = self.shira_indices[adapter].to(self.shira_weight[adapter].device)
return torch.sparse_coo_tensor(
self.shira_indices[adapter], self.shira_weight[adapter] * self.scaling[adapter], self.weight_shape
)
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
new_weight = copy.deepcopy(self.base_layer.weight.data)
for active_adapter in self.active_adapters:
if active_adapter not in self.shira_weight.keys():
continue
new_weight += self.get_delta_weight(active_adapter)
result = F.linear(x, new_weight, bias=self.base_layer.bias)
return result
def supports_lora_conversion(self, adapter_name: str = "default") -> bool:
# delta weight is sparse, which does not work with SVD
return False
def __repr__(self) -> str:
rep = super().__repr__()
return "shira." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/shira/layer.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/shira/mask_functions.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is intended to store mask functions for use inside SHiRA construction. The mask functions are required to
have a specific signature as shown below.
Required positional arguments:
base_layer - This is the linear layer where the shira adapter will be attached.
r - This parameter is used to determine the number of parameters in the shira adapter in a way that is
    consistent with LoRA sizing. SHiRA is a high rank adapter. Setting this parameter does not restrict the
    adapter rank.
Keyword arguments can be provided as needed by the particular mask function implementation.
Return:
mask - this is a torch.tensor of the same shape as base_layer.weight that contains 0s and 1s with the same
dtype and device as base_layer.weight
If you would like to attach SHiRA adapters to a model using PEFT methods (such as get_peft_model()), using more
arguments than the provided positional arguments, you can create the mask function reference like the following:
```
def create_mask_function_reference(**my_kwargs):
def mask_fn(base_layer, r):
... your implementation here that might use my_kwargs ...
return mask
return mask_fn
```
Then, you can create your peft model with custom SHiRA mask as follows:
```
model = ...
my_kwargs = ...
mask_fn = create_mask_function_reference(**my_kwargs)
peft_config = ShiraConfig(r=4, mask_type='my_custom_mask')
peft_config.mask_fn = mask_fn
peft_model = get_peft_model(model, peft_config)
```
Complete training examples are provided in the examples/shira_finetuning/ directory.
"""
from typing import Optional
import torch
import torch.nn as nn
def random_mask(base_layer: nn.Module, r: int, random_seed: Optional[int] = None, **kwargs) -> torch.Tensor:
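"""Return a random binary mask with exactly r * (m + n) ones for an m x n base_layer.weight, created on the weight's device with its dtype."""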
shape = base_layer.weight.shape
num_shira_weights = r * (shape[0] + shape[1])
random_generator = torch.Generator()
if random_seed is not None:
random_generator.manual_seed(random_seed)
idx = (torch.randperm(base_layer.weight.numel(), generator=random_generator)[:num_shira_weights]).to(
base_layer.weight.device
)
val = torch.ones_like(idx.type(base_layer.weight.dtype))
mask = torch.zeros_like(base_layer.weight.view(1, -1))
mask = mask.scatter_(1, idx.unsqueeze(0), val.unsqueeze(0)).view(shape)
return mask
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/shira/mask_functions.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/shira/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
import torch
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer
from peft.utils import (
TRANSFORMERS_MODELS_TO_SHIRA_TARGET_MODULES_MAPPING,
)
from .layer import Linear, ShiraLayer
class ShiraModel(BaseTuner):
"""
Creates a Sparse High Rank Adapter (SHiRA) Model from a pretrained model.
Args:
model ([`~transformers.PreTrainedModel`]): The model to be adapted.
config ([`ShiraConfig`]): The configuration of the SHiRA model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
Returns:
`torch.nn.Module`: The SHiRA model.
Example:
```py
>>> from transformers import AutoModelForCausalLM
>>> from peft import ShiraConfig, get_peft_model
>>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
>>> config = ShiraConfig(r=32)
>>> model = get_peft_model(base_model, config)
```
**Attributes**:
- **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`ShiraConfig`]): The configuration of the SHiRA model.
"""
prefix: str = "shira_"
tuner_layer_cls = ShiraLayer
target_module_mapping = TRANSFORMERS_MODELS_TO_SHIRA_TARGET_MODULES_MAPPING
def _create_and_replace(
self,
shira_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
bias = hasattr(target, "bias") and target.bias is not None
kwargs = {}
kwargs["bias"] = bias
if shira_config.mask_type == "random":
kwargs["random_seed"] = shira_config.random_seed
for k, v in optional_kwargs.items():
kwargs[k] = v
if isinstance(target, Linear):
mask = (
shira_config.mask_fn(target.base_layer, shira_config.r, **kwargs)
if shira_config.mask_fn is not None
else None
)
target.update_layer(
adapter_name,
mask,
shira_config.r,
init_weights=shira_config.init_weights,
)
else:
new_module = self._create_new_module(shira_config, adapter_name, target, **kwargs)
if adapter_name not in self.active_adapter:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _create_new_module(shira_config, adapter_name, target, **kwargs):
fan_in_fan_out = shira_config.fan_in_fan_out
_ = kwargs.pop("bias", False)
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if fan_in_fan_out:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
fan_in_fan_out = shira_config.fan_in_fan_out = False
else:
raise ValueError(
f"Target module {target} is not supported. Currently, only the following modules are supported: "
"`torch.nn.Linear`."
)
mask = (
shira_config.mask_fn(target_base_layer, shira_config.r, **kwargs)
if shira_config.mask_fn is not None
else None
)
new_module = Linear(
target,
mask,
adapter_name,
shira_config.r,
fan_in_fan_out,
init_weights=shira_config.init_weights,
**kwargs,
)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/shira/model.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:tests/test_shira.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test file is for tests specific to SHiRA.
import os
import pytest
import torch
from accelerate.utils.imports import is_bf16_available
from torch import nn
from peft import PeftModel, ShiraConfig, get_peft_model
def custom_random_mask_function_with_custom_kwargs(custom_arg):
def mask_fn(base_layer, r):
"""
This mask function is similar to the random_mask provided in src/peft/tuners/shira/mask_functions.py except the
seed is derived from custom_kwargs. Please use this as an example to create your own custom sparse masks that
may use custom_kwargs. Remember, for a pretrained weight with shape m, n, mask_fn must return only one mask
(shape: m, n) which must be binary 0 or 1 with num_shira_parameters = r(m+n) for linear layers. Device and
dtype of mask must be same as base layer's weight's device and dtype.
"""
new_seed = custom_arg
shape = base_layer.weight.shape
num_shira_weights = r * (shape[0] + shape[1])
random_generator = torch.Generator()
random_generator.manual_seed(new_seed)
idx = (torch.randperm(base_layer.weight.numel(), generator=random_generator)[:num_shira_weights]).to(
base_layer.weight.device
)
val = torch.ones_like(idx.type(base_layer.weight.dtype))
mask = torch.zeros_like(base_layer.weight.view(1, -1))
mask = mask.scatter_(1, idx.unsqueeze(0), val.unsqueeze(0)).view(shape)
return mask
return mask_fn
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.relu = nn.ReLU()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.lin1 = nn.Linear(20, 40, bias=bias) # lin1 and lin2 have same shape
self.lin2 = nn.Linear(40, 30, bias=bias)
self.lin3 = nn.Linear(30, 10, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
X = self.relu(X)
X = self.lin2(X)
X = self.relu(X)
X = self.lin3(X)
X = self.sm(X)
return X
class TestShira:
@pytest.fixture
def mlp(self):
torch.manual_seed(0)
model = MLP()
return model
def test_mlp_single_adapter_shapes(self, mlp):
# torch.manual_seed(0)
r = 2
config = ShiraConfig(r=r, target_modules=["lin1", "lin2"])
# creates a default SHiRA adapter
peft_model = get_peft_model(mlp, config)
shira_weight1_size = peft_model.base_model.model.lin1.shira_weight["default"].shape[0]
shira_weight2_size = peft_model.base_model.model.lin2.shira_weight["default"].shape[0]
shira_indices1_size = peft_model.base_model.model.lin1.shira_indices["default"].shape[1]
shira_indices2_size = peft_model.base_model.model.lin2.shira_indices["default"].shape[1]
base_weight1_size = peft_model.base_model.model.lin1.base_layer.weight.shape
base_weight2_size = peft_model.base_model.model.lin2.base_layer.weight.shape
delta_weight1_shape = peft_model.base_model.model.lin1.get_delta_weight("default").shape
delta_weight2_shape = peft_model.base_model.model.lin2.get_delta_weight("default").shape
assert shira_weight1_size == r * (base_weight1_size[0] + base_weight1_size[1])
assert shira_weight2_size == r * (base_weight2_size[0] + base_weight2_size[1])
assert shira_weight1_size == shira_indices1_size
assert shira_weight2_size == shira_indices2_size
assert delta_weight1_shape == base_weight1_size
assert delta_weight2_shape == base_weight2_size
return peft_model
def test_multiple_adapters_save_load(self, mlp, tmp_path):
# check saving and loading works with multiple adapters
# note, the random seeds in the below two configs are not the default values.
# so it will lead to different random sparse masks between saving and loading.
# our goal is to make sure that loaded indices are exactly the same as the saved indices regardless of what initial random mask gets generated.
# we will also make sure that parameters are saved and loaded correctly, and the output remains the same.
config = ShiraConfig(r=2, target_modules=["lin1", "lin2"], random_seed=56)
# creates a default SHiRA adapter
peft_model = get_peft_model(mlp, config, adapter_name="first")
config2 = ShiraConfig(r=3, target_modules=["lin1", "lin2", "lin3"], random_seed=67)
peft_model.add_adapter("second", config2)
assert torch.all(peft_model.base_model.model.lin1.shira_weight["first"] == 0)
assert torch.all(peft_model.base_model.model.lin2.shira_weight["first"] == 0)
assert torch.all(peft_model.base_model.model.lin1.shira_weight["second"] == 0)
assert torch.all(peft_model.base_model.model.lin2.shira_weight["second"] == 0)
assert torch.all(peft_model.base_model.model.lin3.shira_weight["second"] == 0)
shira_assign_val1_f = torch.randn_like(peft_model.base_model.model.lin1.shira_weight["first"])
peft_model.base_model.model.lin1.shira_weight["first"] = shira_assign_val1_f
shira_indices1_f = peft_model.base_model.model.lin1.shira_indices["first"]
shira_assign_val2_f = torch.randn_like(peft_model.base_model.model.lin2.shira_weight["first"])
peft_model.base_model.model.lin2.shira_weight["first"] = shira_assign_val2_f
shira_indices2_f = peft_model.base_model.model.lin2.shira_indices["first"]
shira_assign_val1_s = torch.randn_like(peft_model.base_model.model.lin1.shira_weight["second"])
peft_model.base_model.model.lin1.shira_weight["second"] = shira_assign_val1_s
shira_indices1_s = peft_model.base_model.model.lin1.shira_indices["second"]
shira_assign_val2_s = torch.randn_like(peft_model.base_model.model.lin2.shira_weight["second"])
peft_model.base_model.model.lin2.shira_weight["second"] = shira_assign_val2_s
shira_indices2_s = peft_model.base_model.model.lin2.shira_indices["second"]
shira_assign_val3_s = torch.randn_like(peft_model.base_model.model.lin3.shira_weight["second"])
peft_model.base_model.model.lin3.shira_weight["second"] = shira_assign_val3_s
shira_indices3_s = peft_model.base_model.model.lin3.shira_indices["second"]
input = torch.randn(5, 10)
peft_model.set_adapter("first")
output_first = peft_model(input)
peft_model.set_adapter("second")
output_second = peft_model(input)
# sanity check
assert not torch.allclose(output_first, output_second, atol=1e-3, rtol=1e-3)
save_path = os.path.join(tmp_path, "shira")
peft_model.save_pretrained(save_path)
assert os.path.exists(os.path.join(save_path, "first", "adapter_config.json"))
assert os.path.exists(os.path.join(save_path, "second", "adapter_config.json"))
del peft_model
torch.manual_seed(0)
mlp = MLP()
peft_model = PeftModel.from_pretrained(mlp, os.path.join(save_path, "first"), adapter_name="first")
peft_model.load_adapter(os.path.join(save_path, "second"), "second")
peft_model.set_adapter("first")
output_first_loaded = peft_model(input)
peft_model.set_adapter("second")
output_second_loaded = peft_model(input)
assert torch.allclose(output_first, output_first_loaded)
assert torch.allclose(output_second, output_second_loaded)
assert torch.all(shira_assign_val1_f == peft_model.base_model.model.lin1.shira_weight["first"])
assert torch.all(shira_assign_val2_f == peft_model.base_model.model.lin2.shira_weight["first"])
assert torch.all(shira_indices1_f == peft_model.base_model.model.lin1.shira_indices["first"])
assert torch.all(shira_indices2_f == peft_model.base_model.model.lin2.shira_indices["first"])
assert torch.all(shira_assign_val1_s == peft_model.base_model.model.lin1.shira_weight["second"])
assert torch.all(shira_assign_val2_s == peft_model.base_model.model.lin2.shira_weight["second"])
assert torch.all(shira_assign_val3_s == peft_model.base_model.model.lin3.shira_weight["second"])
assert torch.all(shira_indices1_s == peft_model.base_model.model.lin1.shira_indices["second"])
assert torch.all(shira_indices2_s == peft_model.base_model.model.lin2.shira_indices["second"])
assert torch.all(shira_indices3_s == peft_model.base_model.model.lin3.shira_indices["second"])
return peft_model
def test_save_load_custom_mask_function(self, mlp, tmp_path):
# we want to see if saving and loading works when a custom mask is involved
config = ShiraConfig(r=2, mask_type="custom", target_modules=["lin1", "lin2"], init_weights=False)
custom_arg = 120
custom_mask_fn = custom_random_mask_function_with_custom_kwargs(custom_arg)
config.mask_fn = custom_mask_fn
# create a custom mask SHiRA adapter
peft_model = get_peft_model(mlp, config, adapter_name="first")
shira_assign_val1_f = peft_model.base_model.model.lin1.shira_weight["first"]
shira_indices1_f = peft_model.base_model.model.lin1.shira_indices["first"]
shira_assign_val2_f = peft_model.base_model.model.lin2.shira_weight["first"]
shira_indices2_f = peft_model.base_model.model.lin2.shira_indices["first"]
input = torch.randn(5, 10)
peft_model.set_adapter("first")
output_first = peft_model(input)
save_path = os.path.join(tmp_path, "shira")
peft_model.save_pretrained(save_path)
assert os.path.exists(os.path.join(save_path, "first", "adapter_config.json"))
del peft_model
torch.manual_seed(0)
mlp = MLP()
peft_model = PeftModel.from_pretrained(mlp, os.path.join(save_path, "first"), adapter_name="first")
peft_model.set_adapter("first")
output_first_loaded = peft_model(input)
assert torch.allclose(output_first, output_first_loaded)
assert torch.all(shira_assign_val1_f == peft_model.base_model.model.lin1.shira_weight["first"])
assert torch.all(shira_assign_val2_f == peft_model.base_model.model.lin2.shira_weight["first"])
assert torch.all(shira_indices1_f == peft_model.base_model.model.lin1.shira_indices["first"])
assert torch.all(shira_indices2_f == peft_model.base_model.model.lin2.shira_indices["first"])
return peft_model
def test_save_load_default_random_mask_with_seed_function(self, mlp, tmp_path):
# we want to see if saving and loading works when a random mask is involved but the random seed is fixed.
config = ShiraConfig(r=2, target_modules=["lin1", "lin2"], random_seed=567, init_weights=False)
# create a custom mask SHiRA adapter
peft_model = get_peft_model(mlp, config, adapter_name="first")
shira_assign_val1_f = peft_model.base_model.model.lin1.shira_weight["first"]
shira_indices1_f = peft_model.base_model.model.lin1.shira_indices["first"]
shira_assign_val2_f = peft_model.base_model.model.lin2.shira_weight["first"]
shira_indices2_f = peft_model.base_model.model.lin2.shira_indices["first"]
input = torch.randn(5, 10)
peft_model.set_adapter("first")
output_first = peft_model(input)
save_path = os.path.join(tmp_path, "shira")
peft_model.save_pretrained(save_path)
assert os.path.exists(os.path.join(save_path, "first", "adapter_config.json"))
del peft_model
torch.manual_seed(0)
mlp = MLP()
peft_model = PeftModel.from_pretrained(mlp, os.path.join(save_path, "first"), adapter_name="first")
peft_model.set_adapter("first")
output_first_loaded = peft_model(input)
assert torch.allclose(output_first, output_first_loaded)
assert torch.all(shira_assign_val1_f == peft_model.base_model.model.lin1.shira_weight["first"])
assert torch.all(shira_assign_val2_f == peft_model.base_model.model.lin2.shira_weight["first"])
assert torch.all(shira_indices1_f == peft_model.base_model.model.lin1.shira_indices["first"])
assert torch.all(shira_indices2_f == peft_model.base_model.model.lin2.shira_indices["first"])
return peft_model
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
def test_shira_dtypes(self, dtype):
if dtype == torch.bfloat16:
# skip if bf16 is not supported on hardware, see #1872
if not is_bf16_available():
pytest.skip("bfloat16 not supported on this system, skipping the test")
model = MLP().to(dtype)
config = ShiraConfig(r=2, target_modules=["lin1", "lin2"])
peft_model = get_peft_model(model, config)
inputs = torch.randn(5, 10).to(dtype)
output = peft_model(inputs) # should not raise
assert output.dtype == dtype
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_shira.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:src/peft/tuners/c3a/config.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Literal, Optional, Union
from peft.config import PeftConfig
from peft.utils import PeftType
@dataclass
class C3AConfig(PeftConfig):
"""This is the configuration class to store the configuration of a [`C3AModel`].
Args:
block_size (`int`):
block size for C3A, must be divisible by both the input size and the output size of the target layer. If
you have no idea what block_size you should use, set it to the greatest common divisor of all input &
output sizes of your target layers. Increasing this would result in fewer parameters.
target_modules (`Union[list[str],str]`): The names of the modules to apply C3A to.
bias (`str`): Bias type for C3A. Can be 'none', 'all' or 'c3a_only'. If 'all' or 'c3a_only', the
corresponding biases will be updated during training. Be aware that this means that, even when disabling
the adapters, the model will not produce the same output as the base model would have without adaptation.
modules_to_save (`list[str]`): List of modules apart from C3A layers to be set as trainable
and saved in the final checkpoint.
layers_to_transform (`Union[list[int],int]`):
The layer indexes to transform, if this argument is specified, it will apply C3A on the layer indexes that
are specified in this list. If a single integer is passed, it will apply C3A on the layer at this index.
layers_pattern (`str`):
The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer
pattern is not in the common layers pattern.
block_size_pattern (`dict`):
            The mapping from layer names or regex expressions to block sizes that differ from the default. For
            example, `{"model.decoder.layers.0.encoder_attn.k_proj": 1280}`.
init_weights (`Union[bool, Literal["gaussian", "kaiming_uniform", "xavier_uniform"]]`):
Defaults to 'xavier_uniform'. Setting this to `False` also uses 'xavier_uniform'. To set the weights to
zeros (thus making C3A a no-op), set the value to `True`.
"""
block_size: int = field(
default=256,
metadata={
"help": (
"block size for C3A, must be divisible by both the input size and the output size of the target layer."
" If you have no idea what block_size you should use, set it to the greatest common divisor of all"
" input & output sizes of your target layers. Increasing this would result in less parameters."
)
},
)
target_modules: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"list of module names or regex expression of the module names to replace with C3A."
" For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
)
},
)
bias: str = field(default="none", metadata={"help": "Bias type for C3A. Can be 'none', 'all' or 'c3a_only'"})
modules_to_save: Optional[list[str]] = field(
default=None,
metadata={
"help": (
"list of modules apart from C3A layers to be set as trainable and saved in the final checkpoint."
" For example, in Sequence Classification or Token Classification tasks,"
" the final layer `classifier/score` are randomly initialized"
" and as such need to be trainable and saved."
)
},
)
layers_to_transform: Optional[Union[list[int], int]] = field(
default=None,
metadata={
"help": (
"The layer indexes to transform, is this argument is specified,"
" PEFT will transform only the layers indexes that are specified inside this list."
" If a single integer is passed, PEFT will transform only the layer at this index."
" This only works when target_modules is a list of str."
)
},
)
layers_pattern: Optional[Union[list[str], str]] = field(
default=None,
metadata={
"help": (
"The layer pattern name, used only if `layers_to_transform` is different to None"
" and if the layer pattern is not in the common layers pattern."
" This only works when target_modules is a list of str."
)
},
)
block_size_pattern: Optional[dict] = field(
default_factory=dict,
metadata={
"help": (
"The mapping from layer names or regexp expression to block_size"
" which are different from the default specified."
" For example, `{model.decoder.layers.0.encoder_attn.k_proj: 1280`}"
)
},
)
init_weights: Optional[Union[bool, Literal["gaussian", "kaiming_uniform", "xavier_uniform"]]] = field(
default="xavier_uniform",
metadata={
"help": (
"Defaults to 'xavier_uniform'. Setting this to `False` also uses 'xavier_uniform'. To set the weights "
"to zeros (thus making C3A a no-op), set the value to `True`."
)
},
)
def __post_init__(self):
super().__post_init__()
self.peft_type = PeftType.C3A
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
# if target_modules is a regex expression, then layers_to_transform should be None
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
# if target_modules is a regex expression, then layers_pattern should be None
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/c3a/config.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/c3a/layer.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import warnings
from typing import Any, Literal, Optional
import torch
import torch.nn as nn
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from .utils import BlockCircularConvolution, get_circulant_fast
class C3ALayer(BaseTunerLayer):
# All names of layers that may contain (trainable) adapter weights
adapter_layer_names = ("c3a_kernel",)
# All names of other parameters that may contain adapter-related parameters
other_param_names = ("block_size",)
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
self.base_layer = base_layer
self.block_size = {}
self.c3a_kernel = nn.ParameterDict({})
# Mark the weight as unmerged
self._disable_adapters = False
self.merged_adapters = []
self.kwargs = kwargs
base_layer = self.get_base_layer()
if isinstance(base_layer, nn.Linear):
self.in_features, self.out_features = base_layer.in_features, base_layer.out_features
else:
raise ValueError(f"Unsupported layer type {type(base_layer)}")
def get_delta_weight(self, adapter) -> torch.Tensor:
if adapter not in self.c3a_kernel.keys():
raise ValueError(f"Adapter {adapter} not found.")
base_layer_weight = self.get_base_layer().weight
base_layer_weight_dtype = base_layer_weight.dtype
c3a_kernel = self.c3a_kernel[adapter]
delta_weight = get_circulant_fast(c3a_kernel.to(torch.float32)).to(base_layer_weight_dtype)
return delta_weight / base_layer_weight.size(-1)
def update_layer(self, adapter_name, block_size, init_weights, inference_mode: bool = False, **kwargs):
if block_size <= 0:
raise ValueError(f"`block_size` should be a positive integer value but the value passed is {block_size}")
if self.in_features % block_size != 0:
raise ValueError(
f"The block size should be a factor of the input size. However, the input size is {self.in_features} and the block size is {block_size}"
)
if self.out_features % block_size != 0:
raise ValueError(
f"The block size should be a factor of the output size. However, the output size is {self.out_features} and the block size is {block_size}"
)
self.block_size[adapter_name] = block_size
weight = self.get_base_layer().weight
self.c3a_kernel[adapter_name] = nn.Parameter(
torch.zeros(
self.out_features // block_size,
self.in_features // block_size,
block_size,
# Currently, only fp32 is widely supported for FFT (fp16 is only supported on GPU with shapes of powers
# of 2, bf16 lacks FFT support)
dtype=torch.float32,
device=weight.device,
)
)
self.reset_c3a_parameters(adapter_name, init_weights)
self._move_adapter_to_device_of_base_layer(adapter_name)
self.set_adapter(self.active_adapters, inference_mode=inference_mode)
@torch.no_grad()
def reset_c3a_parameters(self, adapter_name, init_weights):
if init_weights is True:
return
if adapter_name in self.c3a_kernel.keys():
if init_weights == "gaussian":
nn.init.normal_(self.c3a_kernel[adapter_name])
elif init_weights in ["xavier_uniform", False]:
fan_in, fan_out = self.in_features, self.out_features
std = 1.0 * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std
nn.init.uniform_(self.c3a_kernel[adapter_name], -a, a)
elif init_weights == "kaiming_uniform":
fan_in = self.in_features
a = 1.0 * math.sqrt(1.0 / float(fan_in))
nn.init.uniform_(self.c3a_kernel[adapter_name], -a, a)
else:
raise ValueError(f"Unknown init_weights: {init_weights}")
class C3ALinear(nn.Module, C3ALayer):
    # C3A implemented in a dense layer
def __init__(
self,
base_layer,
adapter_name: str,
block_size: int,
init_weights: bool | Literal["gaussian", "kaiming_uniform", "xavier_uniform"],
**kwargs,
) -> None:
super().__init__()
C3ALayer.__init__(self, base_layer, **kwargs)
self._active_adapter = adapter_name
self.update_layer(adapter_name, block_size, init_weights)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter in self.c3a_kernel.keys():
base_layer = self.get_base_layer()
if safe_merge:
# Note that safe_merge will be slower than the normal merge
# because of the copy operation.
orig_weights = base_layer.weight.data.clone()
delta_weight = self.get_delta_weight(active_adapter)
orig_weights = orig_weights + delta_weight
if not torch.isfinite(orig_weights).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
base_layer.weight.data = orig_weights
else:
delta_weight = self.get_delta_weight(active_adapter)
base_layer.weight.data = base_layer.weight.data + delta_weight
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter in self.c3a_kernel.keys():
self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
x = x.to(torch.float32)
for active_adapter in self.active_adapters:
if active_adapter not in self.c3a_kernel.keys():
continue
c3a_kernel = self.c3a_kernel[active_adapter].to(torch.float32)
x = BlockCircularConvolution.apply(x, c3a_kernel) / x.size(-1)
result += x.to(result.dtype)
result = result.to(previous_dtype)
return result
def supports_lora_conversion(self, adapter_name: str = "default") -> bool:
return True
def __repr__(self) -> str:
rep = super().__repr__()
return "c3a." + rep
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/c3a/layer.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/c3a/model.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import re
from itertools import chain
import torch
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer
from peft.utils import (
TRANSFORMERS_MODELS_TO_C3A_TARGET_MODULES_MAPPING,
)
from .layer import C3ALayer, C3ALinear
class C3AModel(BaseTuner):
"""
Creates C3A model from a pretrained transformers model.
The method is described in detail in https://huggingface.co/papers/2407.19342.
Args:
model ([`torch.nn.Module`]): The model to be adapted.
config ([`C3AConfig`]): The configuration of the C3A model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
Returns:
`torch.nn.Module`: The C3A model.
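    Example (a minimal usage sketch; the model id, target modules and block sizes below are illustrative choices,
    not requirements of C3A):
        ```py
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import C3AConfig, get_peft_model
        >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        >>> # 768 (the hidden size of opt-125m) is divisible by both 256 and 128
        >>> config = C3AConfig(
        ...     block_size=256,
        ...     target_modules=["q_proj", "k_proj", "v_proj"],
        ...     block_size_pattern={"k_proj": 128},
        ... )
        >>> model = get_peft_model(base_model, config)
        >>> model.print_trainable_parameters()
        ```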
**Attributes**:
- **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`C3AConfig`]): The configuration of the C3A model.
"""
prefix: str = "c3a_"
tuner_layer_cls = C3ALayer
target_module_mapping = TRANSFORMERS_MODELS_TO_C3A_TARGET_MODULES_MAPPING
def _create_and_replace(
self,
c3a_config,
adapter_name,
target,
target_name,
parent,
current_key,
**optional_kwargs,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
# Regexp matching - Find key which matches current target_name in patterns provided
pattern_keys = list(chain(c3a_config.block_size_pattern.keys()))
target_name_key = next(filter(lambda key: re.match(rf".*\.{key}$", current_key), pattern_keys), current_key)
block_size = c3a_config.block_size_pattern.get(target_name_key, c3a_config.block_size)
kwargs = {
"block_size": block_size,
"init_weights": c3a_config.init_weights,
}
if isinstance(target, C3ALinear):
target.update_layer(
adapter_name,
block_size,
c3a_config.init_weights,
)
else:
new_module = self._create_new_module(c3a_config, adapter_name, target, **kwargs)
if adapter_name != self.active_adapter:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _create_new_module(c3a_config, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
new_module = C3ALinear(target, adapter_name, **kwargs)
else:
raise ValueError(
f"Target module {target} is not supported. Currently, only `torch.nn.Linear` is supported."
)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/c3a/model.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/c3a/utils.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
from torch.fft import fft, ifft
def get_circulant_fast(w):
m, n, b = w.shape
x = torch.eye(n * b, dtype=w.dtype, device=w.device)
x = x.reshape(*x.shape[:-1], n, b)
x = torch.einsum("...nb,mnb->...mb", ifft(x), fft(w))
x = fft(x).real.flatten(start_dim=1).T
return x
class BlockCircularConvolution(Function):
@staticmethod
def forward(ctx, x, w):
m, n, b = w.shape
x = x.reshape(*x.shape[:-1], n, b)
ctx.save_for_backward(x, w)
x = torch.einsum("...nb,mnb->...mb", ifft(x), fft(w))
x = fft(x).real
x = x.reshape(*x.shape[:-2], -1)
return x
@staticmethod
def backward(ctx, grad_output):
x, w = ctx.saved_tensors
m, n, b = w.shape
grad_output = grad_output.reshape(*grad_output.shape[:-1], m, b)
grad_output_fft = fft(grad_output)
x_grad = fft(torch.einsum("...mb,mnb->...nb", grad_output_fft, ifft(w))).real
x_grad = x_grad.reshape(*x_grad.shape[:-2], -1)
w_grad = fft(torch.einsum("...mb,...nb->mnb", grad_output_fft, ifft(x))).real
return x_grad, w_grad
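# A minimal self-check sketch (not part of the library API): it verifies that the FFT-based autograd function above
# matches a plain matrix multiplication with the explicit block-circulant matrix built by `get_circulant_fast`, up to
# float32 FFT rounding. The shapes below are arbitrary illustrative values.
if __name__ == "__main__":
    torch.manual_seed(0)
    m, n, b = 3, 2, 4  # number of output blocks, number of input blocks, block size
    w = torch.randn(m, n, b)  # a C3A kernel
    x = torch.randn(5, n * b)  # a batch of inputs with in_features = n * b
    out_fft = BlockCircularConvolution.apply(x, w)  # (5, m * b) computed via FFT
    out_mat = x @ get_circulant_fast(w).T  # the same map through the materialized (m * b, n * b) matrix
    assert torch.allclose(out_fft, out_mat, atol=1e-4), "FFT path and explicit circulant matrix disagree"
    print("block circular convolution matches the explicit circulant matrix:", tuple(out_fft.shape))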
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/c3a/utils.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/oft/aqlm.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_aqlm_available
from peft.tuners.oft.layer import OFTLayer
from peft.tuners.tuners_utils import BaseTunerLayer
if is_aqlm_available():
from aqlm import QuantizedLinear
class AqlmOFTLinear(torch.nn.Module, OFTLayer):
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 0,
oft_block_size: int = 32,
module_dropout: float = 0.0,
init_weights: bool = True,
coft: bool = False,
eps: float = 6e-5,
block_share: bool = False,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
use_cayley_neumann: bool = False,
num_cayley_neumann_terms: int = 5,
**kwargs,
):
super().__init__()
OFTLayer.__init__(self, base_layer)
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
oft_block_size=oft_block_size,
module_dropout=module_dropout,
init_weights=init_weights,
coft=coft,
eps=eps,
block_share=block_share,
use_cayley_neumann=use_cayley_neumann,
num_cayley_neumann_terms=num_cayley_neumann_terms,
)
def forward(self, x: torch.Tensor):
# note: logic differs from default Linear because merging is not supported
if self.disable_adapters:
return self.base_layer(x)
for active_adapter in self.active_adapters:
if active_adapter not in self.oft_R.keys():
continue
oft_R = self.oft_R[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = x.dtype
x = self._cast_input_dtype(x, oft_R.weight.dtype)
x = oft_R(x)
result = self.base_layer(x)
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_aqlm(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear):
new_module = AqlmOFTLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.codes
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/oft/aqlm.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/oft/awq.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_gptqmodel_available
from peft.tuners.oft.layer import OFTLayer
from peft.tuners.tuners_utils import BaseTunerLayer
class AwqOFTLinear(torch.nn.Module, OFTLayer):
def __init__(
self,
base_layer,
adapter_name,
r: int = 0,
oft_block_size: int = 32,
module_dropout: float = 0.0,
coft: bool = False,
eps: float = 6e-5,
block_share: bool = False,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: bool = True,
use_cayley_neumann: bool = False,
num_cayley_neumann_terms: int = 5,
**kwargs,
):
super().__init__()
OFTLayer.__init__(self, base_layer)
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
oft_block_size=oft_block_size,
module_dropout=module_dropout,
coft=coft,
eps=eps,
block_share=block_share,
init_weights=init_weights,
use_cayley_neumann=use_cayley_neumann,
num_cayley_neumann_terms=num_cayley_neumann_terms,
)
def forward(self, x: torch.Tensor):
if self.disable_adapters:
result = self.quant_linear_module(x)
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.oft_R.keys():
continue
oft_R = self.oft_R[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = x.dtype
x = self._cast_input_dtype(x, oft_R.weight.dtype)
x = oft_R(x)
if requires_conversion:
x = x.to(expected_dtype)
result = self.quant_linear_module(x)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_awq(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_gptqmodel_available():
from gptqmodel.nn_modules.qlinear.gemm_awq import AwqGEMMQuantLinear
if isinstance(target_base_layer, AwqGEMMQuantLinear):
new_module = AwqOFTLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/oft/awq.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/oft/bnb.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Optional
import bitsandbytes as bnb
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from peft.utils.integrations import dequantize_bnb_weight
from .layer import OFTLayer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, OFTLayer):
# OFT implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
r: int = 8,
oft_block_size: int = 0,
module_dropout: float = 0.0,
init_weights: bool = True,
coft: bool = False,
eps: float = 6e-5,
block_share: bool = False,
use_cayley_neumann: bool = False,
num_cayley_neumann_terms: int = 5,
**kwargs,
) -> None:
super().__init__()
OFTLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
oft_block_size=oft_block_size,
module_dropout=module_dropout,
coft=coft,
eps=eps,
block_share=block_share,
init_weights=init_weights,
use_cayley_neumann=use_cayley_neumann,
num_cayley_neumann_terms=num_cayley_neumann_terms,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter not in self.oft_R.keys():
continue
warnings.warn("Merge oft module to 8-bit linear may get different generations due to rounding errors.")
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
# Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8
# dequantization directly
output = dequantize_bnb_weight(weight, state=state)
oft_data = self.get_delta_weight(active_adapter)
output = torch.transpose(output, 0, 1)
w_data = torch.mm(oft_data, output.to(oft_data.dtype))
w_data = torch.transpose(w_data, 0, 1)
                w_data = w_data.to(oft_data.dtype).to(oft_data.device)
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
state.reset_grads()
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.oft_R.keys():
continue
warnings.warn(
"Unmerge oft module to 8-bit linear may get different generations due to rounding errors."
)
weight = self.get_base_layer().weight
state = self.get_base_layer().state
if state.SCB is None:
state.SCB = weight.SCB
output = dequantize_bnb_weight(weight, state=state)
oft_data = self.get_delta_weight(active_adapter)
output = torch.transpose(output, 0, 1)
w_data = torch.mm(oft_data.t(), output.to(oft_data.dtype))
w_data = torch.transpose(w_data, 0, 1)
w_data = w_data.to(oft_data.dtype).to(oft_data.device)
self.get_base_layer().weight = bnb.nn.Int8Params(
w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights
).to(weight.device)
state.reset_grads()
def get_delta_weight(self, adapter):
return self.oft_R[adapter].get_weight()
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
for active_adapter in self.active_adapters:
if active_adapter not in self.oft_R.keys():
continue
oft_R = self.oft_R[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = x.dtype
x = self._cast_input_dtype(x, oft_R.weight.dtype)
x = oft_R(x)
if requires_conversion:
x = x.to(expected_dtype)
result = self.base_layer(x, *args, **kwargs)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_8bit = kwargs.get("loaded_in_8bit", False)
if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt):
eightbit_kwargs = kwargs.copy()
eightbit_kwargs.update(
{
"has_fp16_weights": target.state.has_fp16_weights,
"threshold": target.state.threshold,
"index": target.index,
}
)
new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs)
return new_module
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, OFTLayer):
# OFT implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
r: int = 8,
oft_block_size: int = 0,
module_dropout: float = 0.0,
coft: bool = False,
eps: float = 6e-5,
block_share: bool = False,
init_weights: bool = True,
use_cayley_neumann: bool = False,
num_cayley_neumann_terms: int = 5,
**kwargs,
) -> None:
super().__init__()
OFTLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
oft_block_size=oft_block_size,
module_dropout=module_dropout,
coft=coft,
eps=eps,
block_share=block_share,
init_weights=init_weights,
use_cayley_neumann=use_cayley_neumann,
num_cayley_neumann_terms=num_cayley_neumann_terms,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
if active_adapter not in self.oft_R.keys():
continue
warnings.warn("Merge oft module to 4-bit linear may get different generations due to rounding errors.")
# Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930
weight = self.get_base_layer().weight
kwargs = weight.__dict__
output = dequantize_bnb_weight(weight, state=weight.quant_state)
oft_data = self.get_delta_weight(active_adapter)
output = torch.transpose(output, 0, 1)
w_data = torch.mm(oft_data, output.to(oft_data.dtype))
w_data = torch.transpose(w_data, 0, 1)
                w_data = w_data.to(oft_data.dtype).to(oft_data.device)
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
if "bnb_quantized" in kwargs:
kwargs["bnb_quantized"] = False
kwargs["requires_grad"] = False
kwargs.pop("data", None)
# torch.compile can introduce attributes preceded by '_', remove them
kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device)
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.oft_R.keys():
continue
warnings.warn(
"Unmerge oft module to 4-bit linear may get different generations due to rounding errors."
)
weight = self.get_base_layer().weight
kwargs = weight.__dict__
output = dequantize_bnb_weight(weight, state=weight.quant_state)
oft_data = self.get_delta_weight(active_adapter)
output = torch.transpose(output, 0, 1)
w_data = torch.mm(oft_data.t(), output.to(oft_data.dtype))
w_data = torch.transpose(w_data, 0, 1)
w_data = output.to(oft_data.dtype).to(oft_data.device)
if "bnb_quantized" in kwargs:
kwargs["bnb_quantized"] = False
kwargs["requires_grad"] = False
kwargs.pop("data", None)
self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device)
def get_delta_weight(self, adapter):
return self.oft_R[adapter].get_weight()
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
# As per Tim Dettmers, for 4bit, we need to defensively clone here.
# The reason is that in some cases, an error can occur that backprop
# does not work on a manipulated view. This issue may be solved with
# newer PyTorch versions but this would need extensive testing to be
# sure.
# result = result.clone()
for active_adapter in self.active_adapters:
if active_adapter not in self.oft_R.keys():
continue
oft_R = self.oft_R[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = x.dtype
x = self._cast_input_dtype(x, oft_R.weight.dtype)
x = oft_R(x)
if requires_conversion:
x = x.to(expected_dtype)
result = self.base_layer(x, *args, **kwargs)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
loaded_in_4bit = kwargs.get("loaded_in_4bit", False)
if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit):
fourbit_kwargs = kwargs.copy()
fourbit_kwargs.update(
{
"compute_dtype": target_base_layer.compute_dtype,
"compress_statistics": target_base_layer.weight.compress_statistics,
"quant_type": target_base_layer.weight.quant_type,
}
)
new_module = Linear4bit(target, adapter_name, **fourbit_kwargs)
return new_module
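# A minimal usage sketch (assumptions: a CUDA device with bitsandbytes installed, and "facebook/opt-125m" used only
# as an illustrative model id). It shows the user-facing path that reaches the dispatcher above: the base model is
# loaded in 4-bit with bitsandbytes and OFT adapters are injected via `get_peft_model`, which replaces the targeted
# `bnb.nn.Linear4bit` modules with the `Linear4bit` OFT wrapper defined in this file.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
    from peft import OFTConfig, get_peft_model
    bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
    base = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-125m", quantization_config=bnb_config, device_map="auto"
    )
    # r=8 splits each 768-dim projection of opt-125m into 8 orthogonal blocks of size 96
    oft_config = OFTConfig(r=8, target_modules=["q_proj", "v_proj"])
    model = get_peft_model(base, oft_config)
    model.print_trainable_parameters()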
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/oft/bnb.py",
"license": "Apache License 2.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/oft/eetq.py | # Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_eetq_available
from peft.tuners.oft.layer import OFTLayer
from peft.tuners.tuners_utils import BaseTunerLayer
if is_eetq_available():
from eetq import EetqLinear
class EetqOFTLinear(torch.nn.Module, OFTLayer):
def __init__(
self,
base_layer,
adapter_name,
r: int = 0,
oft_block_size: int = 0,
module_dropout: float = 0.0,
init_weights: bool = True,
coft: bool = False,
eps: float = 6e-5,
block_share: bool = False,
use_cayley_neumann: bool = False,
num_cayley_neumann_terms: int = 5,
fan_in_fan_out: bool = False,
**kwargs,
):
super().__init__()
OFTLayer.__init__(self, base_layer)
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
oft_block_size=oft_block_size,
module_dropout=module_dropout,
init_weights=init_weights,
coft=coft,
eps=eps,
block_share=block_share,
fan_in_fan_out=fan_in_fan_out,
use_cayley_neumann=use_cayley_neumann,
num_cayley_neumann_terms=num_cayley_neumann_terms,
)
def forward(self, x: torch.Tensor):
if self.disable_adapters:
return self.quant_linear_module(x)
for active_adapter in self.active_adapters:
if active_adapter not in self.oft_R.keys():
continue
oft_R = self.oft_R[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = x.dtype
x = self._cast_input_dtype(x, oft_R.weight.dtype)
x = oft_R(x)
result = self.quant_linear_module(x)
if requires_conversion:
result = result.to(expected_dtype)
return result
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
raise AttributeError("Merging LoRA layers is not supported for Eetq layers.")
def unmerge(self) -> None:
raise AttributeError("Unmerging LoRA layers is not supported for Eetq layers.")
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_eetq(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_eetq_available() and isinstance(target_base_layer, EetqLinear):
new_module = EetqOFTLinear(target, adapter_name, **kwargs)
target.weight = target_base_layer.weight
if hasattr(target, "bias"):
target.bias = target_base_layer.bias
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/oft/eetq.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/oft/gptq.py | # Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from peft.import_utils import is_gptqmodel_available
from peft.tuners.oft.layer import OFTLayer
from peft.tuners.tuners_utils import BaseTunerLayer
class GPTQOFTLinear(torch.nn.Module, OFTLayer):
def __init__(
self,
base_layer,
adapter_name: str,
r: int = 8,
oft_block_size: int = 0,
module_dropout: float = 0.0,
coft: bool = False,
eps: float = 6e-5,
block_share: bool = False,
use_cayley_neumann: bool = False,
num_cayley_neumann_terms: int = 5,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
init_weights: bool = True,
**kwargs,
):
super().__init__()
OFTLayer.__init__(self, base_layer)
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
oft_block_size=oft_block_size,
module_dropout=module_dropout,
coft=coft,
eps=eps,
block_share=block_share,
init_weights=init_weights,
use_cayley_neumann=use_cayley_neumann,
num_cayley_neumann_terms=num_cayley_neumann_terms,
)
def forward(self, x: torch.Tensor):
# note: logic differs from default Linear because merging is not supported
if self.disable_adapters:
return self.quant_linear_module(x)
for active_adapter in self.active_adapters:
if active_adapter not in self.oft_R.keys():
continue
oft_R = self.oft_R[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = x.dtype
x = self._cast_input_dtype(x, oft_R.weight.dtype)
x = oft_R(x)
if requires_conversion:
x = x.to(expected_dtype)
result = self.quant_linear_module(x)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_gptq(
target: torch.nn.Module,
adapter_name: str,
**kwargs: Any,
) -> Optional[torch.nn.Module]:
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_gptqmodel_available():
from gptqmodel.nn_modules.qlinear import BaseQuantLinear
if isinstance(target_base_layer, BaseQuantLinear):
new_module = GPTQOFTLinear(target, adapter_name, **kwargs)
target.qweight = target_base_layer.qweight
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/oft/gptq.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/oft/hqq.py | # Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import warnings
from typing import Optional
import torch
from peft.import_utils import is_hqq_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
from .layer import OFTLayer
if is_hqq_available():
from hqq.core.quantize import HQQLinear
class HqqOFTLinear(torch.nn.Module, OFTLayer):
        # OFT implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
r: int = 8,
oft_block_size: int = 0,
module_dropout: float = 0.0,
init_weights: bool = True,
coft: bool = False,
eps: float = 6e-5,
block_share: bool = False,
use_cayley_neumann: bool = False,
num_cayley_neumann_terms: int = 5,
**kwargs,
) -> None:
super().__init__()
OFTLayer.__init__(self, base_layer)
self.fan_in_fan_out = False
self._active_adapter = adapter_name
self.update_layer(
adapter_name,
r,
oft_block_size=oft_block_size,
module_dropout=module_dropout,
init_weights=init_weights,
coft=coft,
eps=eps,
block_share=block_share,
use_cayley_neumann=use_cayley_neumann,
num_cayley_neumann_terms=num_cayley_neumann_terms,
)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
adapter_names = check_adapters_to_merge(self, adapter_names)
if not adapter_names:
# no adapter to merge
return
for active_adapter in adapter_names:
                if active_adapter not in self.oft_R.keys():
continue
layer = self.get_base_layer()
quant_config = {**copy.deepcopy(layer.quant_config), "offload_meta": layer.offload_meta}
output = layer.dequantize()
oft_data = self.get_delta_weight(active_adapter)
output = torch.transpose(output, 0, 1)
w_data = torch.mm(oft_data, output.to(oft_data.dtype))
w_data = torch.transpose(w_data, 0, 1)
                w_data = w_data.to(oft_data.dtype).to(oft_data.device)
if safe_merge and not torch.isfinite(w_data).all():
raise ValueError(
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
)
new_hqq_layer = HQQLinear(None, quant_config, compute_dtype=layer.compute_dtype, device=layer.device)
quant_config.pop("offload_meta", None)
new_hqq_layer.quantize(w_data, **quant_config)
self.base_layer = new_hqq_layer
self.merged_adapters.append(active_adapter)
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
while len(self.merged_adapters) > 0:
active_adapter = self.merged_adapters.pop()
if active_adapter not in self.oft_R.keys():
continue
layer = self.get_base_layer()
quant_config = {**copy.deepcopy(layer.quant_config), "offload_meta": layer.offload_meta}
output = layer.dequantize()
oft_data = self.get_delta_weight(active_adapter)
output = torch.transpose(output, 0, 1)
w_data = torch.mm(oft_data.t(), output.to(oft_data.dtype))
w_data = torch.transpose(w_data, 0, 1)
w_data = w_data.to(oft_data.dtype).to(oft_data.device)
new_hqq_layer = HQQLinear(None, quant_config, compute_dtype=layer.compute_dtype, device=layer.device)
quant_config.pop("offload_meta", None)
new_hqq_layer.quantize(w_data, **quant_config)
self.base_layer = new_hqq_layer
def get_delta_weight(self, adapter):
return self.oft_R[adapter].get_weight()
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
self._check_forward_args(x, *args, **kwargs)
adapter_names = kwargs.pop("adapter_names", None)
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
for active_adapter in self.active_adapters:
if active_adapter not in self.oft_R.keys():
continue
oft_R = self.oft_R[active_adapter]
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = x.dtype
x = self._cast_input_dtype(x, oft_R.weight.dtype)
x = oft_R(x)
result = self.base_layer(x, *args, **kwargs)
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "oft." + rep
def dispatch_hqq(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_hqq_available() and isinstance(target_base_layer, HQQLinear):
new_module = HqqOFTLinear(target_base_layer, adapter_name, **kwargs)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/oft/hqq.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:src/peft/tuners/oft/inc.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: PEFT tests related to INC are handled under Optimum-Habana repository:
# - LLMs: https://github.com/huggingface/optimum-habana/blob/main/tests/test_peft_inference.py
# - Diffusers: https://github.com/huggingface/optimum-habana/blob/main/tests/test_diffusers.py
from typing import Optional
import torch
from peft.import_utils import is_inc_available
from peft.tuners.tuners_utils import BaseTunerLayer
from .layer import Linear
if is_inc_available():
class IncOFTLinear(Linear):
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
**kwargs,
):
super().__init__(base_layer, adapter_name, **kwargs)
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
"""
Merge the active adapter weights into the base weights
Args:
safe_merge (`bool`, *optional*):
If True, the merge operation will be performed in a copy of the original weights and check for NaNs
before merging the weights. This is useful if you want to check if the merge operation will produce
NaNs. Defaults to `False`.
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged.
Defaults to `None`.
"""
raise NotImplementedError("Merging OFT with INC layers is not yet implemented")
def unmerge(self) -> None:
"""
This method unmerges all merged adapter layers from the base weights.
"""
raise NotImplementedError("Unmerging OFT from INC layers is not yet implemented")
def dispatch_inc(target: torch.nn.Module, adapter_name: str, **kwargs):
new_module = None
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if is_inc_available():
from neural_compressor.torch.algorithms.fp8_quant._quant_common.helper_modules import (
PatchedLinear,
)
if isinstance(target_base_layer, PatchedLinear):
new_module = IncOFTLinear(target, adapter_name, **kwargs)
return new_module
| {
"repo_id": "huggingface/peft",
"file_path": "src/peft/tuners/oft/inc.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/peft:examples/qalora_finetuning/qalora_gptq_finetuning.py | #!/usr/bin/env python3
"""
Training script for fine-tuning language models with QALoRA using GPTQ quantization.
This script supports cached quantization to avoid repeating expensive quantization processes.
"""
import argparse
import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
GPTQConfig,
Trainer,
TrainingArguments,
)
from peft import LoraConfig, get_peft_model
def load_or_quantize_model(
base_model: str, tokenizer, bits: int = 4, cache_dir: str = "./quantized_models"
) -> AutoModelForCausalLM:
"""
Load a pre-quantized model from cache or quantize and cache a new one.
Automatically detects if the model is already GPTQ-quantized.
Args:
base_model: Model identifier or path
tokenizer: Tokenizer for the model
bits: Bit-width for quantization (default: 4)
cache_dir: Directory to store quantized models
Returns:
The loaded (quantized) model
"""
# First, check if the model is already GPTQ-quantized by trying to load it
print(f"Checking if {base_model} is already GPTQ-quantized...")
try:
# Try to load the model and check if it has GPTQ quantization
test_model = AutoModelForCausalLM.from_pretrained(
base_model,
device_map="auto",
dtype=torch.float16,
trust_remote_code=True, # Some GPTQ models might need this
)
# Check if the model has GPTQ quantization attributes
has_gptq = False
for module in test_model.modules():
if hasattr(module, "qweight") or hasattr(module, "qzeros") or "gptq" in str(type(module)).lower():
has_gptq = True
break
if has_gptq:
print(f"✅ Model {base_model} is already GPTQ-quantized. Using directly.")
return test_model
else:
print(f"Model {base_model} is not GPTQ-quantized. Will quantize it.")
# Clean up the test model to free memory
del test_model
if torch.cuda.is_available():
torch.cuda.empty_cache()
elif torch.xpu.is_available():
torch.xpu.empty_cache()
except Exception as e:
print(f"Could not load model {base_model} directly: {e}")
print("Will attempt to quantize it...")
# If we get here, the model needs to be quantized
os.makedirs(cache_dir, exist_ok=True)
model_id = base_model.replace("/", "_").replace("\\", "_") # Handle Windows paths too
quantized_model_path = os.path.join(cache_dir, f"{model_id}_gptq_{bits}bit")
# Check if we already have a cached quantized version
if os.path.exists(quantized_model_path) and os.path.exists(os.path.join(quantized_model_path, "config.json")):
print(f"Loading pre-quantized model from cache: {quantized_model_path}")
return AutoModelForCausalLM.from_pretrained(quantized_model_path, device_map="auto")
print(f"Quantizing model and saving to cache: {quantized_model_path}")
# Configure GPTQ for first-time quantization
gptq_config = GPTQConfig(
bits=bits,
dataset="c4",
tokenizer=tokenizer,
group_size=128,
desc_act=False,
sym=False,
)
# Load and quantize the model
model = AutoModelForCausalLM.from_pretrained(
base_model, device_map="auto", quantization_config=gptq_config, dtype=torch.float16
)
# Save the quantized model to cache
print(f"Saving quantized model to {quantized_model_path}")
model.save_pretrained(quantized_model_path)
tokenizer.save_pretrained(quantized_model_path)
return model
def tokenize_and_preprocess(examples, tokenizer, max_length: int = 128):
"""
Tokenize text data and prepare it for language modeling.
Args:
examples: Dataset examples with 'text' field
tokenizer: Tokenizer to use
max_length: Maximum sequence length
Returns:
Processed examples with input_ids and labels
"""
# Tokenize the text with truncation and padding
tokenized_output = tokenizer(examples["text"], truncation=True, padding="max_length", max_length=max_length)
# Preprocess labels (set pad tokens to -100 for loss masking)
labels = tokenized_output["input_ids"].copy()
labels = [[-100 if token == tokenizer.pad_token_id else token for token in seq] for seq in labels]
tokenized_output["labels"] = labels
return tokenized_output
def train_model(
base_model: str,
data_path: str,
data_split: str,
output_dir: str,
batch_size: int,
num_epochs: int,
learning_rate: float,
cutoff_len: int,
use_qalora: bool,
eval_step: int,
save_step: int,
device: str,
lora_r: int,
lora_alpha: int,
lora_dropout: float,
lora_target_modules: str,
push_to_hub: bool,
qalora_group_size: int,
bits: int,
) -> None:
"""
Train a model with QALoRA and GPTQ quantization.
Args:
base_model: Base model to fine-tune
        data_path: Dataset path
        data_split: Dataset configuration name passed as the second argument to `load_dataset`
output_dir: Directory to save model outputs
batch_size: Training batch size
num_epochs: Number of training epochs
learning_rate: Learning rate
cutoff_len: Maximum sequence length
        use_qalora: Whether to use QALoRA
eval_step: Steps between evaluations
save_step: Steps between saving checkpoints
device: Device to use (cuda:0, xpu:0, etc.)
lora_r: LoRA rank
lora_alpha: LoRA alpha
lora_dropout: LoRA dropout rate
lora_target_modules: Target modules for LoRA
        push_to_hub: Whether to push to Hugging Face Hub
        qalora_group_size: Group size used by QALoRA
        bits: Bit-width used for GPTQ quantization
"""
os.environ["TOKENIZERS_PARALLELISM"] = "false"
hf_token = os.getenv("HF_TOKEN")
device = torch.device(device)
print(f"Using device: {device}")
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
# Load or quantize model
model = load_or_quantize_model(base_model, tokenizer, bits=bits)
# Configure LoRA
target_modules = (
lora_target_modules.split(",")
if lora_target_modules
else ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
)
print("use_qalora", use_qalora)
lora_config = LoraConfig(
task_type="CAUSAL_LM",
use_qalora=use_qalora,
qalora_group_size=qalora_group_size,
r=lora_r,
lora_alpha=lora_alpha,
target_modules=target_modules,
lora_dropout=lora_dropout,
bias="none",
)
# Get PEFT model with adapters
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
# Move model to device if not already there
if not hasattr(model, "device") or model.device.type != device.type:
model = model.to(device)
# Load and prepare dataset
    # Pass the dataset configuration name only when one was provided
    dataset = load_dataset(data_path, data_split) if data_split else load_dataset(data_path)
tokenized_datasets = {
"train": dataset["train"].map(
lambda x: tokenize_and_preprocess(x, tokenizer, max_length=cutoff_len),
batched=True,
remove_columns=["text"],
load_from_cache_file=True,
),
"test": dataset["test"].map(
lambda x: tokenize_and_preprocess(x, tokenizer, max_length=cutoff_len),
batched=True,
remove_columns=["text"],
load_from_cache_file=True,
),
}
# Data collator for language modeling
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
# Configure training arguments
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_steps=100,
weight_decay=0.01,
logging_dir="./logs",
logging_steps=eval_step,
save_steps=save_step,
save_total_limit=2,
push_to_hub=push_to_hub,
gradient_accumulation_steps=16,
fp16=True,
learning_rate=learning_rate,
hub_token=hf_token,
label_names=["labels"],
)
# Clear accelerator cache to free memory
if torch.cuda.is_available():
torch.cuda.empty_cache()
elif torch.xpu.is_available():
torch.xpu.empty_cache()
# Initialize trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
)
# Start training
print("\nStarting training...")
trainer.train()
# Save the final model
if push_to_hub:
trainer.push_to_hub(commit_message="Fine-tuned model with QALoRA")
# Always save locally
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
print(f"\nTraining complete. Model saved to {output_dir}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Fine-tune LLMs with QALoRA and GPTQ quantization")
# Model and dataset parameters
parser.add_argument("--base_model", type=str, default="TheBloke/Llama-2-7b-GPTQ", help="Base model path or name")
parser.add_argument(
"--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name"
)
parser.add_argument("--data_split", type=str, default="", help="Dataset path or name")
parser.add_argument(
"--output_dir", type=str, default="./qalora_output", help="Output directory for the fine-tuned model"
)
parser.add_argument("--bits", type=int, default=4, help="Init quantization bits")
# Training parameters
parser.add_argument("--batch_size", type=int, default=4, help="Batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=3e-4, help="Learning rate")
parser.add_argument("--cutoff_len", type=int, default=128, help="Max sequence length")
# Adapter configuration
parser.add_argument("--use_qalora", action="store_true", help="Apply QALoRA")
parser.add_argument("--qalora_group_size", type=int, default=32, help="LoRA rank")
parser.add_argument("--lora_r", type=int, default=8, help="LoRA rank")
parser.add_argument("--lora_alpha", type=int, default=16, help="LoRA alpha")
parser.add_argument("--lora_dropout", type=float, default=0.05, help="LoRA dropout rate")
parser.add_argument(
"--lora_target_modules", type=str, default=None, help="Comma-separated list of target modules for LoRA"
)
# Training process options
parser.add_argument("--eval_step", type=int, default=100, help="Evaluation step interval")
parser.add_argument("--save_step", type=int, default=500, help="Save step interval")
parser.add_argument("--device", type=str, default="auto", help="Device to use for training")
# Hugging Face Hub options
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
args = parser.parse_args()
device = args.device
if args.device == "auto":
device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
# This example always trains with QALoRA, so force the flag on even if --use_qalora was not passed
if not args.use_qalora:
args.use_qalora = True
train_model(
base_model=args.base_model,
data_path=args.data_path,
data_split=args.data_split,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
use_qalora=args.use_qalora,
eval_step=args.eval_step,
save_step=args.save_step,
device=device,
lora_r=args.lora_r,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout,
lora_target_modules=args.lora_target_modules,
push_to_hub=args.push_to_hub,
qalora_group_size=args.qalora_group_size,
bits=args.bits,
)
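# Example invocation (illustrative; uses the argparse defaults above and assumes the GPTQ checkpoint
# and dataset are reachable on the Hub):
# python qalora_gptq_finetuning.py --base_model TheBloke/Llama-2-7b-GPTQ \
#     --data_path timdettmers/openassistant-guanaco --use_qalora --bits 4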
| {
"repo_id": "huggingface/peft",
"file_path": "examples/qalora_finetuning/qalora_gptq_finetuning.py",
"license": "Apache License 2.0",
"lines": 303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/peft:method_comparison/test_sanitizer.py | import pandas as pd
import pytest
from .sanitizer import parse_and_filter
@pytest.fixture
def df_products():
data = {
'product_id': [101, 102, 103, 104, 105, 106],
'category': ['Electronics', 'Books', 'Electronics', 'Home Goods', 'Books', 'Electronics'],
'price': [799.99, 19.99, 49.50, 120.00, 24.99, 150.00],
'stock': [15, 300, 50, 25, 150, 0]
}
return pd.DataFrame(data)
def test_exploit_fails(df_products):
with pytest.raises(ValueError) as e:
mask1 = parse_and_filter(df_products,
"""price < 50 and @os.system("/bin/echo password")""")
assert 'Invalid filter syntax' in str(e)
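# (In pandas eval/query syntax, '@name' references a Python object from the calling scope, which is
# why the sanitizer is expected to reject this expression rather than evaluate it.)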
@pytest.mark.parametrize('expression,ids', [
("price < 50", [102, 103, 105]),
("product_id in [101, 102]", [101, 102]),
("price < 50 and category == 'Electronics'", [103]),
("stock < 100 or category == 'Home Goods'", [101, 103, 104, 106]),
("(price > 100 and stock < 20) or category == 'Books'", [101, 102, 105, 106]),
("not (price > 50 or stock > 100)", [103]),
("not price > 50", [102, 103, 105]),
("(price < 50) & (category == 'Electronics')", [103]),
("(stock < 100) | (category == 'Home Goods')", [101, 103, 104, 106]),
])
def test_operations(df_products, expression, ids):
mask1 = parse_and_filter(df_products, expression)
assert sorted(df_products[mask1].product_id) == sorted(ids)
| {
"repo_id": "huggingface/peft",
"file_path": "method_comparison/test_sanitizer.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:tests/test_lora_variants.py | # Copyright 2025-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model
from peft.tuners.lora.layer import Conv1d as LoraConv1d
from peft.tuners.lora.layer import Conv2d as LoraConv2d
from peft.tuners.lora.layer import Embedding as LoraEmbedding
from peft.tuners.lora.layer import Linear as LoraLinear
from peft.tuners.lora.variants import (
ALoraLinearVariant,
DoraConv1dVariant,
DoraConv2dVariant,
DoraEmbeddingVariant,
DoraLinearVariant,
calculate_alora_offsets,
get_alora_offsets_for_forward,
get_alora_offsets_for_generate,
)
from .testing_common import hub_online_once
# Custom model featuring embeddings and a 'visual stack'
class CustomModel(nn.Module):
"""pytorch module that contains common targetable layers (linear, embedding, conv, ...)"""
def __init__(self, num_embeddings=100, embedding_dim=16, num_classes=10):
super().__init__()
self.embedding = nn.Embedding(num_embeddings, embedding_dim)
self.conv1d = nn.Conv1d(in_channels=embedding_dim, out_channels=32, kernel_size=3, padding=1)
self.conv2d = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)
self.flatten = nn.Flatten()
self.dummy_conv1d_output_dim = 32 * 10
self.dummy_conv2d_output_dim = 16 * 10 * 10
self.linear1 = nn.Linear(self.dummy_conv1d_output_dim + self.dummy_conv2d_output_dim, 64)
self.linear2 = nn.Linear(64, num_classes)
self.relu = nn.ReLU()
def forward(self, input_ids, dummy_image_input):
# Path 1: Embedding -> Conv1d
x1 = self.embedding(input_ids) # (batch_size, seq_len, embedding_dim)
x1 = x1.transpose(1, 2) # (batch_size, embedding_dim, seq_len)
x1 = self.relu(self.conv1d(x1)) # (batch_size, 32, seq_len)
x1_flat = self.flatten(x1)
# Path 2: Conv2d -> Linear
x2 = self.relu(self.conv2d(dummy_image_input)) # (batch_size, 16, H, W)
x2_flat = self.flatten(x2) # (batch_size, 16*H*W)
# Combine or select paths if making a functional model.
# For this test, we mainly care about layer types, so forward might not be fully executed.
# Let's use x2_flat for subsequent linear layers.
output = self.relu(self.linear1(torch.concat([x1_flat, x2_flat], dim=1)))
output = self.linear2(output)
return output
# Used for testing alora_offsets for aLoRA
class DummyLM(nn.Module):
def __init__(self, vocab_size: int = 10, hidden_dim: int = 8):
super().__init__()
self.embed = nn.Embedding(vocab_size, hidden_dim)
self.linear = nn.Linear(hidden_dim, vocab_size)
def prepare_inputs_for_generation(self, *args, **kwargs):
return kwargs
def forward(self, X=None, embeds=None, num_beams=None, alora_offsets=None):
if X is not None:
embeds = self.embed(X)
return self.linear(embeds)
class MockTransformerWrapper:
"""Mock class to behave like a transformers model.
This is needed because the tests initialize the model by calling transformers_class.from_pretrained.
"""
@classmethod
def from_pretrained(cls):
# set the seed so that from_pretrained always returns the same model
torch.manual_seed(0)
dtype = torch.float32
return DummyLM().to(dtype)
VARIANT_MAP = {
"dora": {
LoraLinear: DoraLinearVariant,
LoraEmbedding: DoraEmbeddingVariant,
LoraConv1d: DoraConv1dVariant,
LoraConv2d: DoraConv2dVariant,
},
"alora": {
LoraLinear: ALoraLinearVariant,
},
}
TEST_CASES = [
(
"dora",
LoraConfig,
{"target_modules": ["linear1", "linear2", "conv1d", "conv2d", "embedding"], "use_dora": True},
),
(
"alora",
LoraConfig,
{"target_modules": ["linear1", "linear2"], "alora_invocation_tokens": [1]},
),
]
class TestLoraVariants:
@pytest.mark.parametrize("variant_name, config_cls, config_kwargs", TEST_CASES)
def test_variant_is_applied_to_layers(self, variant_name, config_cls, config_kwargs):
# This test assumes that targeting and replacing layers works and that after `get_peft_model` we
# have a model with LoRA layers. We just make sure that each LoRA layer has its variant set and
# it is also the correct variant for that layer.
base_model = CustomModel()
peft_config = config_cls(**config_kwargs)
peft_model = get_peft_model(base_model, peft_config)
layer_type_map = VARIANT_MAP[variant_name]
for _, module in peft_model.named_modules():
if not hasattr(module, "lora_variant"):
continue
# Note that not every variant supports every layer. If it is not mapped it is deemed unsupported and
# will not be tested.
expected_variant_type = layer_type_map.get(type(module), None)
if not expected_variant_type:
continue
assert isinstance(module.lora_variant["default"], expected_variant_type)
def custom_model_with_loss_backpropagated(self, peft_config):
"""Returns the CustomModel + PEFT model instance with a dummy loss that was backpropagated once."""
base_model = CustomModel()
peft_model = get_peft_model(base_model, peft_config)
x, y = torch.ones(10, 10).long(), torch.ones(10, 1, 10, 10)
out = peft_model(x, y)
loss = out.sum()
loss.backward()
return base_model, peft_model
def test_dora_params_have_gradients(self):
"""Ensure that the parameters added by the DoRA variant are participating in the output computation."""
layer_names = ["linear1", "linear2", "conv1d", "conv2d", "embedding"]
peft_config = LoraConfig(target_modules=layer_names, use_dora=True)
base_model, peft_model = self.custom_model_with_loss_backpropagated(peft_config)
for layer in layer_names:
assert getattr(peft_model.base_model.model, layer).lora_magnitude_vector["default"].weight.grad is not None
class TestActivatedLora:
@pytest.mark.parametrize(
"input_ids, alora_invocation_tokens, expected_offsets",
[
([[0, 1, 2, 3], [0, 4, 5, 6]], [1, 2], [3, None]),
([[1, 2, 1, 2], [0, 4, 1, 2]], [1, 2], [2, 2]),
([[1, 2, 3, 4], [0, 4, 1, 4]], [1, 2], [4, None]),
([[1, 2, 3, 4]], None, [None]),
],
)
# Verify alora_offsets are calculated correctly
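# (Informal reading of the cases above: each offset counts the tokens from the start of the last
# occurrence of alora_invocation_tokens to the end of the row; None means no occurrence was found.)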
def test_calculate_alora_offsets(self, input_ids, alora_invocation_tokens, expected_offsets):
config = LoraConfig(task_type=TaskType.CAUSAL_LM, alora_invocation_tokens=alora_invocation_tokens)
peft_config = {"default": config}
# compute offsets
offsets = calculate_alora_offsets(peft_config, "default", torch.tensor(input_ids))
assert offsets == expected_offsets
@pytest.mark.parametrize(
"input_ids, alora_invocations, expected_offsets",
[
([[0, 1, 1], [0, 2, 2]], {"a1": [1], "a2": [2]}, [1, 1]),
([[0, 1, 1], [0, 2, 2]], {"a1": [1], "a2": None}, [1, None]),
],
)
# Verify alora_offsets are correct with adapter names
def test_calculate_alora_offsets_with_adapter_names(self, input_ids, alora_invocations, expected_offsets):
peft_config = {}
for alora_name in alora_invocations.keys():
peft_config[alora_name] = LoraConfig(alora_invocation_tokens=alora_invocations[alora_name])
adapter_names = list(alora_invocations.keys())
offsets = calculate_alora_offsets(
peft_config, adapter_names[0], torch.tensor(input_ids), adapter_names=adapter_names
)
assert offsets == expected_offsets
# Verify that the adapter does not modify outputs prior to invocation point
def test_alora_activation_matches_base_until_invocation(self):
transformers_class = MockTransformerWrapper
base_model = transformers_class.from_pretrained()
cfg = LoraConfig(target_modules=["linear"], alora_invocation_tokens=[2], init_lora_weights=False)
lora_model = get_peft_model(base_model, cfg)
lora_model.eval()
input_ids = torch.tensor([[0, 1, 2, 3]])
start = 2
with lora_model.disable_adapter():
with torch.no_grad():
base_out = lora_model(X=input_ids)
kwargs = get_alora_offsets_for_forward(lora_model, input_ids)
with torch.no_grad():
lora_out = lora_model(X=input_ids, **kwargs)
assert torch.allclose(lora_out[:, :start], base_out[:, :start])
assert not torch.allclose(lora_out[:, start:], base_out[:, start:])
# Verify that warning is given for alora when providing embeddings only
def test_input_embeds_warning(self):
transformers_class = MockTransformerWrapper
base_model = transformers_class.from_pretrained()
cfg = LoraConfig(
task_type=TaskType.CAUSAL_LM,
target_modules=["linear"],
alora_invocation_tokens=[2],
init_lora_weights=False,
)
lora_model = get_peft_model(base_model, cfg)
lora_model.eval()
input_ids = torch.tensor([[0, 1, 2, 3]])
input_embeds = base_model.embed(input_ids)
with pytest.warns(
UserWarning,
match="Cannot calculate aLoRA offsets when only inputs_embeds are provided. Disabling aLoRA for this forward pass.",
):
kwargs = get_alora_offsets_for_forward(lora_model, inputs_embeds=input_embeds)
assert kwargs.get("alora_offsets") is None
with pytest.warns(
UserWarning,
match="Cannot calculate aLoRA offsets during generate as input_ids are not available. Disabling aLoRA.",
):
kwargs = get_alora_offsets_for_generate(lora_model, inputs_embeds=input_embeds)
assert kwargs.get("alora_offsets") is None
# Verify that error is raised when requesting num_beams > 1 for alora
def test_num_beams_error(self):
transformers_class = MockTransformerWrapper
base_model = transformers_class.from_pretrained()
cfg = LoraConfig(target_modules=["linear"], alora_invocation_tokens=[2], init_lora_weights=False)
lora_model = get_peft_model(base_model, cfg)
lora_model.eval()
input_ids = torch.tensor([[0, 1, 2, 3]])
with pytest.raises(ValueError) as e:
with torch.no_grad():
lora_out = lora_model(X=input_ids, num_beams=2, alora_offsets=[3])
assert "Beam search not yet supported for aLoRA." in str(e.value)
def test_gradient_checkpointing_double_forward_raises(self):
model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
with hub_online_once(model_id):
base_model = AutoModelForCausalLM.from_pretrained(model_id)
cfg = LoraConfig(task_type=TaskType.CAUSAL_LM, target_modules="all-linear", alora_invocation_tokens=[0])
lora_model = get_peft_model(base_model, cfg)
lora_model.train()
lora_model.prepare_model_for_gradient_checkpointing(lora_model)
lora_model.gradient_checkpointing_enable()
inputs = {"input_ids": torch.tensor([[0, 1, 2, 3]])}
lora_model.forward(**inputs)
with pytest.raises(ValueError, match="Multiple invocations of PEFT forward hooks.*"):
lora_model.forward(**inputs)
def test_gradient_checkpointing_dpo_doesnt_raise(self):
model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
with hub_online_once(model_id):
base_model = AutoModelForCausalLM.from_pretrained(model_id)
cfg = LoraConfig(task_type=TaskType.CAUSAL_LM, target_modules="all-linear", alora_invocation_tokens=[0])
lora_model = get_peft_model(base_model, cfg)
lora_model.train()
lora_model.prepare_model_for_gradient_checkpointing(lora_model)
lora_model.gradient_checkpointing_enable()
inputs = {"input_ids": torch.tensor([[0, 1, 2, 3]])}
with lora_model.disable_adapter():
lora_model.forward(**inputs)
lora_model.forward(**inputs)
| {
"repo_id": "huggingface/peft",
"file_path": "tests/test_lora_variants.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/peft:examples/randlora_finetuning/randlora_finetuning.py | # This script is based on examples/dora_finetuning/dora_finetuning.py
import os
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import LoraConfig, RandLoraConfig, get_peft_model, prepare_model_for_kbit_training
def train_model(
base_model: str,
data_path: str,
output_dir: str,
batch_size: int,
num_epochs: int,
learning_rate: float,
cutoff_len: int,
val_set_size: int,
use_lora: bool,
quantize: bool,
eval_step: int,
save_step: int,
device: str,
rank: int,
randlora_alpha: int,
randlora_dropout: float,
randlora_target_modules: str,
hub_model_id: str,
push_to_hub: bool,
sparse: bool,
very_sparse: bool,
):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
hf_token = os.getenv("HF_TOKEN")
# Setup device
device = torch.device(device)
print(f"Using device: {device}")
# load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)
# Compute type
device_type = device.type
device_module = getattr(torch, device_type, torch.cuda)
bf16_supported = device_module.is_available() and device_module.is_bf16_supported()
dtype = torch.bfloat16 if bf16_supported else torch.float16
# QRandLora (quantized RandLora): pass --quantize to load the base model in 4-bit
if quantize:
model = AutoModelForCausalLM.from_pretrained(
base_model,
token=hf_token,
quantization_config=BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.bfloat16 if bf16_supported else torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
),
dtype=dtype,
)
# setup for quantized training
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
else:
model = AutoModelForCausalLM.from_pretrained(
base_model,
dtype=dtype,
token=hf_token,
)
# LoRA (or RandLora) config for the PEFT model
if use_lora:
peft_config = LoraConfig(
r=rank, # Rank of matrix
lora_alpha=randlora_alpha,
target_modules=(randlora_target_modules.split(",") if randlora_target_modules else ["k_proj", "v_proj"]),
lora_dropout=randlora_dropout,
bias="none",
)
else:
peft_config = RandLoraConfig(
r=rank, # Rank of random bases
randlora_alpha=randlora_alpha,
target_modules=(randlora_target_modules.split(",") if randlora_target_modules else ["k_proj", "v_proj"]),
randlora_dropout=randlora_dropout,
bias="none",
sparse=sparse,
very_sparse=very_sparse,
)
# get the peft model with RandLora config
model = get_peft_model(model, peft_config)
model.to(device) # MODEL TO ACCELERATOR
tokenizer.pad_token = tokenizer.eos_token
# Load the dataset
dataset = load_dataset(data_path)
def tokenize_function(examples):
inputs = tokenizer(examples["text"], padding="max_length", truncation=True, max_length=cutoff_len)
inputs["labels"] = inputs["input_ids"].copy() # setting labels for a language modeling task
return inputs
# Tokenize the dataset and prepare for training
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
# Data collator to dynamically pad the batched examples
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
# Compute the total number of training steps, used to size the warmup below
max_steps = int((len(dataset["train"]) // batch_size) * num_epochs)
# Define training arguments
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_steps=int(max_steps * 0.1), # 10% of total training steps
weight_decay=0.01,
logging_dir="./logs",
logging_steps=eval_step,
save_steps=save_step,
save_total_limit=2,
push_to_hub=push_to_hub,
hub_model_id=hub_model_id,
gradient_accumulation_steps=max(1, 16 // batch_size), # keep an effective batch size of at least 16 after accumulation, recommended for good performance
learning_rate=learning_rate,
hub_token=hf_token,
label_names=["labels"],
)
# Clear accelerator cache to free memory
device_module.empty_cache()
# Initialize the Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["test"],
data_collator=data_collator,
)
# Start model training
trainer.train()
# Save and push the trained model and tokenizer
if push_to_hub:
# Push the main model to the hub
trainer.push_to_hub(commit_message="Fine-tuned model")
# Save the model and tokenizer locally
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Fine-tune LLaMA with DoRA and PEFT")
parser.add_argument("--base_model", type=str, default="huggyllama/llama-7b", help="Base model path or name")
parser.add_argument(
"--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name"
)
parser.add_argument(
"--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model"
)
parser.add_argument("--batch_size", type=int, default=1, help="Batch size")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs")
parser.add_argument("--learning_rate", type=float, default=3e-4, help="Learning rate")
parser.add_argument("--cutoff_len", type=int, default=512, help="Cutoff length for tokenization")
parser.add_argument("--val_set_size", type=int, default=500, help="Validation set size")
parser.add_argument("--use_lora", action="store_true", help="Apply Lora instead of RandLora")
parser.add_argument("--quantize", action="store_true", help="Use quantization")
parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval")
parser.add_argument("--save_step", type=int, default=100, help="Save step interval")
parser.add_argument("--device", type=str, default="auto", help="Device to use for training")
parser.add_argument("--rank", type=int, default=32, help="RandLora basis rank")
parser.add_argument("--randlora_alpha", type=int, default=640, help="RandLora alpha")
parser.add_argument("--randlora_dropout", type=float, default=0.05, help="RandLora dropout rate")
parser.add_argument(
"--randlora_target_modules", type=str, default=None, help="Comma-separated list of target modules for RandLora"
)
parser.add_argument("--sparse", action="store_true", help="Use sparse matrix multiplication")
parser.add_argument("--very_sparse", action="store_true", help="Use very sparse matrix multiplication")
parser.add_argument(
"--hub_model_id",
type=str,
default="path/to/repo",
help="Repository name to push the model on the Hugging Face Hub",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
args = parser.parse_args()
if args.device == "auto":
args.device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
train_model(
base_model=args.base_model,
data_path=args.data_path,
output_dir=args.output_dir,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
learning_rate=args.learning_rate,
cutoff_len=args.cutoff_len,
val_set_size=args.val_set_size,
use_lora=args.use_lora,
quantize=args.quantize,
eval_step=args.eval_step,
save_step=args.save_step,
device=args.device,
rank=args.rank,
randlora_alpha=args.randlora_alpha,
randlora_dropout=args.randlora_dropout,
randlora_target_modules=args.randlora_target_modules,
hub_model_id=args.hub_model_id,
push_to_hub=args.push_to_hub,
sparse=args.sparse,
very_sparse=args.very_sparse,
)
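# Example invocation (illustrative; uses the argparse defaults above and assumes access to the
# base model and dataset on the Hub):
# python randlora_finetuning.py --base_model huggyllama/llama-7b --rank 32 --randlora_alpha 640 --quantize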
| {
"repo_id": "huggingface/peft",
"file_path": "examples/randlora_finetuning/randlora_finetuning.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/task/token_distillation.py | """Token-based distillation training task for models with distillation heads."""
import logging
from typing import Dict, Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models import create_model
from timm.utils import unwrap_model
from .task import TrainingTask
_logger = logging.getLogger(__name__)
class TokenDistillationTeacher(nn.Module):
"""Wrapper for a teacher model used in token-based distillation.
Creates and manages a pre-trained teacher model for token distillation,
handling model creation and normalization differences between teacher and student.
Can be created from:
- A model name string (creates the model internally)
- An existing nn.Module (wraps it with the necessary interface)
Args:
model_name_or_module: Either a model name string or an nn.Module
num_classes: Number of output classes (required if model_name_or_module is a string)
in_chans: Number of input channels (used if model_name_or_module is a string)
pretrained_path: Optional path to pretrained weights (used if model_name_or_module is a string)
device: Device to place the model on
dtype: Model dtype (uses float32 if None)
"""
def __init__(
self,
model_name_or_module: Union[str, nn.Module],
num_classes: Optional[int] = None,
in_chans: int = 3,
pretrained_path: Optional[str] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
):
super().__init__()
if isinstance(model_name_or_module, str):
_logger.info(f"Creating token distillation teacher model: '{model_name_or_module}'")
pretrained_kwargs = {'pretrained': True}
if pretrained_path:
pretrained_kwargs['pretrained_cfg_overlay'] = dict(
file=pretrained_path,
num_classes=num_classes,
)
model = create_model(
model_name=model_name_or_module,
num_classes=num_classes,
in_chans=in_chans,
device=device,
dtype=dtype,
**pretrained_kwargs,
)
elif isinstance(model_name_or_module, nn.Module):
model = model_name_or_module
else:
raise TypeError(
f"model_name_or_module must be a string or nn.Module, got {type(model_name_or_module).__name__}"
)
model.eval()
self.model = model
# Get normalization values from pretrained_cfg if available
model_unwrapped = unwrap_model(model)
if hasattr(model_unwrapped, 'pretrained_cfg'):
mean = model_unwrapped.pretrained_cfg.get('mean', (0.485, 0.456, 0.406))
std = model_unwrapped.pretrained_cfg.get('std', (0.229, 0.224, 0.225))
else:
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
mean_kd = torch.tensor(mean, device=device, dtype=dtype).view(1, -1, 1, 1)
std_kd = torch.tensor(std, device=device, dtype=dtype).view(1, -1, 1, 1)
self.register_buffer('mean_kd', mean_kd, persistent=False)
self.register_buffer('std_kd', std_kd, persistent=False)
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Forward pass through teacher model.
Args:
input: Input tensor (should already be normalized for teacher)
Returns:
Teacher logits
"""
return self.model(input)
def normalize_input(
self,
input: torch.Tensor,
student_mean: Optional[torch.Tensor] = None,
student_std: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Normalize input to match teacher's expected normalization.
Args:
input: Input tensor (already normalized for student)
student_mean: Student normalization mean buffer [1, 3, 1, 1]
student_std: Student normalization std buffer [1, 3, 1, 1]
Returns:
Input tensor normalized for the teacher model
"""
if student_mean is None or student_std is None:
return input
if torch.equal(student_mean, self.mean_kd) and torch.equal(student_std, self.std_kd):
return input
return (input * student_std + student_mean - self.mean_kd) / self.std_kd
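# Note: the expression above first undoes the student normalization (input * std_s + mean_s recovers
# the unnormalized pixels) and then applies the teacher statistics: (x_raw - mean_kd) / std_kd.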
class TokenDistillationTask(TrainingTask):
"""Token-based distillation task for models with distillation heads.
For models like DeiT that have a dedicated distillation token/head that returns
a tuple (main_logits, dist_logits) when distilled_training is enabled. The main
head is trained against ground truth labels while the distillation head matches
teacher outputs.
Supports two distillation modes:
- 'soft': KL divergence with temperature scaling (default)
- 'hard': Cross-entropy with teacher's hard predictions (argmax)
Loss weighting supports two modes:
1. Independent weights: loss = task_loss_weight * task_loss + distill_loss_weight * distill_loss
2. Complementary mode: loss = task_loss_weight * task_loss + (1 - task_loss_weight) * distill_loss
(used when only task_loss_weight is specified)
Args:
student_model: Student model with set_distilled_training() method
teacher_model: Teacher model - can be a model name string, nn.Module, or TokenDistillationTeacher
criterion: Task loss function for main head (default: CrossEntropyLoss)
teacher_pretrained_path: Path to teacher pretrained weights (used when teacher_model is a string)
distill_type: 'soft' for KL-div or 'hard' for CE with teacher argmax
distill_loss_weight: Weight for distillation loss
task_loss_weight: Weight for task loss
temperature: Softmax temperature for soft distillation (ignored for hard)
device: Device for task tensors/buffers
dtype: Dtype for task tensors/buffers
verbose: Enable info logging
Example:
>>> # With model name string (num_classes/in_chans inferred from student)
>>> task = TokenDistillationTask(
... student_model=model, teacher_model='deit_base_patch16_224',
... criterion=nn.CrossEntropyLoss(),
... distill_type='soft', temperature=3.0, task_loss_weight=0.5,
... device=torch.device('cuda'),
... )
>>> # With raw model
>>> task = TokenDistillationTask(
... student_model=model, teacher_model=my_teacher_model,
... criterion=nn.CrossEntropyLoss(),
... distill_type='hard', task_loss_weight=0.5,
... )
"""
def __init__(
self,
student_model: nn.Module,
teacher_model: Union[str, nn.Module, TokenDistillationTeacher],
criterion: Optional[nn.Module] = None,
teacher_pretrained_path: Optional[str] = None,
distill_type: str = 'soft',
distill_loss_weight: Optional[float] = None,
task_loss_weight: Optional[float] = None,
temperature: float = 1.0,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
verbose: bool = True,
):
super().__init__(device=device, dtype=dtype, verbose=verbose)
# Validate model has set_distilled_training method
student_unwrapped = unwrap_model(student_model)
if not hasattr(student_unwrapped, 'set_distilled_training'):
raise ValueError(
f"Model {student_unwrapped.__class__.__name__} does not have 'set_distilled_training' method. "
"TokenDistillationTask requires a model with a distillation head (e.g., DeiT distilled variants)."
)
# Enable distilled training mode
student_unwrapped.set_distilled_training(True)
# Handle different teacher input types
if isinstance(teacher_model, TokenDistillationTeacher):
teacher = teacher_model
elif isinstance(teacher_model, str) or isinstance(teacher_model, nn.Module):
# Get num_classes and in_chans from student
num_classes = student_unwrapped.num_classes
in_chans = student_unwrapped.in_chans
teacher = TokenDistillationTeacher(
model_name_or_module=teacher_model,
num_classes=num_classes,
in_chans=in_chans,
pretrained_path=teacher_pretrained_path,
device=self.device,
dtype=self.dtype,
)
else:
raise TypeError(
f"teacher_model must be a model name string, nn.Module, or TokenDistillationTeacher, "
f"got {type(teacher_model).__name__}"
)
self.student = student_model
self.teacher = teacher
self.criterion = criterion if criterion is not None else nn.CrossEntropyLoss()
self.distill_type = distill_type
self.temperature = temperature
if distill_type not in ('soft', 'hard'):
raise ValueError(f"Unsupported distill_type '{distill_type}'. Must be 'soft' or 'hard'.")
# Register student normalization values as non-persistent buffers
student_mean = torch.tensor(
student_unwrapped.pretrained_cfg['mean'],
device=self.device,
dtype=self.dtype,
).view(1, -1, 1, 1)
student_std = torch.tensor(
student_unwrapped.pretrained_cfg['std'],
device=self.device,
dtype=self.dtype,
).view(1, -1, 1, 1)
self.register_buffer('student_mean', student_mean, persistent=False)
self.register_buffer('student_std', student_std, persistent=False)
# Determine weighting mode
if distill_loss_weight is not None:
# Mode 1: distill_weight specified - independent weights (task defaults to 1.0 if not set)
self.distill_loss_weight = distill_loss_weight
self.task_loss_weight = task_loss_weight if task_loss_weight is not None else 1.0
if self.verbose:
_logger.info(
f"TokenDistillationTask: Independent weights - "
f"task_weight={self.task_loss_weight}, distill_weight={distill_loss_weight}"
)
elif task_loss_weight is not None:
# Mode 2: only task_weight specified - complementary mode (distill = 1 - task)
self.task_loss_weight = task_loss_weight
self.distill_loss_weight = 1.0 - task_loss_weight
if self.verbose:
_logger.info(
f"TokenDistillationTask: Complementary mode - "
f"task_weight={task_loss_weight}, distill_weight={self.distill_loss_weight}"
)
else:
# Mode 3: neither specified - equal weights (both 1.0)
self.distill_loss_weight = 1.0
self.task_loss_weight = 1.0
if self.verbose:
_logger.info(
f"TokenDistillationTask: Default equal weights - "
f"task_weight={self.task_loss_weight}, distill_weight={self.distill_loss_weight}"
)
if self.verbose:
_logger.info(
f"TokenDistillationTask: distill_type={distill_type}, temperature={temperature}"
)
def prepare_distributed(
self,
device_ids: Optional[list] = None,
**ddp_kwargs
) -> 'TokenDistillationTask':
"""Prepare task for distributed training.
Wraps the student model in DistributedDataParallel (DDP) while leaving
the frozen teacher model unwrapped.
Args:
device_ids: List of device IDs for DDP (e.g., [local_rank])
**ddp_kwargs: Additional arguments passed to DistributedDataParallel
Returns:
self (for method chaining)
"""
from torch.nn.parallel import DistributedDataParallel as DDP
for param in self.teacher.parameters():
param.requires_grad = False
self.student = DDP(self.student, device_ids=device_ids, **ddp_kwargs)
return self
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> Dict[str, torch.Tensor]:
"""Forward pass with token distillation.
Args:
input: Input tensor [B, C, H, W]
target: Target labels [B]
Returns:
Dictionary containing:
- 'loss': Combined training loss (task + distillation)
- 'output': Main head logits (for metrics)
- 'task_loss': Classification loss component
- 'distill_loss': Distillation loss component
"""
# Student forward pass - returns tuple (main_logits, dist_logits)
student_output = self.student(input)
main_logits, dist_logits = student_output
# Compute task loss on main head
task_loss = self.criterion(main_logits, target)
# Teacher forward pass (no gradient)
with torch.no_grad():
input_kd = self.teacher.normalize_input(input, self.student_mean, self.student_std)
teacher_logits = self.teacher(input_kd.detach())
# Compute distillation loss on distillation head
if self.distill_type == 'soft':
prob_s = F.log_softmax(dist_logits / self.temperature, dim=-1)
prob_t = F.log_softmax(teacher_logits / self.temperature, dim=-1)
distill_loss = F.kl_div(prob_s, prob_t, reduction='batchmean', log_target=True) * (self.temperature ** 2)
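# Scaling by temperature**2 keeps distillation-gradient magnitudes roughly comparable across
# temperatures, following common soft-distillation practice.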
else:
teacher_hard = teacher_logits.argmax(dim=-1)
distill_loss = F.cross_entropy(dist_logits, teacher_hard)
total_loss = self.task_loss_weight * task_loss + self.distill_loss_weight * distill_loss
return {
'loss': total_loss,
'output': main_logits,
'task_loss': task_loss,
'distill_loss': distill_loss,
}
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/task/token_distillation.py",
"license": "Apache License 2.0",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:tests/test_scheduler.py | """ Scheduler Tests
Tests for learning rate schedulers in timm.scheduler.
"""
import math
import pytest
import torch
from torch.nn import Parameter
from timm.scheduler import (
CosineLRScheduler,
StepLRScheduler,
MultiStepLRScheduler,
PlateauLRScheduler,
PolyLRScheduler,
TanhLRScheduler,
)
from timm.scheduler.scheduler import Scheduler
def _create_optimizer(lr: float = 0.1, num_groups: int = 1) -> torch.optim.Optimizer:
"""Create a mock optimizer with simple parameters for testing."""
param_groups = []
for _ in range(num_groups):
param = Parameter(torch.randn(10, 5))
param_groups.append({'params': [param], 'lr': lr})
optimizer = torch.optim.SGD(param_groups, lr=lr)
return optimizer
class TestSchedulerBasics:
"""Test basic scheduler initialization and stepping."""
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10}),
(MultiStepLRScheduler, {'decay_t': [10, 20, 30]}),
(PlateauLRScheduler, {}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_scheduler_init(self, scheduler_cls, kwargs):
"""Test that all schedulers can be initialized."""
optimizer = _create_optimizer()
scheduler = scheduler_cls(optimizer, **kwargs)
assert scheduler is not None
assert scheduler.optimizer is optimizer
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10}),
(MultiStepLRScheduler, {'decay_t': [10, 20, 30]}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_scheduler_step(self, scheduler_cls, kwargs):
"""Test that schedulers can step without error."""
optimizer = _create_optimizer()
scheduler = scheduler_cls(optimizer, **kwargs)
initial_lr = optimizer.param_groups[0]['lr']
for epoch in range(10):
scheduler.step(epoch)
# LR should have changed after stepping
final_lr = optimizer.param_groups[0]['lr']
# For most schedulers, LR should decrease or stay same
assert final_lr <= initial_lr
def test_plateau_scheduler_step(self):
"""Test PlateauLRScheduler with metric."""
optimizer = _create_optimizer()
scheduler = PlateauLRScheduler(optimizer, patience_t=2, decay_rate=0.5)
# Simulate plateau - same metric for multiple steps
for epoch in range(10):
scheduler.step(epoch, metric=1.0)
class TestWarmup:
"""Test warmup behavior across schedulers."""
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10}),
(MultiStepLRScheduler, {'decay_t': [10, 20, 30]}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_warmup_lr_increases(self, scheduler_cls, kwargs):
"""Test that LR increases during warmup period."""
base_lr = 0.1
warmup_lr_init = 0.001
warmup_t = 5
optimizer = _create_optimizer(lr=base_lr)
scheduler = scheduler_cls(
optimizer,
warmup_t=warmup_t,
warmup_lr_init=warmup_lr_init,
**kwargs,
)
# Initial LR should be warmup_lr_init
assert optimizer.param_groups[0]['lr'] == pytest.approx(warmup_lr_init, rel=1e-5)
# LR should increase during warmup
prev_lr = warmup_lr_init
for epoch in range(1, warmup_t):
scheduler.step(epoch)
current_lr = optimizer.param_groups[0]['lr']
assert current_lr > prev_lr, f"LR should increase during warmup at epoch {epoch}"
prev_lr = current_lr
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10, 'decay_rate': 0.5}),
(MultiStepLRScheduler, {'decay_t': [10, 20, 30], 'decay_rate': 0.5}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_warmup_prefix_reaches_target_lr(self, scheduler_cls, kwargs):
"""Test that target LR is reached at first step after warmup when warmup_prefix=True."""
base_lr = 0.1
warmup_lr_init = 0.001
warmup_t = 5
optimizer = _create_optimizer(lr=base_lr)
scheduler = scheduler_cls(
optimizer,
warmup_t=warmup_t,
warmup_lr_init=warmup_lr_init,
warmup_prefix=True,
**kwargs,
)
# Step through warmup
for epoch in range(warmup_t):
scheduler.step(epoch)
# At t=warmup_t (first step after warmup), with warmup_prefix=True,
# the main schedule starts at t=0, which should be base_lr
scheduler.step(warmup_t)
lr_after_warmup = optimizer.param_groups[0]['lr']
assert lr_after_warmup == pytest.approx(base_lr, rel=1e-5), \
f"LR should be base_lr ({base_lr}) at first step after warmup, got {lr_after_warmup}"
class TestCosineScheduler:
"""Test CosineLRScheduler specific behavior."""
def test_cosine_decay(self):
"""Test that cosine scheduler decays LR correctly."""
base_lr = 0.1
lr_min = 0.001
t_initial = 100
optimizer = _create_optimizer(lr=base_lr)
scheduler = CosineLRScheduler(
optimizer,
t_initial=t_initial,
lr_min=lr_min,
)
# At t=0, LR should be base_lr
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr, rel=1e-5)
# At t=t_initial/2, LR should be approximately (base_lr + lr_min) / 2
scheduler.step(t_initial // 2)
mid_lr = optimizer.param_groups[0]['lr']
expected_mid = lr_min + 0.5 * (base_lr - lr_min) * (1 + math.cos(math.pi * 0.5))
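# With base_lr=0.1 and lr_min=0.001 this evaluates to 0.001 + 0.5 * 0.099 * 1.0 = 0.0505, since cos(pi/2) == 0.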
assert mid_lr == pytest.approx(expected_mid, rel=1e-2)
# At t=t_initial, LR should be lr_min
scheduler.step(t_initial)
assert optimizer.param_groups[0]['lr'] == pytest.approx(lr_min, rel=1e-5)
def test_cosine_cycles(self):
"""Test cosine scheduler with multiple cycles."""
base_lr = 0.1
lr_min = 0.001
t_initial = 50
cycle_limit = 2
optimizer = _create_optimizer(lr=base_lr)
scheduler = CosineLRScheduler(
optimizer,
t_initial=t_initial,
lr_min=lr_min,
cycle_limit=cycle_limit,
)
# Step through the first cycle - at t_initial-1, LR should be near minimum
scheduler.step(t_initial - 1)
lr_near_end = optimizer.param_groups[0]['lr']
assert lr_near_end < base_lr * 0.5, "LR should be significantly lower near end of cycle"
# After cycle limit is exceeded, LR should stay at lr_min
for epoch in range(t_initial * cycle_limit, t_initial * cycle_limit + 10):
scheduler.step(epoch)
lr_after_cycles = optimizer.param_groups[0]['lr']
assert lr_after_cycles == pytest.approx(lr_min, rel=1e-5)
def test_get_cycle_length(self):
"""Test get_cycle_length method."""
optimizer = _create_optimizer()
t_initial = 100
scheduler = CosineLRScheduler(optimizer, t_initial=t_initial)
assert scheduler.get_cycle_length(1) == t_initial
# With warmup prefix
warmup_t = 10
scheduler_warmup = CosineLRScheduler(
optimizer,
t_initial=t_initial,
warmup_t=warmup_t,
warmup_prefix=True,
)
assert scheduler_warmup.get_cycle_length(1) == t_initial + warmup_t
class TestStepScheduler:
"""Test StepLRScheduler specific behavior."""
def test_step_decay(self):
"""Test that step scheduler decays at correct intervals."""
base_lr = 0.1
decay_t = 10
decay_rate = 0.5
optimizer = _create_optimizer(lr=base_lr)
scheduler = StepLRScheduler(
optimizer,
decay_t=decay_t,
decay_rate=decay_rate,
)
# Before first decay
scheduler.step(decay_t - 1)
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr, rel=1e-5)
# After first decay
scheduler.step(decay_t)
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr * decay_rate, rel=1e-5)
# After second decay
scheduler.step(2 * decay_t)
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr * decay_rate ** 2, rel=1e-5)
class TestMultiStepScheduler:
"""Test MultiStepLRScheduler specific behavior."""
def test_multistep_decay(self):
"""Test decay at specified milestones."""
base_lr = 0.1
decay_t = [10, 20, 30]
decay_rate = 0.5
optimizer = _create_optimizer(lr=base_lr)
scheduler = MultiStepLRScheduler(
optimizer,
decay_t=decay_t,
decay_rate=decay_rate,
)
# Before first milestone
scheduler.step(8)
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr, rel=1e-5)
# After first milestone (step 10 means we've passed milestone at 10)
scheduler.step(11)
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr * decay_rate, rel=1e-5)
# After second milestone
scheduler.step(21)
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr * decay_rate ** 2, rel=1e-5)
# After third milestone
scheduler.step(31)
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr * decay_rate ** 3, rel=1e-5)
class TestPolyScheduler:
"""Test PolyLRScheduler specific behavior."""
def test_poly_decay(self):
"""Test polynomial decay behavior."""
base_lr = 0.1
lr_min = 0.001
t_initial = 100
power = 1.0 # Linear decay
optimizer = _create_optimizer(lr=base_lr)
scheduler = PolyLRScheduler(
optimizer,
t_initial=t_initial,
lr_min=lr_min,
power=power,
)
# At t=0, LR should be base_lr
assert optimizer.param_groups[0]['lr'] == pytest.approx(base_lr, rel=1e-5)
# At t=t_initial, LR should be lr_min
scheduler.step(t_initial)
assert optimizer.param_groups[0]['lr'] == pytest.approx(lr_min, rel=1e-5)
class TestTanhScheduler:
"""Test TanhLRScheduler specific behavior."""
def test_tanh_decay(self):
"""Test tanh decay behavior."""
base_lr = 0.1
lr_min = 0.001
t_initial = 100
optimizer = _create_optimizer(lr=base_lr)
scheduler = TanhLRScheduler(
optimizer,
t_initial=t_initial,
lr_min=lr_min,
)
# Collect LR values
lrs = [optimizer.param_groups[0]['lr']]
for epoch in range(1, t_initial + 1):
scheduler.step(epoch)
lrs.append(optimizer.param_groups[0]['lr'])
# LR should generally decrease (with possible non-monotonic behavior due to tanh)
assert lrs[-1] < lrs[0]
class TestStateDict:
"""Test state dict save/load functionality."""
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10}),
(MultiStepLRScheduler, {'decay_t': [10, 20, 30]}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_state_dict_save_load(self, scheduler_cls, kwargs):
"""Test that state dict can be saved and loaded."""
optimizer = _create_optimizer()
scheduler = scheduler_cls(optimizer, **kwargs)
# Step a few times
for epoch in range(5):
scheduler.step(epoch)
# Save state
state_dict = scheduler.state_dict()
assert isinstance(state_dict, dict)
# Create new scheduler and load state
optimizer2 = _create_optimizer()
scheduler2 = scheduler_cls(optimizer2, **kwargs)
scheduler2.load_state_dict(state_dict)
# State should be restored
assert scheduler2.state_dict() == state_dict
def test_plateau_state_dict_save_load(self):
"""Test PlateauLRScheduler state dict save/load."""
optimizer = _create_optimizer()
scheduler = PlateauLRScheduler(optimizer)
# Step a few times
for epoch in range(5):
scheduler.step(epoch, metric=1.0)
# Save state
state_dict = scheduler.state_dict()
assert isinstance(state_dict, dict)
# Create new scheduler and load state
optimizer2 = _create_optimizer()
scheduler2 = PlateauLRScheduler(optimizer2)
scheduler2.load_state_dict(state_dict)
# State should be restored
assert scheduler2.state_dict() == state_dict
class TestStepUpdate:
"""Test step_update for update-based scheduling."""
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10, 'decay_rate': 0.5}),
(MultiStepLRScheduler, {'decay_t': [10, 20, 30], 'decay_rate': 0.5}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_step_update_with_t_in_epochs_false(self, scheduler_cls, kwargs):
"""Test step_update when t_in_epochs=False."""
optimizer = _create_optimizer()
scheduler = scheduler_cls(
optimizer,
t_in_epochs=False,
**kwargs,
)
initial_lr = optimizer.param_groups[0]['lr']
# step_update should work when t_in_epochs=False
for update in range(50):
scheduler.step_update(update)
# LR should have changed for all these schedulers by step 50
final_lr = optimizer.param_groups[0]['lr']
assert final_lr != initial_lr, f"LR should change after 50 updates for {scheduler_cls.__name__}"
class TestMultipleParamGroups:
"""Test schedulers with multiple parameter groups."""
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10}),
(MultiStepLRScheduler, {'decay_t': [10, 20, 30]}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_multiple_param_groups(self, scheduler_cls, kwargs):
"""Test that schedulers handle multiple param groups correctly."""
optimizer = _create_optimizer(num_groups=3)
scheduler = scheduler_cls(optimizer, **kwargs)
initial_lrs = [pg['lr'] for pg in optimizer.param_groups]
for epoch in range(20):
scheduler.step(epoch)
final_lrs = [pg['lr'] for pg in optimizer.param_groups]
# All param groups should be updated
for i, (initial, final) in enumerate(zip(initial_lrs, final_lrs)):
assert final <= initial, f"Param group {i} LR should decrease or stay same"
class TestNoise:
"""Test noise application in schedulers."""
@pytest.mark.parametrize('scheduler_cls,kwargs', [
(CosineLRScheduler, {'t_initial': 100}),
(StepLRScheduler, {'decay_t': 10}),
(PolyLRScheduler, {'t_initial': 100}),
(TanhLRScheduler, {'t_initial': 100}),
])
def test_noise_range(self, scheduler_cls, kwargs):
"""Test that noise is applied within specified range."""
optimizer = _create_optimizer()
noise_range_t = (10, 50)
scheduler = scheduler_cls(
optimizer,
noise_range_t=noise_range_t,
noise_pct=0.5,
noise_seed=42,
**kwargs,
)
# Collect LRs with same seed - should be deterministic
lrs_run1 = []
for epoch in range(60):
scheduler.step(epoch)
lrs_run1.append(optimizer.param_groups[0]['lr'])
# Reset and run again with same seed
optimizer2 = _create_optimizer()
scheduler2 = scheduler_cls(
optimizer2,
noise_range_t=noise_range_t,
noise_pct=0.5,
noise_seed=42,
**kwargs,
)
lrs_run2 = []
for epoch in range(60):
scheduler2.step(epoch)
lrs_run2.append(optimizer2.param_groups[0]['lr'])
# With same seed, noise should be deterministic
assert lrs_run1 == lrs_run2
class TestKDecay:
"""Test k-decay option in cosine and poly schedulers."""
def test_cosine_k_decay(self):
"""Test k-decay in cosine scheduler."""
optimizer1 = _create_optimizer()
optimizer2 = _create_optimizer()
scheduler_k1 = CosineLRScheduler(optimizer1, t_initial=100, k_decay=1.0)
scheduler_k2 = CosineLRScheduler(optimizer2, t_initial=100, k_decay=2.0)
# Different k values should produce different schedules
lrs_k1 = []
lrs_k2 = []
for epoch in range(100):
scheduler_k1.step(epoch)
scheduler_k2.step(epoch)
lrs_k1.append(optimizer1.param_groups[0]['lr'])
lrs_k2.append(optimizer2.param_groups[0]['lr'])
# The schedules should differ (except at endpoints)
assert lrs_k1[50] != lrs_k2[50]
def test_poly_k_decay(self):
"""Test k-decay in poly scheduler."""
optimizer1 = _create_optimizer()
optimizer2 = _create_optimizer()
scheduler_k1 = PolyLRScheduler(optimizer1, t_initial=100, k_decay=1.0)
scheduler_k2 = PolyLRScheduler(optimizer2, t_initial=100, k_decay=2.0)
lrs_k1 = []
lrs_k2 = []
for epoch in range(100):
scheduler_k1.step(epoch)
scheduler_k2.step(epoch)
lrs_k1.append(optimizer1.param_groups[0]['lr'])
lrs_k2.append(optimizer2.param_groups[0]['lr'])
# The schedules should differ
assert lrs_k1[50] != lrs_k2[50]
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "tests/test_scheduler.py",
"license": "Apache License 2.0",
"lines": 428,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/pytorch-image-models:timm/models/csatv2.py | """CSATv2
A frequency-domain vision model using DCT transforms with spatial attention.
Paper: TBD
This model was created by members of MLPA Lab. Feedback, suggestions, and questions are welcome.
gusdlf93@naver.com
juno.demie.oh@gmail.com
Refined for timm by Ross Wightman
"""
import math
import warnings
from functools import partial, reduce
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, Mlp, LayerNorm2d, Attention, NormMlpClassifierHead, LayerScale, LayerScale2d
from timm.layers.grn import GlobalResponseNorm
from timm.models._builder import build_model_with_cfg
from timm.models._features import feature_take_indices
from timm.models._manipulate import checkpoint, checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['CSATv2', 'csatv2']
# DCT frequency normalization statistics (Y, Cb, Cr channels x 64 coefficients)
_DCT_MEAN = (
(932.42657, -0.00260, 0.33415, -0.02840, 0.00003, -0.02792, -0.00183, 0.00006,
0.00032, 0.03402, -0.00571, 0.00020, 0.00006, -0.00038, -0.00558, -0.00116,
-0.00000, -0.00047, -0.00008, -0.00030, 0.00942, 0.00161, -0.00009, -0.00006,
-0.00014, -0.00035, 0.00001, -0.00220, 0.00033, -0.00002, -0.00003, -0.00020,
0.00007, -0.00000, 0.00005, 0.00293, -0.00004, 0.00006, 0.00019, 0.00004,
0.00006, -0.00015, -0.00002, 0.00007, 0.00010, -0.00004, 0.00008, 0.00000,
0.00008, -0.00001, 0.00015, 0.00002, 0.00007, 0.00003, 0.00004, -0.00001,
0.00004, -0.00000, 0.00002, -0.00000, -0.00008, -0.00000, -0.00003, 0.00003),
(962.34735, -0.00428, 0.09835, 0.00152, -0.00009, 0.00312, -0.00141, -0.00001,
-0.00013, 0.01050, 0.00065, 0.00006, -0.00000, 0.00003, 0.00264, 0.00000,
0.00001, 0.00007, -0.00006, 0.00003, 0.00341, 0.00163, 0.00004, 0.00003,
-0.00001, 0.00008, -0.00000, 0.00090, 0.00018, -0.00006, -0.00001, 0.00007,
-0.00003, -0.00001, 0.00006, 0.00084, -0.00000, -0.00001, 0.00000, 0.00004,
-0.00001, -0.00002, 0.00000, 0.00001, 0.00002, 0.00001, 0.00004, 0.00011,
0.00000, -0.00003, 0.00011, -0.00002, 0.00001, 0.00001, 0.00001, 0.00001,
-0.00007, -0.00003, 0.00001, 0.00000, 0.00001, 0.00002, 0.00001, 0.00000),
(1053.16101, -0.00213, -0.09207, 0.00186, 0.00013, 0.00034, -0.00119, 0.00002,
0.00011, -0.00984, 0.00046, -0.00007, -0.00001, -0.00005, 0.00180, 0.00042,
0.00002, -0.00010, 0.00004, 0.00003, -0.00301, 0.00125, -0.00002, -0.00003,
-0.00001, -0.00001, -0.00001, 0.00056, 0.00021, 0.00001, -0.00001, 0.00002,
-0.00001, -0.00001, 0.00005, -0.00070, -0.00002, -0.00002, 0.00005, -0.00004,
-0.00000, 0.00002, -0.00002, 0.00001, 0.00000, -0.00003, 0.00004, 0.00007,
0.00001, 0.00000, 0.00013, -0.00000, 0.00000, 0.00002, -0.00000, -0.00001,
-0.00004, -0.00003, 0.00000, 0.00001, -0.00001, 0.00001, -0.00000, 0.00000),
)
_DCT_VAR = (
(270372.37500, 6287.10645, 5974.94043, 1653.10889, 1463.91748, 1832.58997, 755.92468, 692.41528,
648.57184, 641.46881, 285.79288, 301.62100, 380.43405, 349.84027, 374.15891, 190.30960,
190.76746, 221.64578, 200.82646, 145.87979, 126.92046, 62.14622, 67.75562, 102.42001,
129.74922, 130.04631, 103.12189, 97.76417, 53.17402, 54.81048, 73.48712, 81.04342,
69.35100, 49.06024, 33.96053, 37.03279, 20.48858, 24.94830, 33.90822, 44.54912,
47.56363, 40.03160, 30.43313, 22.63899, 26.53739, 26.57114, 21.84404, 17.41557,
15.18253, 10.69678, 11.24111, 12.97229, 15.08971, 15.31646, 8.90409, 7.44213,
6.66096, 6.97719, 4.17834, 3.83882, 4.51073, 2.36646, 2.41363, 1.48266),
(18839.21094, 321.70932, 300.15259, 77.47830, 76.02293, 89.04748, 33.99642, 34.74807,
32.12333, 28.19588, 12.04675, 14.26871, 18.45779, 16.59588, 15.67892, 7.37718,
8.56312, 10.28946, 9.41013, 6.69090, 5.16453, 2.55186, 3.03073, 4.66765,
5.85418, 5.74644, 4.33702, 3.66948, 1.95107, 2.26034, 3.06380, 3.50705,
3.06359, 2.19284, 1.54454, 1.57860, 0.97078, 1.13941, 1.48653, 1.89996,
1.95544, 1.64950, 1.24754, 0.93677, 1.09267, 1.09516, 0.94163, 0.78966,
0.72489, 0.50841, 0.50909, 0.55664, 0.63111, 0.64125, 0.38847, 0.33378,
0.30918, 0.33463, 0.20875, 0.19298, 0.21903, 0.13380, 0.13444, 0.09554),
(17127.39844, 292.81421, 271.45209, 66.64056, 63.60253, 76.35437, 28.06587, 27.84831,
25.96656, 23.60370, 9.99173, 11.34992, 14.46955, 12.92553, 12.69353, 5.91537,
6.60187, 7.90891, 7.32825, 5.32785, 4.29660, 2.13459, 2.44135, 3.66021,
4.50335, 4.38959, 3.34888, 2.97181, 1.60633, 1.77010, 2.35118, 2.69018,
2.38189, 1.74596, 1.26014, 1.31684, 0.79327, 0.92046, 1.17670, 1.47609,
1.50914, 1.28725, 0.99898, 0.74832, 0.85736, 0.85800, 0.74663, 0.63508,
0.58748, 0.41098, 0.41121, 0.44663, 0.50277, 0.51519, 0.31729, 0.27336,
0.25399, 0.27241, 0.17353, 0.16255, 0.18440, 0.11602, 0.11511, 0.08450),
)
def _zigzag_permutation(rows: int, cols: int) -> List[int]:
"""Generate zigzag scan order for DCT coefficients."""
idx_matrix = np.arange(0, rows * cols, 1).reshape(rows, cols).tolist()
dia = [[] for _ in range(rows + cols - 1)]
zigzag = []
for i in range(rows):
for j in range(cols):
s = i + j
if s % 2 == 0:
dia[s].insert(0, idx_matrix[i][j])
else:
dia[s].append(idx_matrix[i][j])
for d in dia:
zigzag.extend(d)
return zigzag
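# e.g. _zigzag_permutation(3, 3) -> [0, 1, 3, 6, 4, 2, 5, 7, 8], the JPEG-style zigzag scan of a 3x3 grid.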
def _dct_kernel_type_2(
kernel_size: int,
orthonormal: bool,
device=None,
dtype=None,
) -> torch.Tensor:
"""Generate Type-II DCT kernel matrix."""
dd = dict(device=device, dtype=dtype)
x = torch.eye(kernel_size, **dd)
v = x.clone().contiguous().view(-1, kernel_size)
v = torch.cat([v, v.flip([1])], dim=-1)
v = torch.fft.fft(v, dim=-1)[:, :kernel_size]
k = (
torch.tensor(-1j, device=device, dtype=torch.complex64) * torch.pi
* torch.arange(kernel_size, device=device, dtype=torch.long)[None, :]
)
k = torch.exp(k / (kernel_size * 2))
v = v * k
v = v.real
if orthonormal:
v[:, 0] = v[:, 0] * torch.sqrt(torch.tensor(1 / (kernel_size * 4), **dd))
v[:, 1:] = v[:, 1:] * torch.sqrt(torch.tensor(1 / (kernel_size * 2), **dd))
v = v.contiguous().view(*x.shape)
return v
def _dct_kernel_type_3(
kernel_size: int,
orthonormal: bool,
device=None,
dtype=None,
) -> torch.Tensor:
"""Generate Type-III DCT kernel matrix (inverse of Type-II)."""
return torch.linalg.inv(_dct_kernel_type_2(kernel_size, orthonormal, device, dtype))
class Dct1d(nn.Module):
"""1D Discrete Cosine Transform layer."""
def __init__(
self,
kernel_size: int,
kernel_type: int = 2,
orthonormal: bool = True,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
kernel = {'2': _dct_kernel_type_2, '3': _dct_kernel_type_3}
dct_weights = kernel[f'{kernel_type}'](kernel_size, orthonormal, **dd).T
self.register_buffer('weights', dct_weights.contiguous())
self.register_parameter('bias', None)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return F.linear(x, self.weights, self.bias)
class Dct2d(nn.Module):
"""2D Discrete Cosine Transform layer."""
def __init__(
self,
kernel_size: int,
kernel_type: int = 2,
orthonormal: bool = True,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
self.transform = Dct1d(kernel_size, kernel_type, orthonormal, **dd)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.transform(self.transform(x).transpose(-1, -2)).transpose(-1, -2)
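# Minimal round-trip sanity check (illustration only, not used by the model, relying on the
# imports at the top of this file): the Type-III kernel is the matrix inverse of the Type-II
# kernel, so chaining Dct2d(kernel_type=2) with Dct2d(kernel_type=3) should reconstruct the
# input up to floating-point error. The helper name below is hypothetical.
def _dct2d_roundtrip_ok(kernel_size: int = 8) -> bool:
    x = torch.randn(2, 3, kernel_size, kernel_size)
    fwd = Dct2d(kernel_size, kernel_type=2)
    inv = Dct2d(kernel_size, kernel_type=3)
    return torch.allclose(inv(fwd(x)), x, atol=1e-5)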
def _split_out_chs(out_chs: int, ratio=(24, 4, 4)):
# reduce ratio to smallest integers (24,4,4) -> (6,1,1)
g = reduce(math.gcd, ratio)
r = tuple(x // g for x in ratio)
denom = sum(r)
assert out_chs % denom == 0 and out_chs >= denom, (
f"out_chs={out_chs} can't be split into Y/Cb/Cr with ratio {ratio} "
f"(reduced {r}); out_chs must be a multiple of {denom}."
)
unit = out_chs // denom
y, cb, cr = (ri * unit for ri in r)
assert y + cb + cr == out_chs and min(y, cb, cr) > 0
return y, cb, cr
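# Worked examples: the (24, 4, 4) ratio reduces to (6, 1, 1), so out_chs must be a multiple of 8.
#   _split_out_chs(32) == (24, 4, 4)   # unit = 4
#   _split_out_chs(48) == (36, 6, 6)   # unit = 6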
class LearnableDct2d(nn.Module):
"""Learnable 2D DCT stem with RGB to YCbCr conversion and frequency selection."""
def __init__(
self,
kernel_size: int,
kernel_type: int = 2,
orthonormal: bool = True,
out_chs: int = 32,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
self.k = kernel_size
self.transform = Dct2d(kernel_size, kernel_type, orthonormal, **dd)
self.permutation = _zigzag_permutation(kernel_size, kernel_size)
y_ch, cb_ch, cr_ch = _split_out_chs(out_chs, ratio=(24, 4, 4))
self.conv_y = nn.Conv2d(kernel_size ** 2, y_ch, kernel_size=1, padding=0, **dd)
self.conv_cb = nn.Conv2d(kernel_size ** 2, cb_ch, kernel_size=1, padding=0, **dd)
self.conv_cr = nn.Conv2d(kernel_size ** 2, cr_ch, kernel_size=1, padding=0, **dd)
# Register empty buffers for DCT normalization statistics
self.register_buffer('mean', torch.empty(3, 64, device=device, dtype=dtype), persistent=False)
self.register_buffer('var', torch.empty(3, 64, device=device, dtype=dtype), persistent=False)
# Shape (3, 1, 1) for BCHW broadcasting
self.register_buffer('imagenet_mean', torch.empty(3, 1, 1, device=device, dtype=dtype), persistent=False)
self.register_buffer('imagenet_std', torch.empty(3, 1, 1, device=device, dtype=dtype), persistent=False)
# TODO: skip init when on meta device when safe to do so
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize buffers."""
self._init_buffers()
def _init_buffers(self) -> None:
"""Compute and fill non-persistent buffer values."""
self.mean.copy_(torch.tensor(_DCT_MEAN))
self.var.copy_(torch.tensor(_DCT_VAR))
self.imagenet_mean.copy_(torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1))
self.imagenet_std.copy_(torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1))
def init_non_persistent_buffers(self) -> None:
"""Initialize non-persistent buffers."""
self._init_buffers()
def _denormalize(self, x: torch.Tensor) -> torch.Tensor:
"""Convert from ImageNet normalized to [0, 255] range."""
return x.mul(self.imagenet_std).add_(self.imagenet_mean) * 255
def _rgb_to_ycbcr(self, x: torch.Tensor) -> torch.Tensor:
"""Convert RGB to YCbCr color space (BCHW input/output)."""
r, g, b = x[:, 0], x[:, 1], x[:, 2]
y = r * 0.299 + g * 0.587 + b * 0.114
cb = 0.564 * (b - y) + 128
cr = 0.713 * (r - y) + 128
return torch.stack([y, cb, cr], dim=1)
def _frequency_normalize(self, x: torch.Tensor) -> torch.Tensor:
"""Normalize DCT coefficients using precomputed statistics."""
std = self.var ** 0.5 + 1e-8
return (x - self.mean) / std
def forward(self, x: torch.Tensor) -> torch.Tensor:
b, c, h, w = x.shape
x = self._denormalize(x)
x = self._rgb_to_ycbcr(x)
# Extract non-overlapping k x k patches
x = x.reshape(b, c, h // self.k, self.k, w // self.k, self.k) # (B, C, H//k, k, W//k, k)
x = x.permute(0, 2, 4, 1, 3, 5) # (B, H//k, W//k, C, k, k)
x = self.transform(x)
x = x.reshape(-1, c, self.k * self.k)
x = x[:, :, self.permutation]
x = self._frequency_normalize(x)
x = x.reshape(b, h // self.k, w // self.k, c, -1)
x = x.permute(0, 3, 4, 1, 2).contiguous()
x_y = self.conv_y(x[:, 0])
x_cb = self.conv_cb(x[:, 1])
x_cr = self.conv_cr(x[:, 2])
return torch.cat([x_y, x_cb, x_cr], dim=1)
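# Shape sketch (assumes H and W are divisible by the 8x8 DCT block size used by the stem):
#   stem = LearnableDct2d(8, out_chs=32)
#   x = torch.randn(2, 3, 512, 512)     # ImageNet-normalized RGB batch
#   stem(x).shape                       # -> torch.Size([2, 32, 64, 64]), i.e. 8x spatial reduction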
class Dct2dStats(nn.Module):
"""Utility module to compute DCT coefficient statistics."""
def __init__(
self,
kernel_size: int,
kernel_type: int = 2,
orthonormal: bool = True,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
self.k = kernel_size
self.transform = Dct2d(kernel_size, kernel_type, orthonormal, **dd)
self.permutation = _zigzag_permutation(kernel_size, kernel_size)
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
b, c, h, w = x.shape
# Extract non-overlapping k x k patches
x = x.reshape(b, c, h // self.k, self.k, w // self.k, self.k) # (B, C, H//k, k, W//k, k)
x = x.permute(0, 2, 4, 1, 3, 5) # (B, H//k, W//k, C, k, k)
x = self.transform(x)
x = x.reshape(-1, c, self.k * self.k)
x = x[:, :, self.permutation]
x = x.reshape(b * (h // self.k) * (w // self.k), c, -1)
mean_list = torch.zeros([3, 64])
var_list = torch.zeros([3, 64])
for i in range(3):
mean_list[i] = torch.mean(x[:, i], dim=0)
var_list[i] = torch.var(x[:, i], dim=0)
return mean_list, var_list
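# Hedged usage sketch for regenerating statistics like _DCT_MEAN/_DCT_VAR above: average the
# per-batch outputs over a dataset (a simple mean of batch means/variances, which only
# approximates the exact pooled statistics). The `loader` yielding YCbCr tensors in the
# [0, 255] range is an assumption for illustration, matching LearnableDct2d's preprocessing.
#   stats = Dct2dStats(8)
#   means, variances = zip(*(stats(batch) for batch in loader))
#   dct_mean, dct_var = torch.stack(means).mean(0), torch.stack(variances).mean(0)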
class Block(nn.Module):
"""ConvNeXt-style block with spatial attention."""
def __init__(
self,
dim: int,
drop_path: float = 0.,
ls_init_value: Optional[float] = None,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim, **dd)
self.norm = nn.LayerNorm(dim, eps=1e-6, **dd)
self.pwconv1 = nn.Linear(dim, 4 * dim, **dd)
self.act = nn.GELU()
self.grn = GlobalResponseNorm(4 * dim, channels_last=True, **dd)
self.pwconv2 = nn.Linear(4 * dim, dim, **dd)
self.ls = LayerScale2d(dim, init_values=ls_init_value, **dd) if ls_init_value else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.attn = SpatialAttention(**dd)
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
x = x.permute(0, 3, 1, 2)
attn = self.attn(x)
attn = F.interpolate(attn, size=x.shape[2:], mode='bilinear', align_corners=True)
x = x * attn
x = self.ls(x)
return shortcut + self.drop_path(x)
class SpatialTransformerBlock(nn.Module):
"""Lightweight transformer block for spatial attention (1-channel, 7x7 grid).
This is a simplified transformer with single-head, 1-dim attention over spatial
positions. Used inside SpatialAttention where input is 1 channel at 7x7 resolution.
"""
def __init__(
self,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
# Single-head attention with 1-dim q/k/v (no output projection needed)
self.pos_embed = PosConv(in_chans=1, **dd)
self.norm1 = nn.LayerNorm(1, **dd)
self.qkv = nn.Linear(1, 3, bias=False, **dd)
# Feedforward: 1 -> 4 -> 1
self.norm2 = nn.LayerNorm(1, **dd)
self.mlp = Mlp(1, 4, 1, act_layer=nn.GELU, **dd)
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
# Attention block
shortcut = x
x_t = x.flatten(2).transpose(1, 2) # (B, N, 1)
x_t = self.norm1(x_t)
x_t = self.pos_embed(x_t, (H, W))
# Simple single-head attention with scalar q/k/v
qkv = self.qkv(x_t) # (B, N, 3)
q, k, v = qkv.chunk(3, dim=-1) # each (B, N, 1)
attn = (q @ k.transpose(-1, -2)).softmax(dim=-1) # (B, N, N)
x_t = attn @ v # (B, N, 1)
x_t = x_t.transpose(1, 2).reshape(B, C, H, W)
x = shortcut + x_t
# Feedforward block
shortcut = x
x_t = x.flatten(2).transpose(1, 2)
x_t = self.mlp(self.norm2(x_t))
x_t = x_t.transpose(1, 2).reshape(B, C, H, W)
x = shortcut + x_t
return x
class SpatialAttention(nn.Module):
"""Spatial attention module using channel statistics and transformer."""
def __init__(
self,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.conv = nn.Conv2d(2, 1, kernel_size=7, padding=3, **dd)
self.attn = SpatialTransformerBlock(**dd)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x_avg = x.mean(dim=1, keepdim=True)
x_max = x.amax(dim=1, keepdim=True)
x = torch.cat([x_avg, x_max], dim=1)
x = self.avgpool(x)
x = self.conv(x)
x = self.attn(x)
return x
class TransformerBlock(nn.Module):
"""Transformer block with optional downsampling and convolutional position encoding."""
def __init__(
self,
inp: int,
oup: int,
num_heads: int = 8,
attn_head_dim: int = 32,
downsample: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.,
drop_path: float = 0.,
ls_init_value: Optional[float] = None,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
hidden_dim = int(inp * 4)
self.downsample = downsample
if self.downsample:
self.pool1 = nn.MaxPool2d(3, 2, 1)
self.pool2 = nn.MaxPool2d(3, 2, 1)
self.proj = nn.Conv2d(inp, oup, 1, 1, 0, bias=False, **dd)
else:
self.pool1 = nn.Identity()
self.pool2 = nn.Identity()
self.proj = nn.Identity()
self.pos_embed = PosConv(in_chans=inp, **dd)
self.norm1 = nn.LayerNorm(inp, **dd)
self.attn = Attention(
dim=inp,
num_heads=num_heads,
attn_head_dim=attn_head_dim,
dim_out=oup,
attn_drop=attn_drop,
proj_drop=proj_drop,
**dd,
)
self.ls1 = LayerScale(oup, init_values=ls_init_value, **dd) if ls_init_value else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = nn.LayerNorm(oup, **dd)
self.mlp = Mlp(oup, hidden_dim, oup, act_layer=nn.GELU, drop=proj_drop, **dd)
self.ls2 = LayerScale(oup, init_values=ls_init_value, **dd) if ls_init_value else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.downsample:
shortcut = self.proj(self.pool1(x))
x_t = self.pool2(x)
B, C, H, W = x_t.shape
x_t = x_t.flatten(2).transpose(1, 2)
x_t = self.norm1(x_t)
x_t = self.pos_embed(x_t, (H, W))
x_t = self.ls1(self.attn(x_t))
x_t = x_t.transpose(1, 2).reshape(B, -1, H, W)
x = shortcut + self.drop_path1(x_t)
else:
B, C, H, W = x.shape
shortcut = x
x_t = x.flatten(2).transpose(1, 2)
x_t = self.norm1(x_t)
x_t = self.pos_embed(x_t, (H, W))
x_t = self.ls1(self.attn(x_t))
x_t = x_t.transpose(1, 2).reshape(B, -1, H, W)
x = shortcut + self.drop_path1(x_t)
# MLP block
B, C, H, W = x.shape
shortcut = x
x_t = x.flatten(2).transpose(1, 2)
x_t = self.ls2(self.mlp(self.norm2(x_t)))
x_t = x_t.transpose(1, 2).reshape(B, C, H, W)
x = shortcut + self.drop_path2(x_t)
return x
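# Shape note (illustration): with downsample=True the block halves the spatial grid and projects
# channels inp -> oup, e.g. (B, inp, 14, 14) -> (B, oup, 7, 7); with downsample=False the shape
# is preserved, and inp must equal oup so the residual addition is valid.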
class PosConv(nn.Module):
"""Convolutional position encoding."""
def __init__(
self,
in_chans: int,
device=None,
dtype=None,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
self.proj = nn.Conv2d(in_chans, in_chans, kernel_size=3, stride=1, padding=1, bias=True, groups=in_chans, **dd)
def forward(self, x: torch.Tensor, size: Tuple[int, int]) -> torch.Tensor:
B, N, C = x.shape
H, W = size
cnn_feat = x.transpose(1, 2).view(B, C, H, W)
x = self.proj(cnn_feat) + cnn_feat
return x.flatten(2).transpose(1, 2)
class CSATv2(nn.Module):
"""CSATv2: Frequency-domain vision model with spatial attention.
A hybrid architecture that processes images in the DCT frequency domain
with ConvNeXt-style blocks and transformer attention.
"""
def __init__(
self,
num_classes: int = 1000,
in_chans: int = 3,
dims: Tuple[int, ...] = (32, 72, 168, 386),
depths: Tuple[int, ...] = (2, 2, 8, 6),
transformer_depths: Tuple[int, ...] = (0, 0, 2, 2),
drop_path_rate: float = 0.0,
transformer_drop_path: bool = False,
ls_init_value: Optional[float] = None,
global_pool: str = 'avg',
device=None,
dtype=None,
**kwargs,
) -> None:
dd = dict(device=device, dtype=dtype)
super().__init__()
if in_chans != 3:
warnings.warn(
f'CSATv2 is designed for 3-channel RGB input. '
f'in_chans={in_chans} may not work correctly with the DCT stem.'
)
self.num_classes = num_classes
self.in_chans = in_chans
self.global_pool = global_pool
self.grad_checkpointing = False
self.num_features = dims[-1]
self.head_hidden_size = self.num_features
# Build feature_info dynamically
self.feature_info = [dict(num_chs=dims[0], reduction=8, module='stem_dct')]
reduction = 8
for i, dim in enumerate(dims):
if i > 0:
reduction *= 2
self.feature_info.append(dict(num_chs=dim, reduction=reduction, module=f'stages.{i}'))
# Build drop path rates for all blocks (0 for transformer blocks when transformer_drop_path=False)
total_blocks = sum(depths) if transformer_drop_path else sum(d - t for d, t in zip(depths, transformer_depths))
dp_iter = iter(torch.linspace(0, drop_path_rate, total_blocks).tolist())
dp_rates = []
for depth, t_depth in zip(depths, transformer_depths):
dp_rates += [next(dp_iter) for _ in range(depth - t_depth)]
dp_rates += [next(dp_iter) if transformer_drop_path else 0. for _ in range(t_depth)]
self.stem_dct = LearnableDct2d(8, out_chs=dims[0], **dd)
# Build stages dynamically
dp_iter = iter(dp_rates)
stages = []
for i, (dim, depth, t_depth) in enumerate(zip(dims, depths, transformer_depths)):
layers = (
# Downsample at start of stage (except first stage)
([nn.Conv2d(dims[i - 1], dim, kernel_size=2, stride=2, **dd)] if i > 0 else []) +
# Conv blocks
[Block(dim=dim, drop_path=next(dp_iter), ls_init_value=ls_init_value, **dd) for _ in range(depth - t_depth)] +
# Transformer blocks at end of stage
[TransformerBlock(inp=dim, oup=dim, drop_path=next(dp_iter), ls_init_value=ls_init_value, **dd) for _ in range(t_depth)] +
# Trailing LayerNorm (except last stage)
([LayerNorm2d(dim, eps=1e-6, **dd)] if i < len(depths) - 1 else [])
)
stages.append(nn.Sequential(*layers))
self.stages = nn.Sequential(*stages)
self.head = NormMlpClassifierHead(dims[-1], num_classes, pool_type=global_pool, **dd)
# TODO: skip init when on meta device when safe to do so
self.init_weights(needs_reset=False)
def init_weights(self, needs_reset: bool = True):
self.apply(partial(self._init_weights, needs_reset=needs_reset))
def _init_weights(self, m: nn.Module, needs_reset: bool = True) -> None:
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif needs_reset and hasattr(m, 'reset_parameters'):
m.reset_parameters()
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head.reset(num_classes, pool_type=global_pool)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True) -> None:
self.grad_checkpointing = enable
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.stem_dct(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
"""Forward pass returning intermediate features.
Args:
x: Input image tensor.
indices: Indices of features to return (0=stem_dct, 1-4=stages). None returns all.
norm: Apply norm layer to final intermediate (unused, for API compat).
stop_early: Stop iterating when last desired intermediate is reached.
output_fmt: Output format, must be 'NCHW'.
intermediates_only: Only return intermediate features.
Returns:
List of intermediate features or tuple of (final features, intermediates).
"""
assert output_fmt == 'NCHW', 'Output format must be NCHW.'
intermediates = []
# 5 feature levels: stem_dct (0) + stages 0-3 (1-4)
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
x = self.stem_dct(x)
if 0 in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early:
stages = self.stages
else:
# max_index is 0-4, stages are 1-4, so we need max_index stages
stages = self.stages[:max_index] if max_index > 0 else []
for feat_idx, stage in enumerate(stages):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(stage, x)
else:
x = stage(x)
if feat_idx + 1 in take_indices: # +1 because stem is index 0
intermediates.append(x)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
) -> List[int]:
"""Prune layers not required for specified intermediates.
Args:
indices: Indices of intermediate layers to keep (0=stem_dct, 1-4=stages).
prune_norm: Whether to prune the final norm layer.
prune_head: Whether to prune the classifier head.
Returns:
List of indices that were kept.
"""
# 5 feature levels: stem_dct (0) + stages 0-3 (1-4)
take_indices, max_index = feature_take_indices(len(self.stages) + 1, indices)
# max_index is 0-4, stages are 1-4, so we keep max_index stages
self.stages = self.stages[:max_index] if max_index > 0 else nn.Sequential()
if prune_norm:
self.head.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
return self.head(x, pre_logits=pre_logits)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
return self.forward_head(x)
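# Hedged usage sketch (relies on the register_model entries defined below and timm's factory;
# the 512x512 input matches the default cfg's input_size):
#   model = timm.create_model('csatv2', pretrained=False, num_classes=10)
#   model(torch.randn(1, 3, 512, 512)).shape   # -> torch.Size([1, 10])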
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 512, 512), 'pool_size': (8, 8),
'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
'interpolation': 'bilinear', 'crop_pct': 1.0,
'classifier': 'head.fc', 'first_conv': [],
**kwargs,
}
default_cfgs = generate_default_cfgs({
'csatv2.r512_in1k': _cfg(
hf_hub_id='timm/',
),
'csatv2_21m.sw_r640_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 640, 640),
interpolation='bicubic',
),
'csatv2_21m.sw_r512_in1k': _cfg(
hf_hub_id='timm/',
pool_size=(10, 10),
interpolation='bicubic',
),
})
def checkpoint_filter_fn(state_dict: dict, model: nn.Module) -> dict:
"""Remap original CSATv2 checkpoint to timm format.
Handles two key structural changes:
1) Stage naming: stages1/2/3/4 -> stages.0/1/2/3
2) Downsample position: moved from end of stage N to start of stage N+1
"""
if "stages.0.0.grn.weight" in state_dict:
return state_dict # already in timm format
import re
# FIXME this downsample idx is hard-wired to the original 'csatv2' model size
downsample_idx = {1: 3, 2: 3, 3: 9} # original stage -> downsample index
dct_re = re.compile(r"^dct\.")
stage_re = re.compile(r"^stages([1-4])\.(\d+)\.(.*)$")
head_re = re.compile(r"^head\.")
norm_re = re.compile(r"^norm\.")
def remap_stage(m: re.Match) -> str:
stage, idx, rest = int(m.group(1)), int(m.group(2)), m.group(3)
if stage in downsample_idx and idx == downsample_idx[stage]:
return f"stages.{stage}.0.{rest}" # move downsample to next stage @0
if stage == 1:
return f"stages.0.{idx}.{rest}" # stage1 -> stages.0
return f"stages.{stage - 1}.{idx + 1}.{rest}" # stage2-4 -> stages.1-3, shift +1
out = {}
for k, v in state_dict.items():
# dct -> stem_dct, and Y/Cb/Cr conv names
k = dct_re.sub("stem_dct.", k)
k = (k.replace(".Y_Conv.", ".conv_y.")
.replace(".Cb_Conv.", ".conv_cb.")
.replace(".Cr_Conv.", ".conv_cr."))
# stage remap + downsample relocation
k = stage_re.sub(remap_stage, k)
# GRN: gamma/beta -> weight/bias (reshape)
if "grn.gamma" in k:
k, v = k.replace("grn.gamma", "grn.weight"), v.reshape(-1)
elif "grn.beta" in k:
k, v = k.replace("grn.beta", "grn.bias"), v.reshape(-1)
# FeedForward(nn.Sequential) -> Mlp + norm renames
if ".ff.net.0." in k:
k = k.replace(".ff.net.0.", ".mlp.fc1.")
elif ".ff.net.3." in k:
k = k.replace(".ff.net.3.", ".mlp.fc2.")
elif ".ff_norm." in k:
k = k.replace(".ff_norm.", ".norm2.")
elif ".attn_norm." in k:
k = k.replace(".attn_norm.", ".norm1.")
# attention -> attn (handle nested first)
if ".attention.attention." in k:
k = (k.replace(".attention.attention.attn.to_qkv.", ".attn.attn.qkv.")
.replace(".attention.attention.attn.", ".attn.attn.")
.replace(".attention.attention.", ".attn.attn."))
elif ".attention." in k:
k = k.replace(".attention.", ".attn.")
# TransformerBlock attention name remaps
if ".attn.to_qkv." in k:
k = k.replace(".attn.to_qkv.", ".attn.qkv.")
elif ".attn.to_out.0." in k:
k = k.replace(".attn.to_out.0.", ".attn.proj.")
# .attn.pos_embed -> .pos_embed (but not SpatialTransformerBlock's .attn.attn.pos_embed)
if ".attn.pos_embed." in k and ".attn.attn." not in k:
k = k.replace(".attn.pos_embed.", ".pos_embed.")
# head -> head.fc, norm -> head.norm (order matters)
k = head_re.sub("head.fc.", k)
k = norm_re.sub("head.norm.", k)
out[k] = v
return out
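# Illustrative remappings produced by the function above for the base 'csatv2' layout
# (downsample indices {1: 3, 2: 3, 3: 9}); the specific keys are hypothetical examples:
#   'dct.Y_Conv.weight'       -> 'stem_dct.conv_y.weight'
#   'stages1.0.dwconv.weight' -> 'stages.0.0.dwconv.weight'   # stage1 -> stages.0
#   'stages1.3.weight'        -> 'stages.1.0.weight'          # downsample moved to next stage
#   'stages2.1.grn.gamma'     -> 'stages.1.2.grn.weight'      # shift +1, GRN rename (+ reshape)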
def _create_csatv2(variant: str, pretrained: bool = False, **kwargs) -> CSATv2:
out_indices = kwargs.pop('out_indices', (1, 2, 3, 4))
return build_model_with_cfg(
CSATv2,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, flatten_sequential=True),
default_cfg=default_cfgs[variant],
**kwargs,
)
@register_model
def csatv2(pretrained: bool = False, **kwargs) -> CSATv2:
return _create_csatv2('csatv2', pretrained, **kwargs)
@register_model
def csatv2_21m(pretrained: bool = False, **kwargs) -> CSATv2:
# experimental ~20-21M param larger model to validate flexible arch spec
model_args = dict(
dims = (48, 96, 224, 448),
depths = (3, 3, 10, 8),
transformer_depths = (0, 0, 4, 3)
)
return _create_csatv2('csatv2_21m', pretrained, **dict(model_args, **kwargs)) | {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/models/csatv2.py",
"license": "Apache License 2.0",
"lines": 732,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/layers/diff_attention.py | """Differential Attention
Paper: 'Differential Transformer' - https://arxiv.org/abs/2410.05258
Reference impl: https://github.com/microsoft/unilm/tree/master/Diff-Transformer
Hacked together by / Copyright 2024, Ross Wightman
"""
import math
from typing import Optional, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
from .attention import maybe_add_mask
from .config import use_fused_attn
from .norm import RmsNorm
class DiffAttention(nn.Module):
"""Differential Attention module.
Computes attention as the difference between two softmax attention maps, which helps
cancel out noise and promotes sparse attention patterns. The module splits Q and K
into two groups, computes separate attention maps, and subtracts one from the other
scaled by a learnable lambda parameter.
The attention output is computed as:
Attn = softmax(Q1 @ K1^T) - lambda * softmax(Q2 @ K2^T)
Output = Attn @ V
Supports both fused (scaled_dot_product_attention) and manual implementations.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
qk_norm: bool = False,
scale_norm: bool = False,
proj_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
norm_layer: Optional[Type[nn.Module]] = None,
depth: int = 0,
dual_lambda: bool = False,
device=None,
dtype=None,
) -> None:
"""Initialize the DiffAttention module.
Args:
dim: Input dimension of the token embeddings.
num_heads: Number of attention heads.
qkv_bias: Whether to use bias in the query, key, value projections.
qk_norm: Whether to apply normalization to query and key vectors.
scale_norm: Whether to apply normalization before the output projection.
proj_bias: Whether to use bias in the output projection.
attn_drop: Dropout rate applied to the attention weights.
proj_drop: Dropout rate applied after the output projection.
norm_layer: Normalization layer constructor (defaults to RmsNorm).
depth: Block depth index, used to compute depth-dependent lambda_init.
dual_lambda: If True, use simplified dual scalar lambda parameterization
(2 params). If False, use the paper's original formulation with
lambda_q/k vectors (4 * head_dim params).
"""
super().__init__()
dd = {'device': device, 'dtype': dtype}
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
if norm_layer is None:
norm_layer = RmsNorm
self.num_heads = num_heads
self.head_dim = dim // num_heads // 2
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias, **dd)
self.q_norm = norm_layer(self.head_dim, **dd) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim, **dd) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.attn_drop_p = attn_drop
self.norm = norm_layer(dim, **dd) if scale_norm else nn.Identity()
self.proj = nn.Linear(dim, dim, bias=proj_bias, **dd)
self.proj_drop = nn.Dropout(proj_drop)
self.dual_lambda = dual_lambda
if dual_lambda:
self.lambda_a = nn.Parameter(torch.empty((), dtype=torch.float32, device=device))
self.lambda_b = nn.Parameter(torch.empty((), dtype=torch.float32, device=device))
self.lambda_q1 = self.lambda_k1 = self.lambda_q2 = self.lambda_k2 = None
else:
self.lambda_a = self.lambda_b = None
self.lambda_q1 = nn.Parameter(torch.empty(self.head_dim, dtype=torch.float32, device=device))
self.lambda_k1 = nn.Parameter(torch.empty(self.head_dim, dtype=torch.float32, device=device))
self.lambda_q2 = nn.Parameter(torch.empty(self.head_dim, dtype=torch.float32, device=device))
self.lambda_k2 = nn.Parameter(torch.empty(self.head_dim, dtype=torch.float32, device=device))
self.sub_norm = RmsNorm(2 * self.head_dim, eps=1e-5, **dd)
self.lambda_init = 0.8
self.set_lambda_init(depth)
self.reset_parameters()
def set_lambda_init(self, depth: int):
self.lambda_init = 0.8 - 0.6 * math.exp(-0.3 * depth)
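# Worked values of the depth-dependent initialization above (the schedule from the
# Differential Transformer paper):
#   depth=0 -> 0.8 - 0.6*exp(0)    = 0.200
#   depth=5 -> 0.8 - 0.6*exp(-1.5) ~= 0.666
#   large depth -> approaches 0.8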
def reset_parameters(self):
if self.dual_lambda:
nn.init.zeros_(self.lambda_a)
nn.init.zeros_(self.lambda_b)
else:
nn.init.normal_(self.lambda_q1, mean=0, std=0.1)
nn.init.normal_(self.lambda_k1, mean=0, std=0.1)
nn.init.normal_(self.lambda_q2, mean=0, std=0.1)
nn.init.normal_(self.lambda_k2, mean=0, std=0.1)
def _compute_lambda(self) -> torch.Tensor:
if self.lambda_a is not None:
lambda_1 = torch.exp(self.lambda_a)
lambda_2 = torch.exp(self.lambda_b)
else:
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1).float())
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1).float())
return lambda_1 - lambda_2 + self.lambda_init
def forward(
self,
x: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
B, N, C = x.shape
q, k, v = self.qkv(x).chunk(3, dim=2)
q = q.reshape(B, N, 2 * self.num_heads, self.head_dim).transpose(1, 2)
k = k.reshape(B, N, 2 * self.num_heads, self.head_dim).transpose(1, 2)
v = v.reshape(B, N, self.num_heads, 2 * self.head_dim).transpose(1, 2)
q, k = self.q_norm(q), self.k_norm(k)
lambda_full = self._compute_lambda().type_as(q)
if self.fused_attn:
q = q.reshape(B, self.num_heads, 2, N, self.head_dim)
k = k.reshape(B, self.num_heads, 2, N, self.head_dim)
q1, q2 = q.unbind(2)
k1, k2 = k.unbind(2)
dropout_p = self.attn_drop_p if self.training else 0.0
attn1 = F.scaled_dot_product_attention(q1, k1, v, attn_mask=attn_mask, dropout_p=dropout_p)
attn2 = F.scaled_dot_product_attention(q2, k2, v, attn_mask=attn_mask, dropout_p=dropout_p)
x = attn1 - lambda_full * attn2
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = maybe_add_mask(attn, attn_mask)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
attn = attn.view(B, self.num_heads, 2, N, N)
attn = attn[:, :, 0] - lambda_full * attn[:, :, 1]
x = attn @ v
x = self.sub_norm(x)
x = x * (1 - self.lambda_init)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.norm(x)
x = self.proj(x)
x = self.proj_drop(x)
return x
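# Minimal usage sketch (illustration, relying on the imports at the top of this file): the module
# preserves the (batch, tokens, dim) shape of a standard attention block, splitting heads
# internally into paired halves of width head_dim = dim // num_heads // 2.
#   attn = DiffAttention(dim=64, num_heads=4, depth=2)
#   x = torch.randn(2, 16, 64)
#   attn(x).shape  # -> torch.Size([2, 16, 64])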
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/layers/diff_attention.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/pytorch-image-models:tests/test_layers_pool.py | """Tests for timm pooling layers."""
import pytest
import torch
import torch.nn as nn
import importlib
import os
torch_backend = os.environ.get('TORCH_BACKEND')
if torch_backend is not None:
importlib.import_module(torch_backend)
torch_device = os.environ.get('TORCH_DEVICE', 'cpu')
# Adaptive Avg/Max Pooling Tests
class TestAdaptiveAvgMaxPool:
"""Test adaptive_avgmax_pool module."""
def test_adaptive_avgmax_pool2d(self):
from timm.layers import adaptive_avgmax_pool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
out = adaptive_avgmax_pool2d(x, 1)
assert out.shape == (2, 64, 1, 1)
# Should be average of avg and max
expected = 0.5 * (x.mean(dim=(2, 3), keepdim=True) + x.amax(dim=(2, 3), keepdim=True))
assert torch.allclose(out, expected)
def test_select_adaptive_pool2d(self):
from timm.layers import select_adaptive_pool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
out_avg = select_adaptive_pool2d(x, pool_type='avg', output_size=1)
assert out_avg.shape == (2, 64, 1, 1)
assert torch.allclose(out_avg, x.mean(dim=(2, 3), keepdim=True))
out_max = select_adaptive_pool2d(x, pool_type='max', output_size=1)
assert out_max.shape == (2, 64, 1, 1)
assert torch.allclose(out_max, x.amax(dim=(2, 3), keepdim=True))
def test_adaptive_avgmax_pool2d_module(self):
from timm.layers import AdaptiveAvgMaxPool2d
x = torch.randn(2, 64, 14, 14, device=torch_device)
pool = AdaptiveAvgMaxPool2d(output_size=1).to(torch_device)
out = pool(x)
assert out.shape == (2, 64, 1, 1)
def test_select_adaptive_pool2d_module(self):
from timm.layers import SelectAdaptivePool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
for pool_type in ['avg', 'max', 'avgmax', 'catavgmax']:
pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True).to(torch_device)
out = pool(x)
if pool_type == 'catavgmax':
assert out.shape == (2, 128) # concatenated
else:
assert out.shape == (2, 64)
def test_select_adaptive_pool2d_fast(self):
from timm.layers import SelectAdaptivePool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
for pool_type in ['fast', 'fastavg', 'fastmax', 'fastavgmax', 'fastcatavgmax']:
pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True).to(torch_device)
out = pool(x)
if 'cat' in pool_type:
assert out.shape == (2, 128)
else:
assert out.shape == (2, 64)
# Attention Pool Tests
class TestAttentionPool:
"""Test attention-based pooling layers."""
def test_attention_pool_latent_basic(self):
from timm.layers import AttentionPoolLatent
x = torch.randn(2, 49, 64, device=torch_device)
pool = AttentionPoolLatent(in_features=64, num_heads=4).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_attention_pool_latent_multi_latent(self):
from timm.layers import AttentionPoolLatent
x = torch.randn(2, 49, 64, device=torch_device)
pool = AttentionPoolLatent(
in_features=64,
num_heads=4,
latent_len=4,
pool_type='avg',
).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_attention_pool2d_basic(self):
from timm.layers import AttentionPool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = AttentionPool2d(in_features=64, feat_size=7).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_attention_pool2d_different_feat_size(self):
from timm.layers import AttentionPool2d
# Test with different spatial sizes (requires pos_embed interpolation)
pool = AttentionPool2d(in_features=64, feat_size=7).to(torch_device)
for size in [7, 14]:
x = torch.randn(2, 64, size, size, device=torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_rot_attention_pool2d_basic(self):
from timm.layers import RotAttentionPool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = RotAttentionPool2d(in_features=64, ref_feat_size=7).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_rot_attention_pool2d_different_sizes(self):
from timm.layers import RotAttentionPool2d
pool = RotAttentionPool2d(in_features=64, ref_feat_size=7).to(torch_device)
for size in [7, 14, 10]:
x = torch.randn(2, 64, size, size, device=torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_rot_attention_pool2d_rope_types(self):
from timm.layers import RotAttentionPool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
for rope_type in ['base', 'cat', 'dinov3']:
pool = RotAttentionPool2d(
in_features=64,
ref_feat_size=7,
rope_type=rope_type,
).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
# LSE Pool Tests
class TestLsePool:
"""Test LogSumExp pooling layers."""
def test_lse_plus_2d_basic(self):
from timm.layers import LsePlus2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = LsePlus2d().to(torch_device)
out = pool(x)
# Default is flatten=True
assert out.shape == (2, 64)
def test_lse_plus_2d_no_flatten(self):
from timm.layers import LsePlus2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = LsePlus2d(flatten=False).to(torch_device)
out = pool(x)
assert out.shape == (2, 64, 1, 1)
def test_lse_plus_1d_basic(self):
from timm.layers import LsePlus1d
x = torch.randn(2, 49, 64, device=torch_device)
pool = LsePlus1d().to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_lse_high_r_approximates_max(self):
from timm.layers import LsePlus2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = LsePlus2d(r=100.0, r_learnable=False).to(torch_device)
out = pool(x)
out_max = x.amax(dim=(2, 3))
assert torch.allclose(out, out_max, atol=0.1)
def test_lse_low_r_approximates_avg(self):
from timm.layers import LsePlus2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = LsePlus2d(r=0.01, r_learnable=False).to(torch_device)
out = pool(x)
out_avg = x.mean(dim=(2, 3))
assert torch.allclose(out, out_avg, atol=0.1)
def test_lse_learnable_r_gradient(self):
from timm.layers import LsePlus2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = LsePlus2d(r=10.0, r_learnable=True).to(torch_device)
out = pool(x).sum()
out.backward()
assert pool.r.grad is not None
assert pool.r.grad.abs() > 0
# SimPool Tests
class TestSimPool:
"""Test SimPool attention-based pooling layers."""
def test_simpool_2d_basic(self):
from timm.layers import SimPool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = SimPool2d(dim=64).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_simpool_1d_basic(self):
from timm.layers import SimPool1d
x = torch.randn(2, 49, 64, device=torch_device)
pool = SimPool1d(dim=64).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_simpool_multi_head(self):
from timm.layers import SimPool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
for num_heads in [1, 2, 4, 8]:
pool = SimPool2d(dim=64, num_heads=num_heads).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
def test_simpool_with_gamma(self):
from timm.layers import SimPool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = SimPool2d(dim=64, gamma=2.0).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
assert not torch.isnan(out).any()
def test_simpool_qk_norm(self):
from timm.layers import SimPool2d
x = torch.randn(2, 64, 7, 7, device=torch_device)
pool = SimPool2d(dim=64, qk_norm=True).to(torch_device)
out = pool(x)
assert out.shape == (2, 64)
# Common Tests (Gradient, JIT, dtype)
class TestPoolingCommon:
"""Common tests across all pooling layers."""
@pytest.mark.parametrize('pool_cls,kwargs,input_shape', [
('LsePlus2d', {}, (2, 64, 7, 7)),
('LsePlus1d', {}, (2, 49, 64)),
('SimPool2d', {'dim': 64}, (2, 64, 7, 7)),
('SimPool1d', {'dim': 64}, (2, 49, 64)),
('SelectAdaptivePool2d', {'pool_type': 'avg', 'flatten': True}, (2, 64, 7, 7)),
('AttentionPoolLatent', {'in_features': 64, 'num_heads': 4}, (2, 49, 64)),
('AttentionPool2d', {'in_features': 64, 'feat_size': 7}, (2, 64, 7, 7)),
('RotAttentionPool2d', {'in_features': 64, 'ref_feat_size': 7}, (2, 64, 7, 7)),
])
def test_gradient_flow(self, pool_cls, kwargs, input_shape):
import timm.layers as layers
x = torch.randn(*input_shape, device=torch_device, requires_grad=True)
pool = getattr(layers, pool_cls)(**kwargs).to(torch_device)
out = pool(x)
loss = out.sum()
loss.backward()
assert x.grad is not None
assert x.grad.abs().sum() > 0
@pytest.mark.parametrize('pool_cls,kwargs,input_shape', [
('LsePlus2d', {}, (2, 64, 7, 7)),
('LsePlus1d', {}, (2, 49, 64)),
('SimPool2d', {'dim': 64}, (2, 64, 7, 7)),
('SimPool1d', {'dim': 64}, (2, 49, 64)),
('AttentionPool2d', {'in_features': 64, 'feat_size': 7}, (2, 64, 7, 7)),
('RotAttentionPool2d', {'in_features': 64, 'ref_feat_size': 7}, (2, 64, 7, 7)),
])
def test_torchscript(self, pool_cls, kwargs, input_shape):
import timm.layers as layers
x = torch.randn(*input_shape, device=torch_device)
pool = getattr(layers, pool_cls)(**kwargs).to(torch_device)
pool.eval()
scripted = torch.jit.script(pool)
out_orig = pool(x)
out_script = scripted(x)
assert torch.allclose(out_orig, out_script, atol=1e-5)
@pytest.mark.parametrize('pool_cls,kwargs,input_shape', [
('LsePlus2d', {}, (2, 64, 7, 7)),
('LsePlus1d', {}, (2, 49, 64)),
('SimPool2d', {'dim': 64}, (2, 64, 7, 7)),
('SimPool1d', {'dim': 64}, (2, 49, 64)),
('AttentionPool2d', {'in_features': 64, 'feat_size': 7}, (2, 64, 7, 7)),
('RotAttentionPool2d', {'in_features': 64, 'ref_feat_size': 7}, (2, 64, 7, 7)),
])
def test_eval_deterministic(self, pool_cls, kwargs, input_shape):
import timm.layers as layers
x = torch.randn(*input_shape, device=torch_device)
pool = getattr(layers, pool_cls)(**kwargs).to(torch_device)
pool.eval()
with torch.no_grad():
out1 = pool(x)
out2 = pool(x)
assert torch.allclose(out1, out2)
@pytest.mark.parametrize('pool_cls,kwargs,input_shape', [
('LsePlus2d', {}, (2, 64, 7, 7)),
('SimPool2d', {'dim': 64}, (2, 64, 7, 7)),
('RotAttentionPool2d', {'in_features': 64, 'ref_feat_size': 7}, (2, 64, 7, 7)),
])
def test_different_spatial_sizes(self, pool_cls, kwargs, input_shape):
import timm.layers as layers
B, C, _, _ = input_shape
pool = getattr(layers, pool_cls)(**kwargs).to(torch_device)
for H, W in [(7, 7), (14, 14), (1, 1), (3, 5)]:
x = torch.randn(B, C, H, W, device=torch_device)
out = pool(x)
assert out.shape[0] == B
assert out.shape[-1] == C
# BlurPool Tests
class TestBlurPool:
"""Test BlurPool anti-aliasing layer."""
def test_blur_pool_2d_basic(self):
from timm.layers import BlurPool2d
x = torch.randn(2, 64, 14, 14, device=torch_device)
pool = BlurPool2d(channels=64).to(torch_device)
out = pool(x)
assert out.shape == (2, 64, 7, 7)
def test_blur_pool_2d_stride(self):
from timm.layers import BlurPool2d
x = torch.randn(2, 64, 28, 28, device=torch_device)
pool = BlurPool2d(channels=64, stride=4).to(torch_device)
out = pool(x)
assert out.shape == (2, 64, 8, 8)
# Pool1d Tests
class TestPool1d:
"""Test 1D pooling utilities."""
def test_global_pool_nlc(self):
from timm.layers import global_pool_nlc
x = torch.randn(2, 49, 64, device=torch_device)
# By default, avg/max excludes first token (num_prefix_tokens=1)
out_avg = global_pool_nlc(x, pool_type='avg')
assert out_avg.shape == (2, 64)
assert torch.allclose(out_avg, x[:, 1:].mean(dim=1))
out_max = global_pool_nlc(x, pool_type='max')
assert out_max.shape == (2, 64)
assert torch.allclose(out_max, x[:, 1:].amax(dim=1))
out_first = global_pool_nlc(x, pool_type='token')
assert out_first.shape == (2, 64)
assert torch.allclose(out_first, x[:, 0])
# Test with reduce_include_prefix=True
out_avg_all = global_pool_nlc(x, pool_type='avg', reduce_include_prefix=True)
assert torch.allclose(out_avg_all, x.mean(dim=1))
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "tests/test_layers_pool.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/pytorch-image-models:timm/layers/other_pool.py | """ Non-Local Attention Pooling Layers
A collection of global pooling layers that go beyond simple avg/max pooling.
LSEPool - LogSumExp pooling, a smooth approximation between avg and max pooling
SimPool - Attention-based pooling from 'Keep It SimPool' (ICCV 2023)
Based on implementations from:
* LSE Pooling: custom implementation by Bill Psomas
* SimPool: https://arxiv.org/abs/2309.06891 - 'Keep It SimPool: Who Said Supervised Transformers
Suffer from Attention Deficit?' by Bill Psomas et al.
Hacked together by / Copyright 2024 Ross Wightman, original code by Bill Psomas
"""
from typing import Optional, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from .config import use_fused_attn
class LsePlus2d(nn.Module):
"""LogSumExp (LSE) Pooling for 2D inputs.
A smooth approximation to max pooling that provides a learnable interpolation between
average and max pooling. When r is large, LSE approaches max pooling; when r is small,
it approaches average pooling.
Implements: (1/r) * log((1/n) * sum(exp(r * (x - x_max)))) + x_max
The x_max subtraction provides numerical stability.
"""
def __init__(
self,
r: float = 10.0,
r_learnable: bool = True,
flatten: bool = True,
device=None,
dtype=None,
):
"""
Args:
r: Initial value of the pooling parameter. Higher = closer to max pooling.
r_learnable: If True, r is a learnable parameter.
flatten: If True, flatten spatial dims in output.
"""
super().__init__()
if r_learnable:
self.r = nn.Parameter(torch.tensor(r, device=device, dtype=dtype))
else:
self.register_buffer('r', torch.tensor(r, device=device, dtype=dtype))
self.flatten = flatten
def forward(self, x: torch.Tensor) -> torch.Tensor:
x_max = F.adaptive_max_pool2d(x, 1)
exp_x = torch.exp(self.r * (x - x_max))
sum_exp = exp_x.mean(dim=(2, 3), keepdim=True)
out = x_max + (1.0 / self.r) * torch.log(sum_exp)
if self.flatten:
out = out.flatten(1)
return out
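# Limiting behaviour of the formula above (illustration): writing m = x_max,
#   out = m + (1/r) * log(mean(exp(r * (x - m))))
# as r -> inf the log term -> 0, so out -> max(x); as r -> 0, the first-order expansion
# log(1 + r*mean(x - m)) ~= r*mean(x - m) gives out -> mean(x). The tests in
# tests/test_layers_pool.py exercise both limits with r=100 and r=0.01.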
class LsePlus1d(nn.Module):
"""LogSumExp (LSE) Pooling for sequence (NLC) inputs.
A smooth approximation to max pooling that provides a learnable interpolation between
average and max pooling. When r is large, LSE approaches max pooling; when r is small,
it approaches average pooling.
"""
def __init__(
self,
r: float = 10.0,
r_learnable: bool = True,
device=None,
dtype=None,
):
"""
Args:
r: Initial value of the pooling parameter. Higher = closer to max pooling.
r_learnable: If True, r is a learnable parameter.
"""
super().__init__()
if r_learnable:
self.r = nn.Parameter(torch.tensor(r, device=device, dtype=dtype))
else:
self.register_buffer('r', torch.tensor(r, device=device, dtype=dtype))
def forward(self, x: torch.Tensor) -> torch.Tensor:
# x: (B, N, C)
x_max = x.max(dim=1, keepdim=True).values
exp_x = torch.exp(self.r * (x - x_max))
sum_exp = exp_x.mean(dim=1, keepdim=True)
out = x_max + (1.0 / self.r) * torch.log(sum_exp)
return out.squeeze(1) # (B, C)
class SimPool2d(nn.Module):
"""SimPool: Simple Attention-Based Pooling for 2D (NCHW) inputs.
From 'Keep It SimPool: Who Said Supervised Transformers Suffer from Attention Deficit?'
https://arxiv.org/abs/2309.06891
Uses GAP as query initialization and applies cross-attention between the GAP query
and spatial features to produce a weighted pooled representation.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 1,
qkv_bias: bool = False,
qk_norm: bool = False,
gamma: Optional[float] = None,
norm_layer: Optional[Type[nn.Module]] = None,
device=None,
dtype=None,
):
"""
Args:
dim: Input feature dimension (number of channels).
num_heads: Number of attention heads.
qkv_bias: If True, add bias to query and key projections.
qk_norm: If True, apply normalization to queries and keys.
gamma: If provided, apply power normalization to values with this exponent.
norm_layer: Normalization layer for patches and optionally qk_norm.
"""
super().__init__()
dd = {'device': device, 'dtype': dtype}
assert dim % num_heads == 0, 'dim must be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.gamma = gamma
self.fused_attn = use_fused_attn()
norm_layer = norm_layer or nn.LayerNorm
self.norm = norm_layer(dim, **dd)
self.q = nn.Linear(dim, dim, bias=qkv_bias, **dd)
self.k = nn.Linear(dim, dim, bias=qkv_bias, **dd)
if qk_norm:
self.q_norm = norm_layer(self.head_dim, **dd)
self.k_norm = norm_layer(self.head_dim, **dd)
else:
self.q_norm = nn.Identity()
self.k_norm = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
N = H * W
# Reshape to (B, N, C) for attention
x = x.flatten(2).transpose(1, 2) # (B, N, C)
# GAP as query initialization
q = x.mean(dim=1, keepdim=True) # (B, 1, C)
# Normalize patches for keys and values
x_norm = self.norm(x)
# Project query and keys
q = self.q(q).reshape(B, 1, self.num_heads, self.head_dim).transpose(1, 2)
k = self.k(x_norm).reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
v = x_norm.reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
q, k = self.q_norm(q), self.k_norm(k)
if self.gamma is not None:
# Power normalization on values
v_min = v.amin(dim=-2, keepdim=True)
v_shifted = v - v_min + 1e-6
if self.fused_attn:
attn_out = F.scaled_dot_product_attention(q, k, v_shifted.pow(self.gamma))
else:
attn = (q * self.scale) @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn_out = attn @ v_shifted.pow(self.gamma)
out = attn_out.pow(1.0 / self.gamma)
else:
if self.fused_attn:
out = F.scaled_dot_product_attention(q, k, v)
else:
attn = (q * self.scale) @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
out = attn @ v
# (B, num_heads, 1, head_dim) -> (B, C)
out = out.transpose(1, 2).reshape(B, C)
return out
class SimPool1d(nn.Module):
"""SimPool: Simple Attention-Based Pooling for sequence (NLC) inputs.
From 'Keep It SimPool: Who Said Supervised Transformers Suffer from Attention Deficit?'
https://arxiv.org/abs/2309.06891
Uses GAP as query initialization and applies cross-attention between the GAP query
and sequence tokens to produce a weighted pooled representation.
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 1,
qkv_bias: bool = False,
qk_norm: bool = False,
gamma: Optional[float] = None,
norm_layer: Optional[Type[nn.Module]] = None,
device=None,
dtype=None,
):
"""
Args:
dim: Input feature dimension.
num_heads: Number of attention heads.
qkv_bias: If True, add bias to query and key projections.
qk_norm: If True, apply normalization to queries and keys.
gamma: If provided, apply power normalization to values with this exponent.
norm_layer: Normalization layer for tokens and optionally qk_norm.
"""
super().__init__()
dd = {'device': device, 'dtype': dtype}
assert dim % num_heads == 0, 'dim must be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.gamma = gamma
self.fused_attn = use_fused_attn()
norm_layer = norm_layer or nn.LayerNorm
self.norm = norm_layer(dim, **dd)
self.q = nn.Linear(dim, dim, bias=qkv_bias, **dd)
self.k = nn.Linear(dim, dim, bias=qkv_bias, **dd)
if qk_norm:
self.q_norm = norm_layer(self.head_dim, **dd)
self.k_norm = norm_layer(self.head_dim, **dd)
else:
self.q_norm = nn.Identity()
self.k_norm = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, N, C = x.shape
# GAP as query initialization
q = x.mean(dim=1, keepdim=True) # (B, 1, C)
# Normalize tokens for keys and values
x_norm = self.norm(x)
# Project query and keys
q = self.q(q).reshape(B, 1, self.num_heads, self.head_dim).transpose(1, 2)
k = self.k(x_norm).reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
v = x_norm.reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
q, k = self.q_norm(q), self.k_norm(k)
if self.gamma is not None:
# Power normalization on values
v_min = v.amin(dim=-2, keepdim=True)
v_shifted = v - v_min + 1e-6
if self.fused_attn:
attn_out = F.scaled_dot_product_attention(q, k, v_shifted.pow(self.gamma))
else:
attn = (q * self.scale) @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn_out = attn @ v_shifted.pow(self.gamma)
out = attn_out.pow(1.0 / self.gamma)
else:
if self.fused_attn:
out = F.scaled_dot_product_attention(q, k, v)
else:
attn = (q * self.scale) @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
out = attn @ v
# (B, num_heads, 1, head_dim) -> (B, C)
out = out.transpose(1, 2).reshape(B, C)
return out
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/layers/other_pool.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:tests/test_layers_drop.py | """Tests for timm.layers.drop module (DropBlock, DropPath)."""
import torch
import pytest
from timm.layers.drop import drop_block_2d, DropBlock2d, drop_path, DropPath
class TestDropBlock2d:
"""Test drop_block_2d function and DropBlock2d module."""
def test_drop_block_2d_output_shape(self):
"""Test that output shape matches input shape."""
for h, w in [(7, 7), (4, 8), (10, 5), (3, 3)]:
x = torch.ones((2, 3, h, w))
result = drop_block_2d(x, drop_prob=0.1, block_size=3)
assert result.shape == x.shape, f"Shape mismatch for input ({h}, {w})"
def test_drop_block_2d_no_drop_when_prob_zero(self):
"""Test that no dropping occurs when drop_prob=0."""
x = torch.ones((2, 3, 8, 8))
result = drop_block_2d(x, drop_prob=0.0, block_size=3)
assert torch.allclose(result, x)
def test_drop_block_2d_approximate_keep_ratio(self):
"""Test that the drop ratio is approximately correct."""
torch.manual_seed(123)
# Use large batch for statistical stability
x = torch.ones((32, 16, 56, 56))
drop_prob = 0.1
# With scale_by_keep=False, kept values stay at 1.0 and dropped are 0.0
# so we can directly measure the drop ratio
result = drop_block_2d(x, drop_prob=drop_prob, block_size=7, scale_by_keep=False)
total_elements = result.numel()
dropped_elements = (result == 0).sum().item()
actual_drop_ratio = dropped_elements / total_elements
# Allow some tolerance since it's stochastic
assert abs(actual_drop_ratio - drop_prob) < 0.03, \
f"Drop ratio {actual_drop_ratio:.3f} not close to expected {drop_prob}"
def test_drop_block_2d_inplace(self):
"""Test inplace operation."""
x = torch.ones((2, 3, 8, 8))
x_clone = x.clone()
torch.manual_seed(42)
result = drop_block_2d(x_clone, drop_prob=0.3, block_size=3, inplace=True)
assert result is x_clone, "Inplace should return the same tensor"
def test_drop_block_2d_couple_channels_true(self):
"""Test couple_channels=True uses same mask for all channels."""
torch.manual_seed(42)
x = torch.ones((2, 4, 16, 16))
result = drop_block_2d(x, drop_prob=0.3, block_size=5, couple_channels=True)
# With couple_channels=True, all channels should have same drop pattern
for b in range(x.shape[0]):
mask_c0 = (result[b, 0] == 0).float()
for c in range(1, x.shape[1]):
mask_c = (result[b, c] == 0).float()
assert torch.allclose(mask_c0, mask_c), f"Channel {c} has different mask than channel 0"
def test_drop_block_2d_couple_channels_false(self):
"""Test couple_channels=False uses independent mask per channel."""
torch.manual_seed(42)
x = torch.ones((2, 4, 16, 16))
result = drop_block_2d(x, drop_prob=0.3, block_size=5, couple_channels=False)
# With couple_channels=False, channels should have different patterns
# (with high probability for reasonable drop_prob)
mask_c0 = (result[0, 0] == 0).float()
mask_c1 = (result[0, 1] == 0).float()
# They might occasionally be the same by chance, but very unlikely
assert not torch.allclose(mask_c0, mask_c1), "Channels should have independent masks"
def test_drop_block_2d_with_noise(self):
"""Test with_noise option adds gaussian noise to dropped regions."""
torch.manual_seed(42)
x = torch.ones((2, 3, 16, 16))
result = drop_block_2d(x, drop_prob=0.3, block_size=5, with_noise=True)
# With noise, dropped regions should have non-zero values from gaussian noise
# The result should contain values other than the scaled kept values
unique_vals = torch.unique(result)
assert len(unique_vals) > 2, "With noise should produce varied values"
def test_drop_block_2d_even_block_size(self):
"""Test that even block sizes work correctly."""
x = torch.ones((2, 3, 16, 16))
for block_size in [2, 4, 6]:
result = drop_block_2d(x, drop_prob=0.1, block_size=block_size)
assert result.shape == x.shape, f"Shape mismatch for block_size={block_size}"
def test_drop_block_2d_asymmetric_input(self):
"""Test with asymmetric H != W inputs."""
for h, w in [(8, 16), (16, 8), (7, 14), (14, 7)]:
x = torch.ones((2, 3, h, w))
result = drop_block_2d(x, drop_prob=0.1, block_size=5)
assert result.shape == x.shape, f"Shape mismatch for ({h}, {w})"
def test_drop_block_2d_scale_by_keep(self):
"""Test scale_by_keep parameter."""
torch.manual_seed(42)
x = torch.ones((2, 3, 16, 16))
# With scale_by_keep=True (default), kept values are scaled up
result_scaled = drop_block_2d(x.clone(), drop_prob=0.3, block_size=5, scale_by_keep=True)
kept_vals_scaled = result_scaled[result_scaled > 0]
# Scaled values should be > 1.0 (scaled up to compensate for drops)
assert kept_vals_scaled.min() > 1.0, "Scaled values should be > 1.0"
# With scale_by_keep=False, kept values stay at original
torch.manual_seed(42)
result_unscaled = drop_block_2d(x.clone(), drop_prob=0.3, block_size=5, scale_by_keep=False)
kept_vals_unscaled = result_unscaled[result_unscaled > 0]
# Unscaled values should be exactly 1.0
assert torch.allclose(kept_vals_unscaled, torch.ones_like(kept_vals_unscaled)), \
"Unscaled values should be 1.0"
class TestDropBlock2dModule:
"""Test DropBlock2d nn.Module."""
def test_deprecated_args_accepted(self):
"""Test that deprecated args (batchwise, fast) are silently accepted."""
# These should not raise
module1 = DropBlock2d(drop_prob=0.1, batchwise=True)
module2 = DropBlock2d(drop_prob=0.1, fast=False)
module3 = DropBlock2d(drop_prob=0.1, batchwise=False, fast=True)
assert module1.drop_prob == 0.1
assert module2.drop_prob == 0.1
assert module3.drop_prob == 0.1
def test_unknown_args_warned(self):
"""Test that unknown kwargs emit a warning."""
with pytest.warns(UserWarning, match="unexpected keyword argument 'unknown_arg'"):
DropBlock2d(drop_prob=0.1, unknown_arg=True)
def test_training_mode(self):
"""Test that dropping only occurs in training mode."""
module = DropBlock2d(drop_prob=0.5, block_size=3)
x = torch.ones((2, 3, 8, 8))
# In eval mode, should return input unchanged
module.eval()
result = module(x)
assert torch.allclose(result, x), "Should not drop in eval mode"
# In train mode, should modify input
module.train()
torch.manual_seed(42)
result = module(x)
assert not torch.allclose(result, x), "Should drop in train mode"
def test_couple_channels_parameter(self):
"""Test couple_channels parameter is passed through."""
x = torch.ones((2, 4, 16, 16))
# couple_channels=True (default)
module_coupled = DropBlock2d(drop_prob=0.3, block_size=5, couple_channels=True)
module_coupled.train()
torch.manual_seed(42)
result_coupled = module_coupled(x)
# All channels should have same pattern
mask_c0 = (result_coupled[0, 0] == 0).float()
mask_c1 = (result_coupled[0, 1] == 0).float()
assert torch.allclose(mask_c0, mask_c1)
# couple_channels=False
module_uncoupled = DropBlock2d(drop_prob=0.3, block_size=5, couple_channels=False)
module_uncoupled.train()
torch.manual_seed(42)
result_uncoupled = module_uncoupled(x)
# Channels should have different patterns
mask_c0 = (result_uncoupled[0, 0] == 0).float()
mask_c1 = (result_uncoupled[0, 1] == 0).float()
assert not torch.allclose(mask_c0, mask_c1)
class TestDropPath:
"""Test drop_path function and DropPath module."""
def test_no_drop_when_prob_zero(self):
"""Test that no dropping occurs when drop_prob=0."""
x = torch.ones((4, 8, 16, 16))
result = drop_path(x, drop_prob=0.0, training=True)
assert torch.allclose(result, x)
def test_no_drop_when_not_training(self):
"""Test that no dropping occurs when not training."""
x = torch.ones((4, 8, 16, 16))
result = drop_path(x, drop_prob=0.5, training=False)
assert torch.allclose(result, x)
def test_drop_path_scaling(self):
"""Test that scale_by_keep properly scales kept paths."""
torch.manual_seed(42)
x = torch.ones((100, 8, 4, 4)) # Large batch for statistical stability
keep_prob = 0.8
drop_prob = 1 - keep_prob
result = drop_path(x, drop_prob=drop_prob, training=True, scale_by_keep=True)
# Kept samples should be scaled by 1/keep_prob = 1.25
kept_mask = (result[:, 0, 0, 0] != 0)
if kept_mask.any():
kept_vals = result[kept_mask, 0, 0, 0]
expected_scale = 1.0 / keep_prob
assert torch.allclose(kept_vals, torch.full_like(kept_vals, expected_scale), atol=1e-5)
def test_drop_path_no_scaling(self):
"""Test that scale_by_keep=False does not scale."""
torch.manual_seed(42)
x = torch.ones((100, 8, 4, 4))
result = drop_path(x, drop_prob=0.2, training=True, scale_by_keep=False)
# Kept samples should remain at 1.0
kept_mask = (result[:, 0, 0, 0] != 0)
if kept_mask.any():
kept_vals = result[kept_mask, 0, 0, 0]
assert torch.allclose(kept_vals, torch.ones_like(kept_vals))
class TestDropPathModule:
"""Test DropPath nn.Module."""
def test_training_mode(self):
"""Test that dropping only occurs in training mode."""
module = DropPath(drop_prob=0.5)
x = torch.ones((32, 8, 4, 4)) # Larger batch for statistical reliability
module.eval()
result = module(x)
assert torch.allclose(result, x), "Should not drop in eval mode"
module.train()
torch.manual_seed(42)
result = module(x)
# With 50% drop prob on 32 samples, very unlikely all survive
# Check that at least one sample has zeros (was dropped)
has_zeros = (result == 0).any()
assert has_zeros, "Should drop some paths in train mode"
def test_extra_repr(self):
"""Test extra_repr for nice printing."""
module = DropPath(drop_prob=0.123)
repr_str = module.extra_repr()
assert "0.123" in repr_str
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "tests/test_layers_drop.py",
"license": "Apache License 2.0",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/pytorch-image-models:timm/task/classification.py | """Classification training task."""
import logging
from typing import Callable, Dict, Optional, Union
import torch
import torch.nn as nn
from .task import TrainingTask
_logger = logging.getLogger(__name__)
class ClassificationTask(TrainingTask):
"""Standard supervised classification task.
Simple task that performs a forward pass through the model and computes
the classification loss.
Args:
model: The model to train
criterion: Loss function (e.g., CrossEntropyLoss)
device: Device for task tensors/buffers
dtype: Dtype for task tensors/buffers
verbose: Enable info logging
Example:
>>> task = ClassificationTask(model, nn.CrossEntropyLoss(), device=torch.device('cuda'))
>>> result = task(input, target)
>>> result['loss'].backward()
"""
def __init__(
self,
model: nn.Module,
criterion: Union[nn.Module, Callable],
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
verbose: bool = True,
):
super().__init__(device=device, dtype=dtype, verbose=verbose)
self.model = model
self.criterion = criterion
if self.verbose:
loss_name = getattr(criterion, '__name__', None) or type(criterion).__name__
_logger.info(f"ClassificationTask: criterion={loss_name}")
def prepare_distributed(
self,
device_ids: Optional[list] = None,
**ddp_kwargs
) -> 'ClassificationTask':
"""Prepare task for distributed training.
Wraps the model in DistributedDataParallel (DDP).
Args:
device_ids: List of device IDs for DDP (e.g., [local_rank])
**ddp_kwargs: Additional arguments passed to DistributedDataParallel
Returns:
self (for method chaining)
"""
from torch.nn.parallel import DistributedDataParallel as DDP
self.model = DDP(self.model, device_ids=device_ids, **ddp_kwargs)
return self
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> Dict[str, torch.Tensor]:
"""Forward pass through model and compute classification loss.
Args:
input: Input tensor [B, C, H, W]
target: Target labels [B]
Returns:
Dictionary containing:
- 'loss': Classification loss
- 'output': Model logits
"""
output = self.model(input)
loss = self.criterion(output, target)
return {
'loss': loss,
'output': output,
}
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/task/classification.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
huggingface/pytorch-image-models:timm/task/distillation.py | """Knowledge distillation training tasks and components."""
import logging
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models import create_model
from timm.utils import unwrap_model
from .task import TrainingTask
_logger = logging.getLogger(__name__)
class DistillationTeacher(nn.Module):
"""Wrapper for a teacher model used in knowledge distillation.
Creates and manages a pre-trained teacher model for knowledge distillation,
handling model creation and normalization differences between teacher and student.
Can be created from:
- A model name string (creates the model internally with pretrained weights)
- An existing nn.Module (wraps it with the necessary interface)
Args:
model_name_or_module: Either a model name string or an nn.Module
num_classes: Number of output classes (required if model_name_or_module is a string)
in_chans: Number of input channels (used if model_name_or_module is a string)
pretrained_path: Optional path to pretrained weights (used if model_name_or_module is a string)
device: Device to place the model on
dtype: Model dtype (uses float32 if None)
"""
def __init__(
self,
model_name_or_module: Union[str, nn.Module],
num_classes: Optional[int] = None,
in_chans: int = 3,
pretrained_path: Optional[str] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
):
super().__init__()
if isinstance(model_name_or_module, str):
_logger.info(f"Creating KD teacher model: '{model_name_or_module}'")
pretrained_kwargs = {'pretrained': True}
if pretrained_path:
pretrained_kwargs['pretrained_cfg_overlay'] = dict(
file=pretrained_path,
num_classes=num_classes,
)
model = create_model(
model_name=model_name_or_module,
num_classes=num_classes,
in_chans=in_chans,
device=device,
dtype=dtype,
**pretrained_kwargs,
)
elif isinstance(model_name_or_module, nn.Module):
model = model_name_or_module
else:
raise TypeError(
f"model_name_or_module must be a string or nn.Module, got {type(model_name_or_module).__name__}"
)
model.eval()
self.model = model
# Get normalization values from pretrained_cfg if available
model_unwrapped = unwrap_model(model)
if hasattr(model_unwrapped, 'pretrained_cfg'):
mean = model_unwrapped.pretrained_cfg.get('mean', (0.485, 0.456, 0.406))
std = model_unwrapped.pretrained_cfg.get('std', (0.229, 0.224, 0.225))
else:
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
mean_kd = torch.tensor(mean, device=device, dtype=dtype).view(1, -1, 1, 1)
std_kd = torch.tensor(std, device=device, dtype=dtype).view(1, -1, 1, 1)
self.register_buffer('mean_kd', mean_kd, persistent=False)
self.register_buffer('std_kd', std_kd, persistent=False)
def forward(
self,
input: torch.Tensor,
return_features: bool = False,
) -> torch.Tensor:
"""Forward pass through teacher model.
Args:
input: Input tensor (should already be normalized for teacher)
return_features: Whether to return pooled pre-logits features instead of logits
Returns:
Logits or pooled pre-logits features depending on return_features flag
"""
if return_features:
if not hasattr(self.model, 'forward_features') or not hasattr(self.model, 'forward_head'):
raise ValueError(
f"Model {self.model.__class__.__name__} does not support feature extraction. "
"Ensure the model has 'forward_features' and 'forward_head' methods."
)
feature_map = self.model.forward_features(input)
return self.model.forward_head(feature_map, pre_logits=True)
else:
return self.model(input)
def normalize_input(
self,
input: torch.Tensor,
student_mean: Optional[torch.Tensor] = None,
student_std: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Normalize input to match teacher's expected normalization.
Args:
input: Input tensor (already normalized for student)
student_mean: Student normalization mean buffer [1, 3, 1, 1]
student_std: Student normalization std buffer [1, 3, 1, 1]
Returns:
Input tensor normalized for the teacher model
"""
if student_mean is None or student_std is None:
return input
if torch.equal(student_mean, self.mean_kd) and torch.equal(student_std, self.std_kd):
return input
return (input * student_std + student_mean - self.mean_kd) / self.std_kd
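# Hedged illustration (not part of the upstream file): normalize_input() first
# de-normalizes with the student's mean/std, then re-normalizes with the teacher's
# registered buffers, so it is equivalent to normalizing the raw pixels with the
# teacher statistics. Tensor values below are made up purely to show the round trip,
# and assume the teacher was built on CPU with the default float32 dtype.
def _example_normalize_input_roundtrip(teacher: DistillationTeacher) -> None:
    student_mean = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
    student_std = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
    raw = torch.rand(2, 3, 8, 8)  # pretend raw pixels in [0, 1]
    student_input = (raw - student_mean) / student_std
    teacher_input = teacher.normalize_input(student_input, student_mean, student_std)
    expected = (raw - teacher.mean_kd) / teacher.std_kd
    assert torch.allclose(teacher_input.float(), expected.float(), atol=1e-5)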
def _resolve_teacher(
teacher: Union[str, nn.Module, DistillationTeacher],
student_model: nn.Module,
pretrained_path: Optional[str],
device: Optional[torch.device],
dtype: Optional[torch.dtype],
) -> DistillationTeacher:
"""Resolve teacher input to a DistillationTeacher instance.
Args:
teacher: Model name string, nn.Module, or DistillationTeacher
student_model: Student model to infer num_classes/in_chans from
pretrained_path: Optional path to teacher pretrained weights
device: Device for teacher
dtype: Dtype for teacher
Returns:
DistillationTeacher instance
"""
if isinstance(teacher, DistillationTeacher):
return teacher
# Get num_classes and in_chans from student
student_unwrapped = unwrap_model(student_model)
num_classes = student_unwrapped.num_classes
in_chans = student_unwrapped.in_chans
return DistillationTeacher(
model_name_or_module=teacher,
num_classes=num_classes,
in_chans=in_chans,
pretrained_path=pretrained_path,
device=device,
dtype=dtype,
)
class LogitDistillationTask(TrainingTask):
"""Logit-based knowledge distillation task.
Performs distillation by matching student and teacher output logits using
KL divergence with temperature scaling.
Loss weighting supports two modes:
1. Independent weights: loss = task_loss_weight * task_loss + distill_loss_weight * distill_loss
2. Complementary mode: loss = task_loss_weight * task_loss + (1 - task_loss_weight) * distill_loss
(used when only task_loss_weight is specified)
Args:
student_model: Student model to train
teacher_model: Teacher model - can be a model name string, nn.Module, or DistillationTeacher
criterion: Task loss function (default: CrossEntropyLoss)
teacher_pretrained_path: Path to teacher pretrained weights (used when teacher_model is a string)
loss_type: Type of distillation loss (currently only 'kl' supported)
distill_loss_weight: Weight for distillation loss
task_loss_weight: Weight for task loss
temperature: Softmax temperature for distillation (typical values: 1-4)
device: Device for task tensors/buffers
dtype: Dtype for task tensors/buffers
verbose: Enable info logging
Example:
>>> # With model name string (num_classes/in_chans inferred from student)
>>> task = LogitDistillationTask(
... student_model=model, teacher_model='resnet50',
... criterion=nn.CrossEntropyLoss(),
... task_loss_weight=0.3, temperature=4.0,
... device=torch.device('cuda'),
... )
>>> # With raw model
>>> task = LogitDistillationTask(
... student_model=model, teacher_model=my_teacher_model,
... criterion=nn.CrossEntropyLoss(),
... task_loss_weight=0.3, temperature=4.0,
... )
"""
def __init__(
self,
student_model: nn.Module,
teacher_model: Union[str, nn.Module, DistillationTeacher],
criterion: Optional[nn.Module] = None,
teacher_pretrained_path: Optional[str] = None,
loss_type: str = 'kl',
distill_loss_weight: Optional[float] = None,
task_loss_weight: Optional[float] = None,
temperature: float = 1.0,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
verbose: bool = True,
):
super().__init__(device=device, dtype=dtype, verbose=verbose)
# Resolve teacher to DistillationTeacher
teacher = _resolve_teacher(
teacher_model,
student_model,
teacher_pretrained_path,
self.device,
self.dtype,
)
self.student = student_model
self.teacher = teacher
self.criterion = criterion if criterion is not None else nn.CrossEntropyLoss()
self.loss_type = loss_type
self.temperature = temperature
if loss_type != 'kl':
raise ValueError(f"Unsupported loss_type '{loss_type}'. Currently only 'kl' is supported.")
# Register student normalization values as non-persistent buffers
student_unwrapped = unwrap_model(student_model)
student_mean = torch.tensor(
student_unwrapped.pretrained_cfg['mean'],
device=self.device,
dtype=self.dtype,
).view(1, -1, 1, 1)
student_std = torch.tensor(
student_unwrapped.pretrained_cfg['std'],
device=self.device,
dtype=self.dtype,
).view(1, -1, 1, 1)
self.register_buffer('student_mean', student_mean, persistent=False)
self.register_buffer('student_std', student_std, persistent=False)
# Determine weighting mode
if distill_loss_weight is not None:
# Mode 1: distill_weight specified - independent weights (task defaults to 1.0 if not set)
self.distill_loss_weight = distill_loss_weight
self.task_loss_weight = task_loss_weight if task_loss_weight is not None else 1.0
if self.verbose:
_logger.info(
f"LogitDistillationTask: Independent weights - "
f"task_weight={self.task_loss_weight}, distill_weight={distill_loss_weight}"
)
elif task_loss_weight is not None:
# Mode 2: only task_weight specified - complementary mode (distill = 1 - task)
self.task_loss_weight = task_loss_weight
self.distill_loss_weight = 1.0 - task_loss_weight
if self.verbose:
_logger.info(
f"LogitDistillationTask: Complementary mode - "
f"task_weight={task_loss_weight}, distill_weight={self.distill_loss_weight}"
)
else:
# Mode 3: neither specified - equal weights (both 1.0)
self.distill_loss_weight = 1.0
self.task_loss_weight = 1.0
if self.verbose:
_logger.info(
f"LogitDistillationTask: Default equal weights - "
f"task_weight={self.task_loss_weight}, distill_weight={self.distill_loss_weight}"
)
if self.verbose:
_logger.info(
f"LogitDistillationTask: loss_type={loss_type}, temperature={temperature}"
)
def prepare_distributed(
self,
device_ids: Optional[list] = None,
**ddp_kwargs
) -> 'LogitDistillationTask':
"""Prepare task for distributed training.
Wraps the student model in DistributedDataParallel (DDP) while leaving
the frozen teacher model unwrapped.
Args:
device_ids: List of device IDs for DDP (e.g., [local_rank])
**ddp_kwargs: Additional arguments passed to DistributedDataParallel
Returns:
self (for method chaining)
"""
from torch.nn.parallel import DistributedDataParallel as DDP
for param in self.teacher.parameters():
param.requires_grad = False
self.student = DDP(self.student, device_ids=device_ids, **ddp_kwargs)
return self
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> Dict[str, torch.Tensor]:
"""Forward pass with logit distillation.
Args:
input: Input tensor [B, C, H, W]
target: Target labels [B]
Returns:
Dictionary containing:
- 'loss': Combined training loss (task + distillation)
- 'output': Student logits (for metrics)
- 'task_loss': Classification loss component
- 'kd_loss': Logit distillation loss component
"""
student_logits = self.student(input)
task_loss = self.criterion(student_logits, target)
with torch.no_grad():
input_kd = self.teacher.normalize_input(input, self.student_mean, self.student_std)
teacher_logits = self.teacher(input_kd.detach(), return_features=False)
prob_s = F.log_softmax(student_logits / self.temperature, dim=-1)
prob_t = F.log_softmax(teacher_logits / self.temperature, dim=-1)
kd_loss = F.kl_div(prob_s, prob_t, reduction='batchmean', log_target=True) * (self.temperature ** 2)
total_loss = self.task_loss_weight * task_loss + self.distill_loss_weight * kd_loss
return {
'loss': total_loss,
'output': student_logits,
'task_loss': task_loss,
'kd_loss': kd_loss,
}
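# Hedged standalone sketch (illustrative only, mirrors the distillation term computed in
# LogitDistillationTask.forward above): KL divergence between temperature-softened student
# and teacher logits, scaled by T**2 so gradient magnitudes stay comparable across
# temperatures. The logits here are random placeholders.
def _example_logit_kd_loss(temperature: float = 4.0) -> torch.Tensor:
    student_logits = torch.randn(8, 10, requires_grad=True)
    teacher_logits = torch.randn(8, 10)
    prob_s = F.log_softmax(student_logits / temperature, dim=-1)
    prob_t = F.log_softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(prob_s, prob_t, reduction='batchmean', log_target=True) * temperature ** 2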
class FeatureDistillationTrainableModule(nn.Module):
"""Trainable module for feature distillation.
Wraps student model and projection layer into a single module where all
trainable forward operations happen inside forward(). This ensures proper
DDP wrapping when the module is used with DistributedDataParallel.
"""
def __init__(
self,
student_model: nn.Module,
projection: Optional[nn.Module] = None,
):
""" Create trainable module wrapper for feature distillation.
Args:
student_model: Student model to train
projection: Optional projection layer (Linear layer or None)
"""
super().__init__()
self.student = student_model
self.projection = projection
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Forward pass through student and projection.
Args:
input: Input tensor [B, C, H, W]
Returns:
Tuple of (student_logits, student_features) where features are
optionally projected to match teacher dimension.
"""
feature_map = self.student.forward_features(input)
student_logits = self.student.forward_head(feature_map)
student_features = self.student.forward_head(feature_map, pre_logits=True)
if self.projection is not None:
student_features = self.projection(student_features)
return student_logits, student_features
class FeatureDistillationTask(TrainingTask):
"""Feature-based knowledge distillation task.
Performs distillation by matching student and teacher intermediate features
(pooled pre-logits) using MSE loss. Automatically creates a projection layer
if student and teacher feature dimensions differ.
Loss weighting supports two modes:
1. Independent weights: loss = task_loss_weight * task_loss + distill_loss_weight * distill_loss
2. Complementary mode: loss = task_loss_weight * task_loss + (1 - task_loss_weight) * distill_loss
(used when only task_loss_weight is specified)
Args:
student_model: Student model to train
teacher_model: Teacher model - can be a model name string, nn.Module, or DistillationTeacher
criterion: Task loss function (default: CrossEntropyLoss)
teacher_pretrained_path: Path to teacher pretrained weights (used when teacher_model is a string)
distill_loss_weight: Weight for distillation loss
task_loss_weight: Weight for task loss
student_feature_dim: Student pre-logits dimension (auto-detected if None)
teacher_feature_dim: Teacher pre-logits dimension (auto-detected if None)
device: Device for task tensors/buffers
dtype: Dtype for task tensors/buffers
verbose: Enable info logging
Example:
>>> # With model name string (num_classes/in_chans inferred from student)
>>> task = FeatureDistillationTask(
... student_model=model, teacher_model='resnet50',
... criterion=nn.CrossEntropyLoss(),
... distill_loss_weight=5.0, task_loss_weight=1.0,
... device=torch.device('cuda'),
... )
"""
def __init__(
self,
student_model: nn.Module,
teacher_model: Union[str, nn.Module, DistillationTeacher],
criterion: Optional[nn.Module] = None,
teacher_pretrained_path: Optional[str] = None,
distill_loss_weight: Optional[float] = None,
task_loss_weight: Optional[float] = None,
student_feature_dim: Optional[int] = None,
teacher_feature_dim: Optional[int] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
verbose: bool = True,
):
super().__init__(device=device, dtype=dtype, verbose=verbose)
# Resolve teacher to DistillationTeacher
teacher = _resolve_teacher(
teacher_model,
student_model,
teacher_pretrained_path,
self.device,
self.dtype,
)
self.teacher = teacher
self.criterion = criterion if criterion is not None else nn.CrossEntropyLoss()
# Determine weighting mode
if distill_loss_weight is not None:
# Mode 1: distill_weight specified - independent weights (task defaults to 1.0 if not set)
self.distill_loss_weight = distill_loss_weight
self.task_loss_weight = task_loss_weight if task_loss_weight is not None else 1.0
if self.verbose:
_logger.info(
f"FeatureDistillationTask: Independent weights - "
f"task_weight={self.task_loss_weight}, distill_weight={distill_loss_weight}"
)
elif task_loss_weight is not None:
# Mode 2: only task_weight specified - complementary mode (distill = 1 - task)
self.task_loss_weight = task_loss_weight
self.distill_loss_weight = 1.0 - task_loss_weight
if self.verbose:
_logger.info(
f"FeatureDistillationTask: Complementary mode - "
f"task_weight={task_loss_weight}, distill_weight={self.distill_loss_weight}"
)
else:
# Mode 3: neither specified - equal weights (both 1.0)
self.distill_loss_weight = 1.0
self.task_loss_weight = 1.0
if self.verbose:
_logger.info(
f"FeatureDistillationTask: Default equal weights - "
f"task_weight={self.task_loss_weight}, distill_weight={self.distill_loss_weight}"
)
# Auto-detect feature dimensions if not provided
if student_feature_dim is None:
student_feature_dim = self._detect_feature_dim(student_model)
if teacher_feature_dim is None:
teacher_feature_dim = self._detect_feature_dim(teacher.model)
# Create projection layer if dimensions differ
projection = None
if student_feature_dim != teacher_feature_dim:
if self.verbose:
_logger.info(
f"Creating projection layer: {student_feature_dim} -> {teacher_feature_dim}"
)
projection = nn.Linear(student_feature_dim, teacher_feature_dim, device=self.device, dtype=self.dtype)
else:
if self.verbose:
_logger.info("Feature dimensions match, no projection needed")
self.trainable_module = FeatureDistillationTrainableModule(student_model, projection)
# Register student normalization values
student_unwrapped = unwrap_model(student_model)
student_mean = torch.tensor(
student_unwrapped.pretrained_cfg['mean'],
device=self.device,
dtype=self.dtype,
).view(1, -1, 1, 1)
student_std = torch.tensor(
student_unwrapped.pretrained_cfg['std'],
device=self.device,
dtype=self.dtype,
).view(1, -1, 1, 1)
self.register_buffer('student_mean', student_mean, persistent=False)
self.register_buffer('student_std', student_std, persistent=False)
if self.verbose:
_logger.info(
f"FeatureDistillationTask: "
f"student_dim={student_feature_dim}, teacher_dim={teacher_feature_dim}"
)
@staticmethod
def _detect_feature_dim(model: nn.Module) -> int:
"""Auto-detect feature dimension from model."""
model = unwrap_model(model)
if hasattr(model, 'head_hidden_size'):
return model.head_hidden_size
elif hasattr(model, 'num_features'):
return model.num_features
else:
raise ValueError(
"Cannot auto-detect feature dimension. Model must have "
"'head_hidden_size' or 'num_features' attribute, or you must "
"specify student_feature_dim and teacher_feature_dim explicitly."
)
def prepare_distributed(
self,
device_ids: Optional[list] = None,
**ddp_kwargs,
) -> 'FeatureDistillationTask':
"""Prepare task for distributed training.
Wraps the trainable module (student + projection) in DistributedDataParallel
(DDP) while leaving the frozen teacher model unwrapped.
Args:
device_ids: List of device IDs for DDP (e.g., [local_rank])
**ddp_kwargs: Additional arguments passed to DistributedDataParallel
Returns:
self (for method chaining)
"""
from torch.nn.parallel import DistributedDataParallel as DDP
for param in self.teacher.parameters():
param.requires_grad = False
self.trainable_module = DDP(self.trainable_module, device_ids=device_ids, **ddp_kwargs)
return self
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> Dict[str, torch.Tensor]:
"""Forward pass with feature distillation.
Args:
input: Input tensor [B, C, H, W]
target: Target labels [B]
Returns:
Dictionary containing:
- 'loss': Combined training loss (task + distillation)
- 'output': Student logits (for metrics)
- 'task_loss': Classification loss component
- 'kd_loss': Feature distillation loss component
"""
student_logits, student_features = self.trainable_module(input)
task_loss = self.criterion(student_logits, target)
with torch.no_grad():
input_kd = self.teacher.normalize_input(input, self.student_mean, self.student_std)
teacher_features = self.teacher(input_kd.detach(), return_features=True)
kd_loss = F.mse_loss(student_features, teacher_features)
total_loss = self.task_loss_weight * task_loss + self.distill_loss_weight * kd_loss
return {
'loss': total_loss,
'output': student_logits,
'task_loss': task_loss,
'kd_loss': kd_loss,
}
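# Hedged usage sketch (model names, sizes, and weights are placeholders, not a
# recommendation): assumes the student exposes num_classes / in_chans / pretrained_cfg as
# the helpers above expect, and that pretrained teacher weights can be downloaded.
# resnet18 (512 features) vs resnet50 (2048 features) triggers the auto-created projection
# layer described in FeatureDistillationTask.
def _example_feature_distillation_step() -> None:
    student = create_model('resnet18', pretrained=False, num_classes=10)
    task = FeatureDistillationTask(
        student_model=student,
        teacher_model='resnet50',
        distill_loss_weight=5.0,
        task_loss_weight=1.0,
    )
    images = torch.randn(4, 3, 224, 224)
    labels = torch.randint(0, 10, (4,))
    result = task(images, labels)
    result['loss'].backward()  # combined task_loss + kd_loss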
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/task/distillation.py",
"license": "Apache License 2.0",
"lines": 518,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/task/task.py | """Base training task abstraction.
This module provides the base TrainingTask class that encapsulates a complete
forward pass including loss computation. Tasks return a dictionary with loss
components and outputs for logging.
"""
from typing import Dict, Optional
import torch
import torch.nn as nn
class TrainingTask(nn.Module):
"""Base class for training tasks.
A training task encapsulates a complete forward pass including loss computation.
Tasks return a dictionary containing the training loss and other components for logging.
The returned dictionary must contain:
- 'loss': The training loss for backward pass (required)
- 'output': Model output/logits for metric computation (recommended)
- Other task-specific loss components for logging (optional)
Args:
device: Device for task tensors/buffers (defaults to cpu)
dtype: Dtype for task tensors/buffers (defaults to torch default)
verbose: Enable info logging
Example:
>>> task = SomeTask(model, criterion, device=torch.device('cuda'))
>>>
>>> # Prepare for distributed training (if needed)
>>> if distributed:
>>> task.prepare_distributed(device_ids=[local_rank])
>>>
>>> # Training loop
>>> result = task(input, target)
>>> result['loss'].backward()
"""
def __init__(
self,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
verbose: bool = True,
):
super().__init__()
self.device = device if device is not None else torch.device('cpu')
self.dtype = dtype if dtype is not None else torch.get_default_dtype()
self.verbose = verbose
def to(self, *args, **kwargs):
"""Move task to device/dtype, keeping self.device and self.dtype in sync."""
dummy = torch.empty(0).to(*args, **kwargs)
self.device = dummy.device
self.dtype = dummy.dtype
return super().to(*args, **kwargs)
def prepare_distributed(
self,
device_ids: Optional[list] = None,
**ddp_kwargs
) -> 'TrainingTask':
"""Prepare task for distributed training.
This method wraps trainable components in DistributedDataParallel (DDP)
while leaving non-trainable components (like frozen teacher models) unwrapped.
Should be called after task initialization but before training loop.
Args:
device_ids: List of device IDs for DDP (e.g., [local_rank])
**ddp_kwargs: Additional arguments passed to DistributedDataParallel
Returns:
self (for method chaining)
Example:
>>> task = LogitDistillationTask(student, teacher, criterion)
>>> task.prepare_distributed(device_ids=[args.local_rank])
>>> task = torch.compile(task) # Compile after DDP
"""
# Default implementation - subclasses override if they need DDP
return self
def forward(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> Dict[str, torch.Tensor]:
"""Perform forward pass and compute loss.
Args:
input: Input tensor [B, C, H, W]
target: Target labels [B]
Returns:
Dictionary with at least 'loss' key containing the training loss
"""
raise NotImplementedError
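# Hedged sketch (illustrative subclass, not part of timm): the minimal contract for a
# custom task is to run the forward pass and return a dict whose 'loss' entry is used for
# the backward pass; everything else in the dict is optional logging detail.
class _ExampleRegressionTask(TrainingTask):
    def __init__(self, model: nn.Module, device: Optional[torch.device] = None):
        super().__init__(device=device)
        self.model = model
        self.criterion = nn.MSELoss()
    def forward(self, input: torch.Tensor, target: torch.Tensor) -> Dict[str, torch.Tensor]:
        output = self.model(input)
        loss = self.criterion(output, target)
        return {'loss': loss, 'output': output}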
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/task/task.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
huggingface/pytorch-image-models:timm/layers/coord_attn.py | """ Coordinate Attention and Variants
Coordinate Attention decomposes channel attention into two 1D feature encoding processes
to capture long-range dependencies with precise positional information. This module includes
the original implementation along with simplified and other variants.
Papers / References:
- Coordinate Attention: `Coordinate Attention for Efficient Mobile Network Design` - https://arxiv.org/abs/2103.02907
- Efficient Local Attention: `Rethinking Local Perception in Lightweight Vision Transformer` - https://arxiv.org/abs/2403.01123
Hacked together by / Copyright 2025 Ross Wightman
"""
from typing import Optional, Type, Union
import torch
from torch import nn
from .create_act import create_act_layer
from .helpers import make_divisible
from .norm import GroupNorm1
class CoordAttn(nn.Module):
def __init__(
self,
channels: int,
rd_ratio: float = 1. / 16,
rd_channels: Optional[int] = None,
rd_divisor: int = 8,
se_factor: float = 2/3,
bias: bool = False,
act_layer: Type[nn.Module] = nn.Hardswish,
norm_layer: Optional[Type[nn.Module]] = nn.BatchNorm2d,
gate_layer: Union[str, Type[nn.Module]] = 'sigmoid',
has_skip: bool = False,
device=None,
dtype=None,
):
"""Coordinate Attention module for spatial feature recalibration.
Introduced in "Coordinate Attention for Efficient Mobile Network Design" (CVPR 2021).
Decomposes channel attention into two 1D feature encoding processes along the height and
width axes to capture long-range dependencies with precise positional information.
Args:
channels: Number of input channels.
rd_ratio: Reduction ratio for bottleneck channel calculation.
rd_channels: Explicit number of bottleneck channels, overrides rd_ratio if set.
rd_divisor: Divisor for making bottleneck channels divisible.
se_factor: Applied to rd_ratio for final channel count (keeps params similar to SE).
bias: Whether to use bias in convolution layers.
act_layer: Activation module class for bottleneck.
norm_layer: Normalization module class, None for no normalization.
gate_layer: Gate activation, either 'sigmoid', 'hardsigmoid', or a module class.
has_skip: Whether to add residual skip connection to output.
device: Device to place tensors on.
dtype: Data type for tensors.
"""
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.has_skip = has_skip
if not rd_channels:
rd_channels = make_divisible(channels * rd_ratio * se_factor, rd_divisor, round_limit=0.)
self.conv1 = nn.Conv2d(channels, rd_channels, kernel_size=1, stride=1, padding=0, bias=bias, **dd)
self.bn1 = norm_layer(rd_channels, **dd) if norm_layer is not None else nn.Identity()
self.act = act_layer()
self.conv_h = nn.Conv2d(rd_channels, channels, kernel_size=1, stride=1, padding=0, bias=bias, **dd)
self.conv_w = nn.Conv2d(rd_channels, channels, kernel_size=1, stride=1, padding=0, bias=bias, **dd)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
identity = x
N, C, H, W = x.size()
# Strip pooling
x_h = x.mean(3, keepdim=True)
x_w = x.mean(2, keepdim=True)
x_w = x_w.transpose(-1, -2)
y = torch.cat([x_h, x_w], dim=2)
y = self.conv1(y)
y = self.bn1(y)
y = self.act(y)
x_h, x_w = torch.split(y, [H, W], dim=2)
x_w = x_w.transpose(-1, -2)
a_h = self.gate(self.conv_h(x_h))
a_w = self.gate(self.conv_w(x_w))
out = identity * a_w * a_h
if self.has_skip:
out = out + identity
return out
class SimpleCoordAttn(nn.Module):
"""Simplified Coordinate Attention variant.
Uses
* linear layers instead of convolutions
* no norm
* additive pre-gating re-combination
for reduced complexity while maintaining the core coordinate attention mechanism
of separate height and width attention.
"""
def __init__(
self,
channels: int,
rd_ratio: float = 0.25,
rd_channels: Optional[int] = None,
rd_divisor: int = 8,
se_factor: float = 2 / 3,
bias: bool = True,
act_layer: Type[nn.Module] = nn.SiLU,
gate_layer: Union[str, Type[nn.Module]] = 'sigmoid',
has_skip: bool = False,
device=None,
dtype=None,
):
"""
Args:
channels: Number of input channels.
rd_ratio: Reduction ratio for bottleneck channel calculation.
rd_channels: Explicit number of bottleneck channels, overrides rd_ratio if set.
rd_divisor: Divisor for making bottleneck channels divisible.
            se_factor: Applied to rd_ratio for final channel count (keeps params similar to SE).
bias: Whether to use bias in linear layers.
act_layer: Activation module class for bottleneck.
gate_layer: Gate activation, either 'sigmoid', 'hardsigmoid', or a module class.
has_skip: Whether to add residual skip connection to output.
device: Device to place tensors on.
dtype: Data type for tensors.
"""
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.has_skip = has_skip
if not rd_channels:
rd_channels = make_divisible(channels * rd_ratio * se_factor, rd_divisor, round_limit=0.)
self.fc1 = nn.Linear(channels, rd_channels, bias=bias, **dd)
self.act = act_layer()
self.fc_h = nn.Linear(rd_channels, channels, bias=bias, **dd)
self.fc_w = nn.Linear(rd_channels, channels, bias=bias, **dd)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
identity = x
# Strip pooling
x_h = x.mean(dim=3) # (N, C, H)
x_w = x.mean(dim=2) # (N, C, W)
# Shared bottleneck projection
x_h = self.act(self.fc1(x_h.transpose(1, 2))) # (N, H, rd_c)
x_w = self.act(self.fc1(x_w.transpose(1, 2))) # (N, W, rd_c)
# Separate attention heads
a_h = self.fc_h(x_h).transpose(1, 2).unsqueeze(-1) # (N, C, H, 1)
a_w = self.fc_w(x_w).transpose(1, 2).unsqueeze(-2) # (N, C, 1, W)
out = identity * self.gate(a_h + a_w)
if self.has_skip:
out = out + identity
return out
class EfficientLocalAttn(nn.Module):
"""Efficient Local Attention.
Lightweight alternative to Coordinate Attention that preserves spatial
information without channel reduction. Uses 1D depthwise convolutions
and GroupNorm for better generalization.
Paper: https://arxiv.org/abs/2403.01123
"""
def __init__(
self,
channels: int,
kernel_size: int = 7,
bias: bool = False,
act_layer: Type[nn.Module] = nn.SiLU,
gate_layer: Union[str, Type[nn.Module]] = 'sigmoid',
norm_layer: Optional[Type[nn.Module]] = GroupNorm1,
has_skip: bool = False,
device=None,
dtype=None,
):
"""
Args:
channels: Number of input channels.
kernel_size: Kernel size for 1D depthwise convolutions.
bias: Whether to use bias in convolution layers.
act_layer: Activation module class applied after normalization.
gate_layer: Gate activation, either 'sigmoid', 'hardsigmoid', or a module class.
norm_layer: Normalization module class, None for no normalization.
has_skip: Whether to add residual skip connection to output.
device: Device to place tensors on.
dtype: Data type for tensors.
"""
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.has_skip = has_skip
self.conv_h = nn.Conv2d(
channels, channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(kernel_size // 2, 0),
groups=channels,
bias=bias,
**dd
)
self.conv_w = nn.Conv2d(
channels, channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, kernel_size // 2),
groups=channels,
bias=bias,
**dd
)
if norm_layer is not None:
self.norm_h = norm_layer(channels, **dd)
self.norm_w = norm_layer(channels, **dd)
else:
self.norm_h = nn.Identity()
self.norm_w = nn.Identity()
self.act = act_layer()
self.gate = create_act_layer(gate_layer)
def forward(self, x):
identity = x
# Strip pooling: (N, C, H, W) -> (N, C, H) and (N, C, W)
x_h = x.mean(dim=3, keepdim=True)
x_w = x.mean(dim=2, keepdim=True)
# 1D conv + norm + act
x_h = self.act(self.norm_h(self.conv_h(x_h))) # (N, C, H, 1)
x_w = self.act(self.norm_w(self.conv_w(x_w))) # (N, C, 1, W)
# Generate attention maps
a_h = self.gate(x_h) # (N, C, H, 1)
a_w = self.gate(x_w) # (N, C, 1, W)
out = identity * a_h * a_w
if self.has_skip:
out = out + identity
return out
class StripAttn(nn.Module):
"""Minimal Strip Attention.
Lightweight spatial attention using strip pooling with optional learned refinement.
"""
def __init__(
self,
channels: int,
use_conv: bool = True,
kernel_size: int = 3,
bias: bool = False,
gate_layer: Union[str, Type[nn.Module]] = 'sigmoid',
has_skip: bool = False,
device=None,
dtype=None,
**_,
):
"""
Args:
channels: Number of input channels.
use_conv: Whether to apply depthwise convolutions for learned spatial refinement.
kernel_size: Kernel size for 1D depthwise convolutions when use_conv is True.
bias: Whether to use bias in convolution layers.
gate_layer: Gate activation, either 'sigmoid', 'hardsigmoid', or a module class.
has_skip: Whether to add residual skip connection to output.
device: Device to place tensors on.
dtype: Data type for tensors.
"""
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.has_skip = has_skip
self.use_conv = use_conv
if use_conv:
self.conv_h = nn.Conv2d(
channels, channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(kernel_size // 2, 0),
groups=channels,
bias=bias,
**dd
)
self.conv_w = nn.Conv2d(
channels, channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, kernel_size // 2),
groups=channels,
bias=bias,
**dd
)
else:
self.conv_h = nn.Identity()
self.conv_w = nn.Identity()
self.gate = create_act_layer(gate_layer)
def forward(self, x):
identity = x
# Strip pooling
x_h = x.mean(dim=3, keepdim=True) # (N, C, H, 1)
x_w = x.mean(dim=2, keepdim=True) # (N, C, 1, W)
# Optional learned refinement
x_h = self.conv_h(x_h)
x_w = self.conv_w(x_w)
# Combine and gate
a_hw = self.gate(x_h + x_w) # broadcasts to (N, C, H, W)
out = identity * a_hw
if self.has_skip:
out = out + identity
return out
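# Hedged usage sketch (toy sizes, illustrative only): every attention variant in this file
# is a drop-in (N, C, H, W) -> (N, C, H, W) module, so they can be swapped behind the same
# interface.
def _example_attn_variants() -> None:
    x = torch.randn(2, 32, 14, 14)
    for module in (CoordAttn(32), SimpleCoordAttn(32), EfficientLocalAttn(32), StripAttn(32)):
        assert module(x).shape == x.shape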
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/layers/coord_attn.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/optim/muon.py | """ Muon Optimizer
Improved Muon optimizer implementation with flexible handling of high-dimensional tensors.
Combines PyTorch-style structure with options for:
- Batched spatial processing for convolutions in addition to flatten
- Optional spatial normalization
- Selectable coefficient presets
- Automatic fallback to AdamW for 1D / scalar parameters (biases, norms, etc.) and optional fallback via param groups
- AdaMuon (https://arxiv.org/abs/2507.11005)
- μP eps damping factor (https://arxiv.org/abs/2512.05620v1)
TODO look into μP LR scaling and independent weight-decay scale
Based on implementation by Keller Jordan, see
- https://github.com/KellerJordan/Muon/blob/master/muon.py
- https://github.com/KellerJordan/modded-nanogpt/blob/master/train_gpt.py
- https://github.com/KellerJordan/modded-nanogpt/blob/master/train_gpt_medium.py
- https://github.com/NoahAmsel/PolarExpress/blob/main/polar_express.py
Hacked together by Ross Wightman
"""
import logging
import numbers
from typing import List, Mapping, Optional, Sequence, Tuple, Union
import torch
try:
from torch.distributed.tensor import DTensor
has_dtensor = True
except ImportError:
has_dtensor = False
from ._types import ParamsT
from .adamw import adamw
from .nadamw import nadamw
_logger = logging.getLogger(__name__)
# Constants from Keller Jordan's Muon
MUON_EPS = 1e-7
DEFAULT_NS_STEPS = 5
_COEFFICIENTS = {
"original": [
# Keller Jordan's Muon https://kellerjordan.github.io/posts/muon/
(3.4445, -4.7750, 2.0315),
],
"quintic": [
# https://leloykun.github.io/ponder/muon-opt-coeffs/#how-do-we-optimize-the-coefficients
# From https://github.com/KellerJordan/modded-nanogpt/blob/master/train_gpt_medium.py#L44
(4.0848, -6.8946, 2.9270),
(3.9505, -6.3029, 2.6377),
(3.7418, -5.5913, 2.3037),
(2.8769, -3.1427, 1.2046),
(2.8366, -3.0525, 1.2012),
],
"polar_express": [
# Polar Express https://arxiv.org/abs/2505.16932
# From https://github.com/NoahAmsel/PolarExpress/tree/main with safety 1e-2
(8.237312490495555, -23.157747414558198, 16.680568411445915),
(4.082441999064835, -2.893047735332586, 0.5252849256975648),
(3.9263479922546582, -2.8547468034765298, 0.5318022422894988),
(3.2982187133085143, -2.424541981026706, 0.48632008358844075),
(2.2970369434552573, -1.63662558125903, 0.4002628455953627),
(1.8763805351440397, -1.2347896577722228, 0.35891887501668385),
(1.8564423485617974, -1.2132449880935525, 0.3568003487825883),
(1.8749994008682747, -1.2499988017229169, 0.3749994008546422),
],
"polar_express_safer": [
# from https://github.com/KellerJordan/modded-nanogpt/blob/master/train_gpt.py
# w/ safety 2e-2
(8.156554524902461, -22.48329292557795, 15.878769915207462),
(4.0429299351667245, -2.808917465908704, 0.5000178451051299),
(3.8916678022926563, -2.7724841532176825, 0.5060648178503389),
(3.285753657755658, -2.3681294933425394, 0.46449024233003117),
(2.3005307116270983, -1.6111665557258408, 0.3833374427545273),
(1.8631210546382593, -1.2042160621002727, 0.3421879560523383),
(1.8382572152247512, -1.1779263289537742, 0.3396513038637379),
(1.8749999923301852, -1.2499999836060613, 0.374999991275876),
],
}
NSCoeff = Union[str, Tuple[float, float, float], List[Tuple[float, float, float]]]
def scale_eps_for_ns(
eps: float,
shape: Tuple[int, ...],
) -> float:
"""Scale epsilon for Newton-Schulz based on matrix dimensions (μP-style).
For μP compatibility, epsilon should scale as eps * sqrt(din/dout) to maintain
consistent damping behavior across different model widths.
Reference: https://arxiv.org/abs/2512.05620
Args:
eps: Base epsilon value
shape: Shape of the matrix (out, in) or (batch, out, in)
Returns:
Scaled epsilon value
"""
# Get din, dout from shape (handle both 2D and 3D batched)
    # FIXME TBD paper includes depth in the damping scale, e.g. eps * (din / dout) ** 0.5 / N
    # Get dout, din from shape (handle both 2D and 3D batched)
return eps * (din / dout) ** 0.5
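# Hedged worked example (illustrative numbers): for a (dout, din) = (256, 1024) weight the
# scale factor is sqrt(1024 / 256) = 2, so eps = 1e-7 becomes 2e-7.
def _example_scale_eps_for_ns() -> None:
    assert abs(scale_eps_for_ns(1e-7, (256, 1024)) - 2e-7) < 1e-12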
def zeropower_via_newtonschulz(
G: torch.Tensor,
steps: int,
coefficients: List[Tuple[float, float, float]],
eps: float = MUON_EPS,
safety_factor: float = 1.0,
dtype: torch.dtype = torch.bfloat16,
scale_eps: bool = False,
) -> torch.Tensor:
"""Newton-Schulz quintic iteration to compute the zeroth power / orthogonalization of gradient.
Supports batched operation over leading dimensions.
See
- https://github.com/KellerJordan/Muon/blob/master/muon.py
- https://github.com/NoahAmsel/PolarExpress/blob/main/polar_express.py
- https://github.com/KellerJordan/modded-nanogpt/blob/master/train_gpt.py
Args:
G: Input gradient tensor of shape (m, n) or (batch, m, n)
steps: Number of Newton-Schulz iterations
coefficients: Coefficients (a, b, c) for the iteration
eps: Numerical stability epsilon for norm
safety_factor: Multiplicative safety factor for norm (1.01 is common safety value in 'polar express' variants)
dtype: Computation dtype
scale_eps: If True, scale epsilon by sqrt(din/dout) for μP compatibility
Returns:
Orthogonalized tensor of same shape as G
"""
assert G.ndim in (2, 3), f"Input must be 2D or 3D, got {G.ndim}D. Flatten batch dims first."
num_cs = len(coefficients)
assert num_cs >= 1 and len(coefficients[0]) == 3
# match coefficients with # of steps, truncate or repeat last
coeff_sequence = coefficients[:steps] if steps <= num_cs else \
coefficients + [coefficients[-1]] * (steps - num_cs)
# Scale epsilon by sqrt(din/dout) for μP compatibility if requested
if scale_eps:
eps = scale_eps_for_ns(eps, G.shape)
X = G.to(dtype=dtype, copy=True)
# Transpose if needed (operate on dimension with fewer elements)
transposed = X.size(-2) > X.size(-1)
if transposed:
X = X.mT
# Normalize spectral norm to at most 1
if scale_eps:
# more of a damping factor in this case, use add instead of clamp
X.div_(X.norm(2, dim=(-2, -1), keepdim=True).mul(safety_factor).add_(eps))
else:
X.div_(X.norm(2, dim=(-2, -1), keepdim=True).mul(safety_factor).clamp_(min=eps))
is_dtensor = has_dtensor and isinstance(G, DTensor)
if is_dtensor:
# Basic, DTensor-friendly Newton-Schulz
for a, b, c in coeff_sequence:
A = X @ X.mT
B = b * A + c * (A @ A)
X = a * X + (B @ X)
else:
# Fast prealloc/out= path
# Batched vs unbatched fused MM
mm_fn = torch.baddbmm if X.ndim > 2 else torch.addmm
# Pre-allocate
X = X.contiguous()
A = torch.empty((*X.shape[:-1], X.size(-2)), device=X.device, dtype=X.dtype)
B = torch.empty_like(A)
C = torch.empty_like(X)
# Perform Newton-Schulz iterations
for a, b, c in coeff_sequence:
mm_fn(A, X, X.mT, beta=0.0, alpha=1.0, out=A) # A = X @ X.mT
mm_fn(A, A, A, beta=b, alpha=c, out=B) # B = b * A + c * A @ A
mm_fn(X, B, X, beta=a, alpha=1.0, out=C) # C = a * X + B @ X
X, C = C, X # swap refs to avoid copy
if transposed:
X = X.mT
return X
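# Hedged shape sketch (not part of the upstream file): the helper accepts either a single
# (m, n) matrix or a batch of them (e.g. a conv weight reshaped with mode="batched"),
# orthogonalizes along the last two dims, and returns the same shape in the computation
# dtype (bfloat16 by default).
def _example_newtonschulz_shapes() -> None:
    coeffs = _COEFFICIENTS["quintic"]
    g2d = torch.randn(64, 256)
    g3d = torch.randn(9, 64, 32)
    out2d = zeropower_via_newtonschulz(g2d, steps=DEFAULT_NS_STEPS, coefficients=coeffs)
    out3d = zeropower_via_newtonschulz(g3d, steps=DEFAULT_NS_STEPS, coefficients=coeffs)
    assert out2d.shape == g2d.shape and out3d.shape == g3d.shape
    assert out2d.dtype == torch.bfloat16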
def get_lr_scale(
param_shape: torch.Size,
adjust_lr_fn: str = "match_rms_adamw",
) -> float:
"""Adjust learning rate based on parameter shape for Muon.
Args:
param_shape: Shape of the parameter tensor
adjust_lr_fn: Scaling function name
- "original": sqrt(max(1, out/in)) - Original Muon impl
- "match_rms_adamw": 0.2 * sqrt(max(out, in)) - Kimi scaling
- "rms_to_rms": sqrt(out/in) - Scion/Bernstein scaling
"""
out_chs, in_chs = (param_shape[-2], param_shape[-1]) if len(param_shape) > 1 else (1., 1.)
if adjust_lr_fn == "original":
# Original Muon impl (https://kellerjordan.github.io/posts/muon/)
return max(1, out_chs / in_chs) ** 0.5
elif adjust_lr_fn == "match_rms_adamw":
# Kimi (https://arxiv.org/abs/2502.16982)
return 0.2 * max(out_chs, in_chs) ** 0.5
elif adjust_lr_fn == "rms_to_rms":
# Scion (https://arxiv.org/abs/2502.07529, https://github.com/LIONS-EPFL/scion)
# Bernstein et al. (https://jeremybernste.in/writing/deriving-muon)
return (out_chs / in_chs) ** 0.5
else:
assert False, f'Invalid scaling function "{adjust_lr_fn}" for Muon'
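# Hedged worked example (illustrative shape): for a 2048 x 512 weight, "original" gives
# sqrt(max(1, 2048 / 512)) = 2.0, "rms_to_rms" gives sqrt(2048 / 512) = 2.0, and
# "match_rms_adamw" gives 0.2 * sqrt(2048) ~= 9.05.
def _example_get_lr_scale() -> None:
    shape = torch.Size([2048, 512])
    assert get_lr_scale(shape, "original") == 2.0
    assert get_lr_scale(shape, "rms_to_rms") == 2.0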
def get_adamuon_lr_scale(
param_shape: torch.Size,
adjust_lr_fn: str = "match_rms_adamw",
) -> Tuple[float, bool]:
"""Adjust learning rate based on parameter shape for AdaMuon.
Args:
param_shape: Shape of the parameter tensor
adjust_lr_fn: Scaling function name
Returns:
Tuple of (scale_factor, use_rms_norm)
"""
out_chs, in_chs = (param_shape[-2], param_shape[-1]) if len(param_shape) > 1 else (1., 1.)
if adjust_lr_fn == "match_rms_adamw":
# AdaMuon paper: normalize by RMS, then scale by 0.2 * sqrt(numel)
# https://arxiv.org/abs/2507.11005
return 0.2 * (out_chs * in_chs) ** 0.5, True
elif adjust_lr_fn == "rms_to_rms":
return (out_chs / in_chs) ** 0.5, False
elif adjust_lr_fn == "rsqrt_in":
return in_chs ** -0.5, False
else:
assert False, f'Invalid scaling function "{adjust_lr_fn}" for AdaMuon'
def _is_suitable_for_muon(
param: torch.Tensor,
min_dim_size: int = 4,
max_aspect_ratio: float = 128.,
return_reason: bool = False,
) -> Union[bool, Tuple[bool, str]]:
"""Check if a parameter is suitable for Muon optimization.
Args:
param: Parameter tensor
min_dim_size: Minimum size for non-unit dimensions
max_aspect_ratio: Maximum allowed aspect ratio
return_reason: If True, return (bool, reason_string), else just bool (faster)
Returns:
If return_reason=False: bool indicating suitability
If return_reason=True: Tuple of (is_suitable, reason_string)
Examples:
(64, 128) -> True (or (True, "ok") if return_reason=True)
(96, 3, 4, 4) -> True - will be flattened to (96, 48)
(4, 2048) -> False - extreme aspect ratio
(64,) -> False - insufficient dims
(1, 196, 768) -> False - leading unit dims
    NOTE: these rules were created to balance complexity with covering common timm model cases.
Please let me know if there are non-optimal cases that you run into.
"""
s = param.shape
# Must have at least 2 non-unit dimensions
if param.ndim < 2 or sum(1 for dim_size in s if dim_size > 1) < 2:
return (False, "insufficient_dims") if return_reason else False
# Unit dimension in first two positions indicates:
# - Position embeddings (1, seq, dim)
# - Depthwise convs (out, 1, h, w)
# - Other degenerate cases possibly not caught by first rule
if s[0] == 1 or s[1] == 1:
return (False, "leading_unit_dims") if return_reason else False
if param.ndim >= 3:
# For 3D+ tensors, check what dimensions will be AFTER flattening
# since that's what gets passed to Newton-Schulz iteration
# Flatten mode: (out, in, *spatial) -> (out, in * spatial_prod)
out_ch = s[0]
in_ch_with_spatial = 1
for d in s[1:]:
in_ch_with_spatial *= d
check_dims = (out_ch, in_ch_with_spatial)
else:
# For 2D tensors, check as-is
check_dims = s
# Both dims should be >= minimum size
min_size = min(check_dims)
if min_size < min_dim_size:
if return_reason:
return False, f"min_dim_too_small:{min_size}"
return False
# Aspect ratio shouldn't be too extreme
max_size = max(check_dims)
aspect_ratio = max_size / min_size
if aspect_ratio > max_aspect_ratio:
if return_reason:
return False, f"extreme_aspect_ratio:{aspect_ratio:.1f}"
return False
return (True, "ok") if return_reason else True
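# Hedged sketch mirroring the docstring examples above: these are the shape-based routing
# decisions used to send parameters to Muon vs the AdamW fallback.
def _example_is_suitable_for_muon() -> None:
    assert _is_suitable_for_muon(torch.empty(64, 128))
    assert _is_suitable_for_muon(torch.empty(96, 3, 4, 4))     # flattened to (96, 48)
    assert not _is_suitable_for_muon(torch.empty(4, 2048))     # extreme aspect ratio
    assert not _is_suitable_for_muon(torch.empty(64))          # 1D -> AdamW fallback
    ok, reason = _is_suitable_for_muon(torch.empty(1, 196, 768), return_reason=True)
    assert not ok and reason == "leading_unit_dims"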
def reshape_for_muon(
tensor: torch.Tensor,
mode: str = "flatten",
) -> Tuple[torch.Tensor, torch.Size]:
"""Reshape high-dimensional tensor for Muon processing.
Args:
tensor: Input tensor of shape (out, in, *spatial)
mode: How to handle spatial dimensions
- "flatten": Flatten spatial into output dimension (out, in*H*W)
- "batched": Batch over spatial positions (spatial_prod, out, in) for per-position orthogonalization
Returns:
Reshaped tensor and original shape for restoration
"""
original_shape = tensor.shape
if tensor.ndim == 2:
return tensor, original_shape
if tensor.ndim < 2:
raise ValueError(f"Tensor must have at least 2 dimensions, got {tensor.ndim}")
out_ch, in_ch = tensor.shape[:2]
if mode == "flatten":
# Flatten: (out, in, *spatial) -> (out, in * spatial_prod)
return tensor.reshape(out_ch, -1), original_shape
elif mode == "batched":
# Batched: (out, in, *spatial) -> (spatial_prod, out, in)
# Move spatial dimension to front so zeropower_via_newtonschulz batches over it
reshaped = tensor.reshape(out_ch, in_ch, -1) # (out, in, spatial_prod)
reshaped = reshaped.permute(2, 0, 1) # (spatial_prod, out, in)
return reshaped, original_shape
else:
raise ValueError(f"Unknown mode: {mode}")
def muon(
params: List[torch.Tensor],
grads: List[torch.Tensor],
momentum_bufs: List[torch.Tensor],
*,
lr: float,
weight_decay: float,
momentum: float,
nesterov: bool,
ns_steps: int,
ns_coefficients: NSCoeff,
eps: float,
safety_factor: float,
adjust_lr_fn: Optional[str],
conv_mode: str,
normalize_spatial: bool,
scale_eps: bool,
) -> None:
"""Functional API that performs Muon algorithm computation."""
_single_tensor_muon(
params,
grads,
momentum_bufs,
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
nesterov=nesterov,
ns_steps=ns_steps,
ns_coefficients=ns_coefficients,
eps=eps,
safety_factor=safety_factor,
adjust_lr_fn=adjust_lr_fn,
conv_mode=conv_mode,
normalize_spatial=normalize_spatial,
scale_eps=scale_eps,
)
def adamuon(
params: List[torch.Tensor],
grads: List[torch.Tensor],
momentum_bufs: List[torch.Tensor],
exp_avg_sqs: List[torch.Tensor],
state_steps: List[torch.Tensor],
*,
lr: float,
weight_decay: float,
momentum: float,
nesterov: bool,
beta2: float,
ns_steps: int,
ns_coefficients: NSCoeff,
eps: float,
safety_factor: float,
adjust_lr_fn: Optional[str],
conv_mode: str,
normalize_spatial: bool,
scale_eps: bool,
) -> None:
"""Functional API that performs AdaMuon algorithm computation.
AdaMuon extends Muon with element-wise second moment estimation applied
to orthogonalized update directions, providing Adam-like adaptive scaling
while preserving Muon's geometric benefits.
Reference: https://arxiv.org/abs/2507.11005
"""
_single_tensor_adamuon(
params,
grads,
momentum_bufs,
exp_avg_sqs,
state_steps,
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
nesterov=nesterov,
beta2=beta2,
ns_steps=ns_steps,
ns_coefficients=ns_coefficients,
eps=eps,
safety_factor=safety_factor,
adjust_lr_fn=adjust_lr_fn,
conv_mode=conv_mode,
normalize_spatial=normalize_spatial,
scale_eps=scale_eps,
)
def _single_tensor_muon(
params: List[torch.Tensor],
grads: List[torch.Tensor],
momentum_bufs: List[torch.Tensor],
*,
lr: float,
weight_decay: float,
momentum: float,
nesterov: bool,
ns_steps: int,
ns_coefficients: NSCoeff,
eps: float,
safety_factor: float,
adjust_lr_fn: Optional[str],
conv_mode: str,
normalize_spatial: bool,
scale_eps: bool,
) -> None:
"""Single tensor Muon update."""
ns_coefficients = resolve_ns_coefficients(ns_coefficients, _COEFFICIENTS)
for i, param in enumerate(params):
grad = grads[i]
momentum_buf = momentum_bufs[i]
# Apply weight decay
param.mul_(1 - lr * weight_decay)
# Update momentum buffer
momentum_buf.lerp_(grad, 1. - momentum)
update = grad.lerp_(momentum_buf, momentum) if nesterov else momentum_buf.clone()
# Reshape for processing (handle 3D+ tensors like conv weights)
if update.ndim >= 3:
update_reshaped, original_shape = reshape_for_muon(update, mode=conv_mode)
else:
update_reshaped = update
original_shape = update.shape
# Apply Newton-Schulz orthogonalization
update_ortho = zeropower_via_newtonschulz(
update_reshaped,
ns_steps,
ns_coefficients,
eps=eps,
safety_factor=safety_factor,
scale_eps=scale_eps,
)
# Adjust learning rate based on parameter shape
if adjust_lr_fn:
scale = get_lr_scale(update_ortho.shape, adjust_lr_fn)
else:
scale = 1.0
# Apply spatial normalization and permute back if in batched mode
if conv_mode == "batched" and update_ortho.ndim >= 3:
if normalize_spatial:
scale *= update_ortho.shape[0] ** -0.5
# Permute back: (spatial_prod, out, in) -> (out, in, spatial_prod)
update_ortho = update_ortho.permute(1, 2, 0)
# Reshape back to original shape
update_ortho = update_ortho.reshape(original_shape)
# Apply update
param.add_(update_ortho, alpha=-lr * scale)
def _single_tensor_adamuon(
params: List[torch.Tensor],
grads: List[torch.Tensor],
momentum_bufs: List[torch.Tensor],
exp_avg_sqs: List[torch.Tensor],
state_steps: List[torch.Tensor],
*,
lr: float,
weight_decay: float,
momentum: float,
nesterov: bool,
beta2: float,
ns_steps: int,
ns_coefficients: NSCoeff,
eps: float,
safety_factor: float,
adjust_lr_fn: Optional[str],
conv_mode: str,
normalize_spatial: bool,
scale_eps: bool,
) -> None:
"""Single tensor AdaMuon update.
AdaMuon applies second-moment estimation to the orthogonalized directions,
then rescales using RMS-alignment to maintain stable step sizes.
Algorithm:
1. Update momentum buffer: M = β₁·M + (1-β₁)·G
2. Orthogonalize: O = Newton-Schulz(M) or Newton-Schulz(nesterov_update)
3. Update second moment: v = β₂·v + (1-β₂)·O²
4. Bias correct: v̂ = v/(1-β₂^t)
5. Adaptive scaling: Ô = O / (√v̂ + ε)
6. RMS-aligned rescaling and apply update
"""
ns_coefficients = resolve_ns_coefficients(ns_coefficients, _COEFFICIENTS)
for i, param in enumerate(params):
grad = grads[i]
momentum_buf = momentum_bufs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# Increment step
step_t += 1
step = step_t.item()
# Apply weight decay (decoupled)
param.mul_(1 - lr * weight_decay)
# Update momentum buffer
momentum_buf.lerp_(grad, 1. - momentum)
update = grad.lerp_(momentum_buf, momentum) if nesterov else momentum_buf.clone()
# Reshape for processing (handle 3D+ tensors like conv weights)
if update.ndim >= 3:
update_reshaped, original_shape = reshape_for_muon(update, mode=conv_mode)
else:
update_reshaped = update
original_shape = update.shape
# Apply Newton-Schulz orthogonalization
update_ortho = zeropower_via_newtonschulz(
update_reshaped,
ns_steps,
ns_coefficients,
eps=eps,
safety_factor=safety_factor,
scale_eps=scale_eps,
)
# Reshape back to original shape for second moment tracking
if conv_mode == "batched" and update_ortho.ndim >= 3:
# Permute back: (spatial_prod, out, in) -> (out, in, spatial_prod)
update_ortho = update_ortho.permute(1, 2, 0)
update_ortho = update_ortho.reshape(original_shape)
# Update second moment on orthogonalized directions (element-wise)
exp_avg_sq.mul_(beta2).addcmul_(update_ortho, update_ortho, value=1.0 - beta2)
# Get shape-based LR scaling and whether to apply RMS normalization
if adjust_lr_fn:
scale, use_rms_norm = get_adamuon_lr_scale(update_ortho.shape, adjust_lr_fn)
else:
scale, use_rms_norm = 1.0, False
if use_rms_norm:
# Bias correction not needed if scaling by norm
denom = exp_avg_sq.sqrt().add_(eps)
else:
# Bias correction for second moment
bias_correction2 = 1.0 - beta2 ** step
denom = (exp_avg_sq / bias_correction2).sqrt().add_(eps)
# Adaptive scaling: divide by sqrt of bias-corrected second moment
# This is the key AdaMuon modification
update_adaptive = update_ortho / denom
# RMS-aligned rescaling: normalize by update norm, then scale by shape factor
# Used by AdaMuon paper approach (match_rms_adamw), not by μP approach (rms_to_rms)
if use_rms_norm:
            # eq(8) in AdaMuon paper, 0.2 / RMS(update) = 0.2 * sqrt(numel) / frob(update)
update_norm = update_adaptive.norm().add_(eps)
update_adaptive = update_adaptive / update_norm
# Apply spatial normalization if in batched mode
if conv_mode == "batched" and len(original_shape) >= 3:
if normalize_spatial:
spatial_prod = 1
for d in original_shape[2:]:
spatial_prod *= d
scale *= spatial_prod ** -0.5
# Apply update
param.add_(update_adaptive, alpha=-lr * scale)
class Muon(torch.optim.Optimizer):
"""Muon - MomentUm Orthogonalized by Newton-schulz
Combines Muon for 2D+ parameters (weight matrices) with AdamW for 1D parameters (biases, norms) and
parameter groups with 'use_fallback=True' set (or 'use_muon=False' for compatibility).
Supports two algorithms:
- "muon": Standard Muon algorithm with momentum + orthogonalization
- "adamuon": AdaMuon algorithm that adds element-wise second moment estimation
to orthogonalized directions for Adam-like adaptive scaling
"""
def __init__(
self,
params: ParamsT,
lr: float = 0.02,
weight_decay: float = 0,
momentum: float = 0.95,
nesterov: bool = False,
ns_steps: int = DEFAULT_NS_STEPS,
ns_coefficients: NSCoeff = "quintic",
eps: float = MUON_EPS,
safety_factor: float = 1.0,
adjust_lr_fn: Optional[str] = "match_rms_adamw",
conv_mode: str = "flatten",
normalize_spatial: bool = True,
adamw_lr: Optional[float] = None,
betas: Tuple[float, float] = (0.9, 0.95),
algo: str = "muon",
scale_eps: bool = False,
verbose: bool = False,
):
""" Create Muon optimizer.
Args:
params: Iterable of parameters or dicts defining parameter groups
lr: Learning rate (default: 0.02 for Muon parameters)
weight_decay: Weight decay coefficient
momentum: Momentum factor for Muon
nesterov: Whether to use Nesterov momentum
ns_steps: Number of Newton-Schulz iterations
ns_coefficients: Coefficients for NS iteration
eps: Numerical stability epsilon
safety_factor: Multiplicative safety factor for NS norm
adjust_lr_fn: LR adjustment function - "original", "match_rms_adamw", or "rms_to_rms".
For adamuon mode, can set to None to disable (RMS rescaling handles scaling).
conv_mode: How to handle convolutions - "flatten" or "batched"
normalize_spatial: Whether to normalize by sqrt(spatial_size) in batched mode
adamw_lr: Learning rate for AdamW (1D params), defaults to lr if not specified
betas: Beta coefficients - (beta1, beta2) where beta1 is used for AdamW fallback
and beta2 is used for both AdamW fallback and AdaMuon second moment
algo: Algorithm - "muon" for standard Muon, "adamuon" for AdaMuon with
adaptive second moment estimation (https://arxiv.org/abs/2507.11005)
scale_eps: If True, scale epsilon by sqrt(din/dout) in Newton-Schulz for μP
compatibility (https://arxiv.org/abs/2512.05620)
verbose: Log parameter routing decisions (Muon vs AdamW)
Example:
```python
# Simple usage - automatically uses Muon for 2D+ params, AdamW for 1D
optimizer = Muon(model.parameters(), lr=0.02)
# Use AdaMuon algorithm for adaptive scaling
optimizer = Muon(model.parameters(), lr=6e-4, algo="adamuon")
# Manual control over parameter groups
optimizer = Muon([
{'params': weight_matrices, 'lr': 0.02},
{'params': biases, 'use_fallback': True, 'lr': 3e-4}, # use AdamW if use_fallback=True
])
```
"""
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
if not 0.0 <= momentum < 1.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if conv_mode not in ["flatten", "batched"]:
raise ValueError(f"Invalid conv_mode: {conv_mode}")
if algo not in ["muon", "adamuon"]:
raise ValueError(f"Invalid algo: {algo}. Must be 'muon' or 'adamuon'")
defaults = dict(
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
nesterov=nesterov,
ns_steps=ns_steps,
ns_coefficients=ns_coefficients,
eps=eps,
safety_factor=safety_factor,
adjust_lr_fn=adjust_lr_fn,
conv_mode=conv_mode,
normalize_spatial=normalize_spatial,
adamw_lr=adamw_lr if adamw_lr is not None else lr,
betas=betas,
algo=algo,
scale_eps=scale_eps,
verbose=verbose,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('algo', 'muon')
group.setdefault('scale_eps', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step."""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
verbose = self.defaults.get("verbose", False)
# Tracking for logging (populated on first encounter of each param)
muon_count = 0
adamw_count = 0
routing_reasons = {} if verbose else None
for group in self.param_groups:
algo = group.get("algo", "muon")
# Separate params into Muon and AdamW groups
muon_params = []
muon_grads = []
muon_momentum_bufs = []
# Additional state for adamuon mode
muon_exp_avg_sqs = []
muon_state_steps = []
adamw_params = []
adamw_grads = []
adamw_exp_avgs = []
adamw_exp_avg_sqs = []
adamw_state_steps = []
for p in group["params"]:
if p.grad is None:
continue
if p.grad.is_sparse:
raise RuntimeError("Muon does not support sparse gradients")
state = self.state[p]
# Determine routing on first encounter (cache in state)
if "use_muon" not in state:
# Check explicit flags first (support both 'use_fallback' and 'use_muon' for compatibility)
reason = None
if group.get("use_fallback", False):
# use_fallback=True means use AdamW (use_muon=False)
state["use_muon"] = False
if verbose:
reason = "use_fallback_flag"
elif "use_muon" in group:
# Explicit use_muon flag for compatibility with other Muon implementations
state["use_muon"] = group["use_muon"]
if verbose:
reason = "use_muon_flag"
else:
# Check shape suitability
if verbose:
suitable, reason = _is_suitable_for_muon(p, return_reason=True)
else:
suitable = _is_suitable_for_muon(p, return_reason=False)
state["use_muon"] = suitable
# Track routing decision for logging
if routing_reasons is not None and reason is not None:
shape_str = "x".join(str(s) for s in p.shape)
if shape_str not in routing_reasons:
routing_reasons[shape_str] = []
routing_reasons[shape_str].append(reason)
# Use cached routing decision
use_muon = state["use_muon"]
if use_muon:
# Collect Muon params
muon_params.append(p)
muon_grads.append(p.grad)
muon_count += 1
# State initialization for Muon/AdaMuon
if "momentum_buffer" not in state:
state["momentum_buffer"] = torch.zeros_like(p, memory_format=torch.preserve_format)
muon_momentum_bufs.append(state["momentum_buffer"])
# Additional state for adamuon mode
if algo == "adamuon":
if "step" not in state:
state["step"] = torch.tensor(0.)
state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
muon_exp_avg_sqs.append(state["exp_avg_sq"])
muon_state_steps.append(state["step"])
else:
# Collect AdamW/NAdamW params
adamw_params.append(p)
adamw_grads.append(p.grad)
adamw_count += 1
# State initialization for AdamW
if "step" not in state:
state["step"] = torch.tensor(0.)
state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
adamw_exp_avgs.append(state["exp_avg"])
adamw_exp_avg_sqs.append(state["exp_avg_sq"])
adamw_state_steps.append(state["step"])
# Apply Muon/AdaMuon updates
if muon_params:
if algo == "adamuon":
_, beta2 = group["betas"]
adamuon(
muon_params,
muon_grads,
muon_momentum_bufs,
muon_exp_avg_sqs,
muon_state_steps,
lr=group["lr"],
weight_decay=group["weight_decay"],
momentum=group["momentum"],
nesterov=group["nesterov"],
beta2=beta2,
ns_steps=group["ns_steps"],
ns_coefficients=group["ns_coefficients"],
eps=group["eps"],
safety_factor=group["safety_factor"],
adjust_lr_fn=group["adjust_lr_fn"],
conv_mode=group["conv_mode"],
normalize_spatial=group["normalize_spatial"],
scale_eps=group["scale_eps"],
)
else:
muon(
muon_params,
muon_grads,
muon_momentum_bufs,
lr=group["lr"],
weight_decay=group["weight_decay"],
momentum=group["momentum"],
nesterov=group["nesterov"],
ns_steps=group["ns_steps"],
ns_coefficients=group["ns_coefficients"],
eps=group["eps"],
safety_factor=group["safety_factor"],
adjust_lr_fn=group["adjust_lr_fn"],
conv_mode=group["conv_mode"],
normalize_spatial=group["normalize_spatial"],
scale_eps=group["scale_eps"],
)
# Apply AdamW updates
if adamw_params:
beta1, beta2 = group["betas"]
if group["nesterov"]:
# use nadamw for fallback optimizer if nesterov is enabled
nadamw(
adamw_params,
adamw_grads,
adamw_exp_avgs,
adamw_exp_avg_sqs,
adamw_state_steps,
foreach=None,
beta1=beta1,
beta2=beta2,
lr=group["adamw_lr"],
weight_decay=group["weight_decay"],
eps=group["eps"],
caution=False,
maximize=False,
capturable=False,
max_lr=None,
)
else:
adamw(
adamw_params,
adamw_grads,
adamw_exp_avgs,
adamw_exp_avg_sqs,
[], # max_exp_avg_sqs (not using amsgrad)
adamw_state_steps,
foreach=None,
amsgrad=False,
beta1=beta1,
beta2=beta2,
lr=group["adamw_lr"],
weight_decay=group["weight_decay"],
eps=group["eps"],
caution=False,
maximize=False,
capturable=False,
max_lr=None,
)
# Log routing summary when we have new routing decisions
if routing_reasons and len(routing_reasons) > 0:
# Concise summary
_logger.info(f"Muon parameter routing: {muon_count} Muon, {adamw_count} AdamW")
# Group by reason for detailed breakdown
reason_groups = {}
for shape_str, reasons in sorted(routing_reasons.items()):
for reason in reasons:
if reason not in reason_groups:
reason_groups[reason] = []
reason_groups[reason].append(shape_str)
# Log summary counts per reason
reason_summary = []
for reason, shapes in sorted(reason_groups.items()):
reason_summary.append(f"{reason}={len(shapes)}")
_logger.info(f" Breakdown: {', '.join(reason_summary)}")
# Detailed breakdown at INFO level
if _logger.isEnabledFor(logging.INFO):
for reason, shapes in sorted(reason_groups.items()):
optimizer_name = "Muon" if reason == "ok" else "AdamW"
_logger.info(f" {reason} -> {optimizer_name}:")
for shape in shapes[:10]:
_logger.info(f" {shape}")
if len(shapes) > 10:
_logger.info(f" ... and {len(shapes) - 10} more")
return loss
def resolve_ns_coefficients(
value: Union[str, Sequence[float], Sequence[Sequence[float]]],
presets: Mapping[str, Sequence[Sequence[float]]]
) -> List[Tuple[float, float, float]]:
# tiny helpers (kept inline for succinctness)
is_seq = lambda x: isinstance(x, Sequence) and not isinstance(x, (str, bytes))
is_real = lambda x: isinstance(x, numbers.Real) and not isinstance(x, bool)
def as_coeff(x: Sequence[float]) -> Tuple[float, float, float]:
if not is_seq(x) or len(x) != 3 or not all(is_real(v) for v in x):
raise ValueError(f"Coefficient must be length-3 of real numbers, got: {x!r}")
a, b, c = x # type: ignore[misc]
return float(a), float(b), float(c)
if isinstance(value, str):
if value not in presets:
valid = ", ".join(sorted(presets.keys()))
raise ValueError(f"Unknown coefficients preset '{value}'. Valid options: {valid}")
seq = presets[value]
if not is_seq(seq) or len(seq) == 0:
raise ValueError(f"Preset '{value}' is empty or invalid")
return [as_coeff(item) for item in seq] # validate & cast
if not is_seq(value):
raise TypeError(
"Coefficients must be a preset name (str), a 3-sequence (a,b,c), "
"or a sequence of 3-sequences."
)
# Decide single triple vs list-of-triples by structure
if len(value) == 3 and all(is_real(v) for v in value): # type: ignore[index]
return [as_coeff(value)] # single triple -> wrap
# Otherwise treat as list/tuple of triples
out = []
for i, item in enumerate(value): # type: ignore[assignment]
if not is_seq(item):
raise TypeError(f"Item {i} is not a sequence: {item!r}")
out.append(as_coeff(item))
if not out:
raise ValueError("Coefficient list cannot be empty")
return out | {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/optim/muon.py",
"license": "Apache License 2.0",
"lines": 876,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/layers/_fx.py | from typing import Callable, Dict, List, Optional, Union, Tuple, Type
import torch
from torch import nn
try:
# NOTE we wrap torchvision fns to use timm leaf / no trace definitions
from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor
from torchvision.models.feature_extraction import get_graph_node_names as _get_graph_node_names
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
__all__ = [
'register_notrace_module',
'is_notrace_module',
'get_notrace_modules',
'register_notrace_function',
'is_notrace_function',
'get_notrace_functions',
'create_feature_extractor',
'get_graph_node_names',
]
# modules to treat as leafs when tracing
_leaf_modules = set()
def register_notrace_module(module: Type[nn.Module]):
"""
Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
"""
_leaf_modules.add(module)
return module
def is_notrace_module(module: Type[nn.Module]):
return module in _leaf_modules
def get_notrace_modules():
return list(_leaf_modules)
# Functions we want to autowrap (treat them as leaves)
_autowrap_functions = set()
def register_notrace_function(name_or_fn):
_autowrap_functions.add(name_or_fn)
return name_or_fn
def is_notrace_function(func: Callable):
return func in _autowrap_functions
def get_notrace_functions():
return list(_autowrap_functions)
def get_graph_node_names(model: nn.Module) -> Tuple[List[str], List[str]]:
return _get_graph_node_names(
model,
tracer_kwargs={
'leaf_modules': list(_leaf_modules),
'autowrap_functions': list(_autowrap_functions)
}
)
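# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A hypothetical example wiring the wrappers together: list the traceable node names of a
# model, then build an extractor returning one intermediate feature map. The node choice
# and the 224x224 input size are assumptions for illustration only.
def _fx_usage_sketch(model: nn.Module) -> Dict[str, torch.Tensor]:
    train_nodes, eval_nodes = get_graph_node_names(model)
    extractor = create_feature_extractor(model, return_nodes={eval_nodes[-2]: 'feat'})
    return extractor(torch.randn(1, 3, 224, 224))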
def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]):
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
return _create_feature_extractor(
model, return_nodes,
tracer_kwargs={
'leaf_modules': list(_leaf_modules),
'autowrap_functions': list(_autowrap_functions)
}
) | {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/layers/_fx.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/pytorch-image-models:timm/models/mobilenetv5.py | from functools import partial
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import (
SelectAdaptivePool2d,
Linear,
LayerType,
RmsNorm2d,
ConvNormAct,
create_conv2d,
get_norm_layer,
get_norm_act_layer,
to_2tuple,
)
from ._builder import build_model_with_cfg
from ._efficientnet_blocks import SqueezeExcite, UniversalInvertedResidual
from ._efficientnet_builder import (
BlockArgs,
EfficientNetBuilder,
decode_arch_def,
efficientnet_init_weights,
round_channels,
)
from ._features import feature_take_indices
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['MobileNetV5', 'MobileNetV5Encoder']
_GELU = partial(nn.GELU, approximate='tanh')
@register_notrace_module
class MobileNetV5MultiScaleFusionAdapter(nn.Module):
"""Multi-layer fusion token adapter.
Args:
in_chs: List of input channel counts for each feature scale.
out_chs: The number of output channels.
output_resolution: The output resolution.
expansion_ratio: The FFN expansion ratio.
interpolation_mode: The upsampling interpolation mode.
layer_scale_init_value: The initial value of the layer scale, no layer scale if None.
"""
def __init__(
self,
in_chs: Union[int, List[int]],
out_chs: int,
output_resolution: int,
expansion_ratio: float = 2.0,
interpolation_mode: str = "nearest",
layer_scale_init_value: Optional[float] = None,
noskip: bool = True,
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
device=None,
dtype=None,
):
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.in_channels = sum(in_chs) if isinstance(in_chs, Sequence) else in_chs
self.out_channels = out_chs
self.output_resolution = to_2tuple(output_resolution)
self.expansion_ratio = expansion_ratio
self.interpolation_mode = interpolation_mode
self.layer_scale_init_value = layer_scale_init_value
self.noskip = noskip
act_layer = act_layer or _GELU
norm_layer = norm_layer or RmsNorm2d
self.ffn = UniversalInvertedResidual(
in_chs=self.in_channels,
out_chs=self.out_channels,
dw_kernel_size_mid=0,
exp_ratio=self.expansion_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
noskip=self.noskip,
layer_scale_init_value=self.layer_scale_init_value,
**dd,
)
self.norm = norm_layer(self.out_channels, **dd)
def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
# Inputs list of [B, C, H, W] tensors
high_resolution = inputs[0].shape[-2:] # Assuming the first input is the highest resolution.
resized_inputs = []
for _, img in enumerate(inputs):
feat_size = img.shape[-2:]
if feat_size[0] < high_resolution[0] or feat_size[1] < high_resolution[1]:
img = F.interpolate(img, size=high_resolution, mode=self.interpolation_mode)
resized_inputs.append(img)
channel_cat_imgs = torch.cat(resized_inputs, dim=1) # Cat on channel dim, must equal self.in_channels
img = self.ffn(channel_cat_imgs)
if high_resolution[0] != self.output_resolution[0] or high_resolution[1] != self.output_resolution[1]:
# Interpolate / pool to target output_resolution if highest feature resolution differs
if (
high_resolution[0] % self.output_resolution[0] != 0 or
high_resolution[1] % self.output_resolution[1] != 0
):
img = F.interpolate(img, size=self.output_resolution, mode="bilinear")
else:
h_strides = high_resolution[0] // self.output_resolution[0]
w_strides = high_resolution[1] // self.output_resolution[1]
img = F.avg_pool2d(
img,
kernel_size=(h_strides, w_strides),
stride=(h_strides, w_strides),
)
img = self.norm(img)
return img
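# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A minimal, hypothetical example of driving the fusion adapter above: two feature maps at
# different strides are upsampled to the highest resolution, concatenated on the channel
# dim, mixed by the FFN, then pooled to `output_resolution`. Channel counts are arbitrary.
def _msfa_usage_sketch() -> torch.Tensor:
    msfa = MobileNetV5MultiScaleFusionAdapter(
        in_chs=[64, 128],       # channel counts of the two fused feature scales
        out_chs=256,
        output_resolution=8,
    )
    feats = [torch.randn(1, 64, 32, 32), torch.randn(1, 128, 16, 16)]
    return msfa(feats)          # expected shape (1, 256, 8, 8)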
class MobileNetV5(nn.Module):
""" MobiletNet-V5
"""
def __init__(
self,
block_args: BlockArgs,
num_classes: int = 1000,
in_chans: int = 3,
stem_size: int = 16,
stem_bias: bool = True,
fix_stem: bool = False,
num_features: int = 2048,
pad_type: str = '',
use_msfa: bool = True,
msfa_indices: List[int] = (-2, -1),
msfa_output_resolution: int = 16,
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
se_from_exp: bool = True,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
layer_scale_init_value: Optional[float] = None,
global_pool: str = 'avg',
device=None,
dtype=None,
):
"""
Args:
block_args: Arguments for blocks of the network.
num_classes: Number of classes for classification head.
in_chans: Number of input image channels.
stem_size: Number of output channels of the initial stem convolution.
fix_stem: If True, don't scale stem by round_chs_fn.
num_features: Number of output channels of the conv head layer.
            stem_bias: If True, add a learnable bias to the stem convolution.
pad_type: Type of padding to use for convolution layers.
act_layer: Type of activation layer.
norm_layer: Type of normalization layer.
aa_layer: Type of anti-aliasing layer.
se_layer: Type of Squeeze-and-Excite layer.
se_from_exp: If True, calculate SE channel reduction from expanded mid channels.
round_chs_fn: Callable to round number of filters based on depth multiplier.
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth rate.
layer_scale_init_value: Enable layer scale on compatible blocks if not None.
global_pool: Type of pooling to use for global pooling features of the FC head.
"""
super().__init__()
dd = {'device': device, 'dtype': dtype}
act_layer = act_layer or _GELU
norm_layer = get_norm_layer(norm_layer) or RmsNorm2d
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
se_layer = se_layer or SqueezeExcite
self.num_classes = num_classes
self.in_chans = in_chans
self.drop_rate = drop_rate
self.grad_checkpointing = False
self.msfa_indices = msfa_indices
self.msfa_output_resolution = msfa_output_resolution
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = ConvNormAct(
in_chans,
stem_size,
kernel_size=3,
stride=2,
padding=pad_type,
bias=stem_bias,
norm_layer=norm_layer,
act_layer=act_layer,
**dd,
)
# Middle stages (IR/ER/DS Blocks)
builder = EfficientNetBuilder(
output_stride=32,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
**dd,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
self.stage_ends = [f['stage'] for f in self.feature_info]
self.num_features = builder.in_chs # features of last stage, output of forward_features()
# Neck (aggregation) + Head + Pooling
if use_msfa:
self.num_features = self.head_hidden_size = num_features # output of msfa is output of forward_features()
# Map msfa indices to feature info and calculate sum of feature channels
self.msfa_indices = feature_take_indices(len(self.feature_info), self.msfa_indices)[0]
self.msfa_in_chs = sum([self.feature_info[mi]['num_chs'] for mi in self.msfa_indices])
self.msfa = MobileNetV5MultiScaleFusionAdapter(
in_chs=self.msfa_in_chs,
out_chs=num_features,
output_resolution=self.msfa_output_resolution,
norm_layer=norm_layer,
act_layer=act_layer,
**dd,
)
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = None
self.norm_head = None
else:
self.num_features = builder.in_chs # features of last stage, output of forward_features()
self.head_hidden_size = num_features
self.msfa = None
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
num_pooled_chs = self.num_features * self.global_pool.feat_mult()
# mobilenet-v4 style post-pooling PW conv is followed by a norm+act layer
self.conv_head = create_conv2d(num_pooled_chs, self.head_hidden_size, 1, padding=pad_type, **dd)
self.norm_head = norm_act_layer(self.head_hidden_size, **dd)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes, **dd) if num_classes > 0 else nn.Identity()
efficientnet_init_weights(self)
def as_sequential(self):
        layers = [self.conv_stem]
layers.extend(self.blocks)
layers.append(self.global_pool)
if self.conv_head is not None:
layers.append(self.conv_head)
if self.norm_head is not None:
layers.append(self.norm_head)
layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
return nn.Sequential(*layers)
@torch.jit.ignore
def group_matcher(self, coarse: bool = False):
return dict(
stem=r'^conv_stem|bn1',
blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)'
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
self.num_classes = num_classes
# NOTE: cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
extra_blocks: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info
        Returns:
            List of intermediate feature tensors if `intermediates_only` is True, otherwise a tuple of
            (final_features, intermediates).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
if stop_early:
assert intermediates_only, 'Must use intermediates_only for early stopping.'
intermediates = []
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
        # FIXME MSFA and forward_intermediates overlap, they both take indices from specific features
# When a user wants to grab specific feature maps for a downstream task AND have the msfa output
# what should we do? Accumulate two intermediates? One for msfa and one for take_indices?
# forward pass
feat_idx = 0 # stem is index 0
x = self.conv_stem(x)
if feat_idx in take_indices:
intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index]
for blk in blocks:
feat_idx += 1
x = blk(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
# FIXME see note above
        # self.msfa(msfa_intermediates)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
extra_blocks: bool = False,
):
""" Prune layers not required for specified intermediates.
"""
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
max_index = self.stage_ends[max_index]
        num_blocks = len(self.blocks)
        self.blocks = self.blocks[:max_index]  # truncate blocks w/ stem as idx 0
        if max_index < num_blocks:  # head no longer applies if trailing blocks were pruned
self.conv_head = None
self.norm_head = None
if prune_head:
self.conv_head = None
self.norm_head = None
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
if self.msfa is not None:
            # When MSFA aggregation layer is present, we gather intermediates as in forward_intermediates
feat_idx = 0 # offset by one from blocks index due to stem feature
intermediates = []
x = self.conv_stem(x)
if feat_idx in self.msfa_indices:
intermediates.append(x)
for blk in self.blocks:
feat_idx += 1
# FIXME fix grad checkpointing
x = blk(x)
if feat_idx in self.msfa_indices:
intermediates.append(x)
x = self.msfa(intermediates)
else:
x = self.conv_stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=True)
else:
x = self.blocks(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
if self.conv_head is not None:
x = self.conv_head(x)
if self.norm_head is not None:
x = self.norm_head(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
if pre_logits:
return x
return self.classifier(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
class MobileNetV5Encoder(nn.Module):
"""MobileNetV5 Vision Encoder"""
def __init__(
self,
block_args: BlockArgs,
in_chans: int = 3,
stem_size: int = 64,
stem_bias: bool = True,
fix_stem: bool = False,
pad_type: str = '',
msfa_indices: Sequence[int] = (-2, -1),
msfa_output_resolution: int = 16,
act_layer: Optional[LayerType] = None,
norm_layer: Optional[LayerType] = None,
aa_layer: Optional[LayerType] = None,
se_layer: Optional[LayerType] = None,
se_from_exp: bool = True,
round_chs_fn: Callable = round_channels,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
layer_scale_init_value: Optional[float] = None,
device=None,
dtype=None,
):
super().__init__()
dd = {'device': device, 'dtype': dtype}
act_layer = act_layer or _GELU
norm_layer = get_norm_layer(norm_layer) or RmsNorm2d
se_layer = se_layer or SqueezeExcite
self.num_classes = 0 # Exists to satisfy ._hub module APIs.
self.in_chans = in_chans
self.drop_rate = drop_rate
self.grad_checkpointing = False
# Stem
if not fix_stem:
stem_size = round_chs_fn(stem_size)
self.conv_stem = ConvNormAct(
in_chans,
stem_size,
kernel_size=3,
stride=2,
padding=pad_type,
bias=stem_bias,
norm_layer=norm_layer,
act_layer=act_layer,
**dd,
)
builder = EfficientNetBuilder(
output_stride=32,
pad_type=pad_type,
round_chs_fn=round_chs_fn,
se_from_exp=se_from_exp,
act_layer=act_layer,
norm_layer=norm_layer,
aa_layer=aa_layer,
se_layer=se_layer,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
**dd,
)
self.blocks = nn.Sequential(*builder(stem_size, block_args))
self.feature_info = builder.features
self.stage_ends = [f['stage'] for f in self.feature_info]
self.num_features = self.head_hidden_size = 2048 # output of msfa is output of forward_features()
# Map msfa indices to feature info and calculate sum of feature channels
self.msfa_indices = feature_take_indices(len(self.feature_info), msfa_indices)[0]
self.msfa_in_chs = sum([self.feature_info[mi]['num_chs'] for mi in self.msfa_indices])
self.msfa_output_resolution = msfa_output_resolution
self.msfa = MobileNetV5MultiScaleFusionAdapter(
in_chs=self.msfa_in_chs,
out_chs=self.num_features,
output_resolution=self.msfa_output_resolution,
norm_layer=norm_layer,
act_layer=act_layer,
**dd,
)
efficientnet_init_weights(self)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
extra_blocks: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: (Unused) Applies norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
extra_blocks: Include outputs of all blocks and head conv in output, does not align with feature_info
        Returns:
            List of intermediate feature tensors if `intermediates_only` is True, otherwise a tuple of
            (MSFA output, intermediates).
        """
del norm
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
if stop_early:
assert intermediates_only, 'Must use intermediates_only for early stopping.'
        # MobileNet v5's MultiScaleFusionAdapter takes intermediates from specific feature indices and uses them in
# its computation. These MSFA indices are not guaranteed to be captured by the `indices` parameter passed to
# this function, so we accumulate two sets of indices, one that aligns with the `indices` parameter and one
# that is required by the MSFA block.
intermediates = []
msfa_intermediates = []
if extra_blocks:
take_indices, max_index = feature_take_indices(len(self.blocks) + 1, indices)
else:
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
# forward pass
feat_idx = 0 # stem is index 0
x = self.conv_stem(x)
if feat_idx in take_indices:
intermediates.append(x)
if feat_idx in self.msfa_indices:
msfa_intermediates.append(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index]
for blk in blocks:
feat_idx += 1
x = blk(x)
if feat_idx in take_indices:
intermediates.append(x)
if feat_idx in self.msfa_indices:
msfa_intermediates.append(x)
if intermediates_only:
return intermediates
return self.msfa(msfa_intermediates), intermediates
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
feat_idx = 0 # offset by one from blocks index due to stem feature
intermediates = []
x = self.conv_stem(x)
if feat_idx in self.msfa_indices:
intermediates.append(x)
for blk in self.blocks:
feat_idx += 1
# FIXME fix grad checkpointing
x = blk(x)
if feat_idx in self.msfa_indices:
intermediates.append(x)
return self.msfa(intermediates)
def forward_head(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError("MobileNetV5Encoder does not support classification use cases.")
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.forward_features(x)
def checkpoint_filter_fn(
state_dict: Dict[str, torch.Tensor],
model,
) -> Dict[str, torch.Tensor]:
""" convert weights from gemma encoders """
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('state_dict', state_dict)
if 'model.vision_tower.timm_model.conv_stem.conv.weight' in state_dict:
prefix = 'model.vision_tower.timm_model.'
state_dict = {k.replace(prefix, ''): v for k, v in state_dict.items() if prefix in k}
return state_dict
def _create_mnv5_encoder(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV5Encoder:
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4))
feature_cfg = dict(out_indices=out_indices, feature_cls='getter')
kwargs_filter = (
'num_classes',
'num_features',
'head_conv',
'head_bias',
'head_norm',
'global_pool',
)
model = build_model_with_cfg(
MobileNetV5Encoder,
variant,
pretrained,
pretrained_strict=False,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=feature_cfg,
kwargs_filter=kwargs_filter,
**kwargs,
)
return model
def _create_mnv5(variant: str, pretrained: bool = False, **kwargs) -> MobileNetV5:
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4))
feature_cfg = dict(out_indices=out_indices, feature_cls='getter')
model = build_model_with_cfg(
MobileNetV5,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=feature_cfg,
**kwargs,
)
return model
def _gen_mobilenet_v5(
variant: str,
channel_multiplier: float = 1.0,
group_size=None,
pretrained: bool = False,
encoder: bool = False,
**kwargs,
) -> MobileNetV5Encoder:
if 'mobilenetv5_base' in variant:
arch_def: list[list[str]] = [
# Stage 0: 128x128 in
[
'er_r1_k3_s2_e4_c128',
'er_r1_k3_s1_e4_c128',
'er_r1_k3_s1_e4_c128',
],
# Stage 1: 256x256 in
[
'uir_r1_a3_k5_s2_e6_c256',
'uir_r1_a5_k0_s1_e4_c256',
'uir_r1_a3_k0_s1_e4_c256',
'uir_r1_a5_k0_s1_e4_c256',
'uir_r1_a3_k0_s1_e4_c256',
],
# Stage 2: 640x640 in
[
"uir_r1_a5_k5_s2_e6_c512",
"uir_r1_a5_k0_s1_e4_c512",
"uir_r1_a5_k0_s1_e4_c512",
"uir_r1_a0_k0_s1_e1_c512",
'mqa_r1_k3_h8_s2_d64_c512',
"uir_r1_a0_k0_s1_e2_c512",
'mqa_r1_k3_h8_s2_d64_c512',
"uir_r1_a0_k0_s1_e2_c512",
'mqa_r1_k3_h8_s2_d64_c512',
"uir_r1_a0_k0_s1_e2_c512",
'mqa_r1_k3_h8_s2_d64_c512',
"uir_r1_a0_k0_s1_e2_c512",
'mqa_r1_k3_h8_s2_d64_c512',
"uir_r1_a0_k0_s1_e2_c512",
'mqa_r1_k3_h8_s2_d64_c512',
"uir_r1_a0_k0_s1_e2_c512",
],
# Stage 3: 1280x1280 in
[
"uir_r1_a5_k5_s2_e6_c1024",
'mqa_r1_k3_h16_s1_d64_c1024',
"uir_r1_a0_k0_s1_e2_c1024",
'mqa_r1_k3_h16_s1_d64_c1024',
"uir_r1_a0_k0_s1_e2_c1024",
'mqa_r1_k3_h16_s1_d64_c1024',
"uir_r1_a0_k0_s1_e2_c1024",
'mqa_r1_k3_h16_s1_d64_c1024',
"uir_r1_a0_k0_s1_e2_c1024",
'mqa_r1_k3_h16_s1_d64_c1024',
"uir_r1_a0_k0_s1_e2_c1024",
'mqa_r1_k3_h16_s1_d64_c1024',
"uir_r1_a0_k0_s1_e2_c1024",
'mqa_r1_k3_h16_s1_d64_c1024',
"uir_r1_a0_k0_s1_e2_c1024",
],
]
else:
arch_def: list[list[str]] = [
# Stage 0: 128x128 in
[
'er_r1_k3_s2_e4_c128',
'er_r1_k3_s1_e4_c128',
'er_r1_k3_s1_e4_c128',
],
# Stage 1: 256x256 in
[
'uir_r1_a3_k5_s2_e6_c256',
'uir_r1_a5_k0_s1_e4_c256',
'uir_r1_a3_k0_s1_e4_c256',
'uir_r1_a5_k0_s1_e4_c256',
'uir_r1_a3_k0_s1_e4_c256',
],
# Stage 2: 640x640 in
[
"uir_r1_a5_k5_s2_e6_c640",
"uir_r1_a5_k0_s1_e4_c640",
"uir_r1_a5_k0_s1_e4_c640",
"uir_r1_a5_k0_s1_e4_c640",
"uir_r1_a5_k0_s1_e4_c640",
"uir_r1_a5_k0_s1_e4_c640",
"uir_r1_a5_k0_s1_e4_c640",
"uir_r1_a5_k0_s1_e4_c640",
"uir_r1_a0_k0_s1_e1_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
"mqa_r1_k3_h12_v2_s1_d64_c640",
"uir_r1_a0_k0_s1_e2_c640",
],
# Stage 3: 1280x1280 in
[
"uir_r1_a5_k5_s2_e6_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
"mqa_r1_k3_h16_s1_d96_c1280",
"uir_r1_a0_k0_s1_e2_c1280",
],
]
model_kwargs = dict(
block_args=decode_arch_def(arch_def, group_size=group_size),
stem_size=64,
fix_stem=channel_multiplier < 1.0,
round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
norm_layer=RmsNorm2d,
act_layer=_GELU,
layer_scale_init_value=1e-5,
)
model_kwargs = dict(model_kwargs, **kwargs)
if encoder:
model = _create_mnv5_encoder(variant, pretrained, **model_kwargs)
else:
model = _create_mnv5(variant, pretrained, **model_kwargs)
return model
def _cfg(url: str = '', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (16, 16),
'crop_pct': 1.0, 'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'conv_stem.conv', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Encoder-only config for Gemma 3n Transformers integration
'mobilenetv5_300m_enc': _cfg(
mean=(0., 0., 0.), std=(1., 1., 1.),
input_size=(3, 768, 768),
num_classes=0),
# Gemma 3n encoder weights for timm use / fine-tune
'mobilenetv5_300m.gemma3n': _cfg(
hf_hub_id='timm/',
mean=(0., 0., 0.), std=(1., 1., 1.),
input_size=(3, 768, 768),
num_classes=0,
license='gemma'),
# WIP classification configs for testing
'mobilenetv5_base.untrained': _cfg(
# hf_hub_id='timm/',
num_classes=1000)
})
@register_model
def mobilenetv5_300m_enc(pretrained: bool = False, **kwargs) -> MobileNetV5Encoder:
"""MobileNet V5 Vision Encoder"""
pad_type = kwargs.pop('pad_type', 'same')
model = _gen_mobilenet_v5(
'mobilenetv5_300m_enc',
pretrained=pretrained,
encoder=True,
pad_type=pad_type,
**kwargs,
)
return model
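# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A hypothetical smoke test of the encoder factory above; with a 768x768 input the MSFA
# output is expected at the default 16x16 grid with 2048 channels.
def _mnv5_encoder_usage_sketch() -> torch.Tensor:
    model = mobilenetv5_300m_enc(pretrained=False)
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 768, 768))  # expected shape (1, 2048, 16, 16)
    return feats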
@register_model
def mobilenetv5_300m(pretrained: bool = False, **kwargs) -> MobileNetV5:
model = _gen_mobilenet_v5('mobilenetv5_300m', pretrained=pretrained, **kwargs)
return model
@register_model
def mobilenetv5_base(pretrained: bool = False, **kwargs) -> MobileNetV5:
model = _gen_mobilenet_v5('mobilenetv5_base', pretrained=pretrained, **kwargs)
return model
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/models/mobilenetv5.py",
"license": "Apache License 2.0",
"lines": 792,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/data/naflex_mixup.py | """Variable‑size Mixup / CutMix utilities for NaFlex data loaders.
This module provides:
* `mix_batch_variable_size` – pixel‑level Mixup/CutMix that operates on a
list of images whose spatial sizes differ, mixing only their central overlap
so no resizing is required.
* `pairwise_mixup_target` – builds soft‑label targets that exactly match the
per‑sample pixel provenance produced by the mixer.
* `NaFlexMixup` – a callable functor that wraps the two helpers and stores
all augmentation hyper‑parameters in one place, making it easy to plug into
different dataset wrappers.
Hacked together by / Copyright 2025, Ross Wightman, Hugging Face
"""
import math
import random
from typing import Dict, List, Tuple, Union
import torch
def mix_batch_variable_size(
imgs: List[torch.Tensor],
*,
mixup_alpha: float = 0.8,
cutmix_alpha: float = 1.0,
switch_prob: float = 0.5,
local_shuffle: int = 4,
) -> Tuple[List[torch.Tensor], List[float], Dict[int, int]]:
"""Apply Mixup or CutMix on a batch of variable-sized images.
Sorts images by aspect ratio and pairs neighboring samples. Only the mutual
central overlap region of each pair is mixed.
Args:
imgs: List of transformed images shaped (C, H, W).
mixup_alpha: Beta distribution alpha for Mixup. Set to 0 to disable.
cutmix_alpha: Beta distribution alpha for CutMix. Set to 0 to disable.
switch_prob: Probability of using CutMix when both modes are enabled.
local_shuffle: Size of local windows for shuffling after aspect sorting.
Returns:
Tuple of (mixed_imgs, lam_list, pair_to) where:
- mixed_imgs: List of mixed images
- lam_list: Per-sample lambda values representing mixing degree
- pair_to: Mapping i -> j of which sample was mixed with which
"""
if len(imgs) < 2:
raise ValueError("Need at least two images to perform Mixup/CutMix.")
# Decide augmentation mode and raw λ
if mixup_alpha > 0.0 and cutmix_alpha > 0.0:
use_cutmix = torch.rand(()).item() < switch_prob
alpha = cutmix_alpha if use_cutmix else mixup_alpha
elif mixup_alpha > 0.0:
use_cutmix = False
alpha = mixup_alpha
elif cutmix_alpha > 0.0:
use_cutmix = True
alpha = cutmix_alpha
else:
raise ValueError("Both mixup_alpha and cutmix_alpha are zero – nothing to do.")
lam_raw = torch.distributions.Beta(alpha, alpha).sample().item()
lam_raw = max(0.0, min(1.0, lam_raw)) # numerical safety
# Pair images by nearest aspect ratio
order = sorted(range(len(imgs)), key=lambda i: imgs[i].shape[2] / imgs[i].shape[1])
if local_shuffle > 1:
for start in range(0, len(order), local_shuffle):
random.shuffle(order[start:start + local_shuffle])
pair_to: Dict[int, int] = {}
for a, b in zip(order[::2], order[1::2]):
pair_to[a] = b
pair_to[b] = a
odd_one = order[-1] if len(imgs) % 2 else None
mixed_imgs: List[torch.Tensor] = [None] * len(imgs)
lam_list: List[float] = [1.0] * len(imgs)
for i in range(len(imgs)):
if i == odd_one:
mixed_imgs[i] = imgs[i]
continue
j = pair_to[i]
xi, xj = imgs[i], imgs[j]
_, hi, wi = xi.shape
_, hj, wj = xj.shape
dest_area = hi * wi
# Central overlap common to both images
oh, ow = min(hi, hj), min(wi, wj)
overlap_area = oh * ow
top_i, left_i = (hi - oh) // 2, (wi - ow) // 2
top_j, left_j = (hj - oh) // 2, (wj - ow) // 2
xi = xi.clone()
if use_cutmix:
# CutMix: random rectangle inside the overlap
cut_ratio = math.sqrt(1.0 - lam_raw)
ch, cw = int(oh * cut_ratio), int(ow * cut_ratio)
cut_area = ch * cw
y_off = random.randint(0, oh - ch)
x_off = random.randint(0, ow - cw)
yl_i, xl_i = top_i + y_off, left_i + x_off
yl_j, xl_j = top_j + y_off, left_j + x_off
xi[:, yl_i: yl_i + ch, xl_i: xl_i + cw] = xj[:, yl_j: yl_j + ch, xl_j: xl_j + cw]
mixed_imgs[i] = xi
corrected_lam = 1.0 - cut_area / float(dest_area)
lam_list[i] = corrected_lam
else:
# Mixup: blend the entire overlap region
patch_i = xi[:, top_i:top_i + oh, left_i:left_i + ow]
patch_j = xj[:, top_j:top_j + oh, left_j:left_j + ow]
blended = patch_i.mul(lam_raw).add_(patch_j, alpha=1.0 - lam_raw)
xi[:, top_i:top_i + oh, left_i:left_i + ow] = blended
mixed_imgs[i] = xi
corrected_lam = (dest_area - overlap_area) / dest_area + lam_raw * overlap_area / dest_area
lam_list[i] = corrected_lam
return mixed_imgs, lam_list, pair_to
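# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A hypothetical direct call to the mixer: each entry of `lam_list` is the fraction of
# pixels in image i that still come from image i, and `pair_to[i]` names the partner whose
# central-overlap pixels fill the remainder.
def _mix_batch_usage_sketch():
    imgs = [torch.rand(3, 224, 224), torch.rand(3, 160, 256)]
    mixed, lam_list, pair_to = mix_batch_variable_size(imgs, mixup_alpha=0.8, cutmix_alpha=0.0)
    assert mixed[0].shape == imgs[0].shape and mixed[1].shape == imgs[1].shape
    assert pair_to == {0: 1, 1: 0}
    return lam_list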
def smoothed_sparse_target(
targets: torch.Tensor,
*,
num_classes: int,
smoothing: float = 0.0,
) -> torch.Tensor:
off_val = smoothing / num_classes
on_val = 1.0 - smoothing + off_val
y_onehot = torch.full(
(targets.size(0), num_classes),
off_val,
dtype=torch.float32,
device=targets.device
)
y_onehot.scatter_(1, targets.unsqueeze(1), on_val)
return y_onehot
def pairwise_mixup_target(
targets: torch.Tensor,
pair_to: Dict[int, int],
lam_list: List[float],
*,
num_classes: int,
smoothing: float = 0.0,
) -> torch.Tensor:
"""Create soft targets that match the pixel‑level mixing performed.
Args:
targets: (B,) tensor of integer class indices.
pair_to: Mapping of sample index to its mixed partner as returned by mix_batch_variable_size().
lam_list: Per‑sample fractions of own pixels, also from the mixer.
num_classes: Total number of classes in the dataset.
smoothing: Label‑smoothing value in the range [0, 1).
Returns:
Tensor of shape (B, num_classes) whose rows sum to 1.
"""
y_onehot = smoothed_sparse_target(targets, num_classes=num_classes, smoothing=smoothing)
targets = y_onehot.clone()
for i, j in pair_to.items():
lam = lam_list[i]
targets[i].mul_(lam).add_(y_onehot[j], alpha=1.0 - lam)
return targets
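# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A hypothetical worked example with two samples mixed with each other, four classes and
# no smoothing: sample 0 keeps 75% of its own pixels, sample 1 keeps 60%.
def _pairwise_target_sketch() -> torch.Tensor:
    targets = torch.tensor([0, 2])
    soft = pairwise_mixup_target(targets, {0: 1, 1: 0}, [0.75, 0.60], num_classes=4, smoothing=0.0)
    # soft[0] == [0.75, 0.00, 0.25, 0.00]; soft[1] == [0.40, 0.00, 0.60, 0.00]
    return soft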
class NaFlexMixup:
"""Callable wrapper that combines mixing and target generation."""
def __init__(
self,
*,
num_classes: int,
mixup_alpha: float = 0.8,
cutmix_alpha: float = 1.0,
switch_prob: float = 0.5,
prob: float = 1.0,
local_shuffle: int = 4,
label_smoothing: float = 0.0,
) -> None:
"""Configure the augmentation.
Args:
num_classes: Total number of classes.
mixup_alpha: Beta α for Mixup. 0 disables Mixup.
cutmix_alpha: Beta α for CutMix. 0 disables CutMix.
switch_prob: Probability of selecting CutMix when both modes are enabled.
prob: Probability of applying any mixing per batch.
local_shuffle: Window size used to shuffle images after aspect sorting so pairings vary between epochs.
            label_smoothing: Label‑smoothing value. 0 disables smoothing.
"""
self.num_classes = num_classes
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.switch_prob = switch_prob
self.prob = prob
self.local_shuffle = local_shuffle
self.smoothing = label_smoothing
def __call__(
self,
imgs: List[torch.Tensor],
targets: torch.Tensor,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""Apply the augmentation and generate matching targets.
Args:
imgs: List of already transformed images shaped (C, H, W).
targets: Hard labels with shape (B,).
Returns:
mixed_imgs: List of mixed images in the same order and shapes as the input.
            targets: Tuple of per‑sample soft‑label tensors, each shaped (num_classes,), suitable for cross‑entropy with soft targets.
"""
if not isinstance(targets, torch.Tensor):
targets = torch.tensor(targets)
if random.random() > self.prob:
targets = smoothed_sparse_target(targets, num_classes=self.num_classes, smoothing=self.smoothing)
return imgs, targets.unbind(0)
mixed_imgs, lam_list, pair_to = mix_batch_variable_size(
imgs,
mixup_alpha=self.mixup_alpha,
cutmix_alpha=self.cutmix_alpha,
switch_prob=self.switch_prob,
local_shuffle=self.local_shuffle,
)
targets = pairwise_mixup_target(
targets,
pair_to,
lam_list,
num_classes=self.num_classes,
smoothing=self.smoothing,
)
return mixed_imgs, targets.unbind(0)
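# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A hypothetical end-to-end example: mix a small batch of differently sized images and
# build the matching per-sample soft-label targets.
def _naflex_mixup_usage_sketch():
    mixup_fn = NaFlexMixup(num_classes=10, mixup_alpha=0.8, cutmix_alpha=1.0)
    imgs = [torch.rand(3, 224, 224), torch.rand(3, 192, 256), torch.rand(3, 256, 160), torch.rand(3, 224, 192)]
    targets = torch.tensor([1, 3, 5, 7])
    mixed_imgs, soft_targets = mixup_fn(imgs, targets)
    # mixed_imgs keep their original shapes; soft_targets is a tuple of (num_classes,) rows
    return mixed_imgs, soft_targets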
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/data/naflex_mixup.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/data/naflex_random_erasing.py | """Patch-level random erasing augmentation for NaFlex Vision Transformers.
This module implements random erasing specifically designed for patchified images,
operating at the patch granularity rather than pixel level. It supports two modes:
- 'patch': Randomly erases individual patches (speckle-like noise)
- 'region': Erases contiguous rectangular regions of patches (similar to original RandomErasing)
The implementation is coordinate-aware, respecting valid patch boundaries and supporting
variable patch sizes in NaFlex training.
Hacked together by / Copyright 2025, Ross Wightman, Hugging Face
"""
import random
import math
from typing import Optional, Union, Tuple
import torch
class PatchRandomErasing:
"""Random erasing for patchified images in NaFlex format.
Supports two modes:
1. 'patch': Simple mode that erases randomly selected valid patches
2. 'region': Erases rectangular regions at patch granularity
"""
def __init__(
self,
erase_prob: float = 0.5,
patch_drop_prob: float = 0.0,
min_count: int = 1,
max_count: Optional[int] = None,
min_area: float = 0.02,
max_area: float = 1 / 3,
min_aspect: float = 0.3,
max_aspect: Optional[float] = None,
mode: str = 'const',
value: float = 0.,
spatial_mode: str = 'region',
num_splits: int = 0,
device: Union[str, torch.device] = 'cuda',
) -> None:
"""Initialize PatchRandomErasing.
Args:
erase_prob: Probability that the Random Erasing operation will be performed.
patch_drop_prob: Patch dropout probability. Remove random patches instead of erasing.
min_count: Minimum number of erasing operations.
max_count: Maximum number of erasing operations.
min_area: Minimum percentage of valid patches/area to erase.
max_area: Maximum percentage of valid patches/area to erase.
min_aspect: Minimum aspect ratio of erased area (only used in 'region' mode).
max_aspect: Maximum aspect ratio of erased area (only used in 'region' mode).
mode: Patch content mode, one of 'const', 'rand', or 'pixel'.
value: Constant value for 'const' mode.
spatial_mode: Erasing strategy, one of 'patch' or 'region'.
num_splits: Number of splits to apply erasing to (0 for all).
device: Computation device.
"""
self.erase_prob = erase_prob
self.patch_drop_prob = patch_drop_prob
self.min_count = min_count
self.max_count = max_count or min_count
self.min_area = min_area
self.max_area = max_area
# Aspect ratio params (for region mode)
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
# Number of splits
self.num_splits = num_splits
self.device = device
# Strategy mode
self.spatial_mode = spatial_mode
assert self.spatial_mode in ('patch', 'region')
# Value generation mode flags
self.erase_mode = mode.lower()
assert self.erase_mode in ('rand', 'pixel', 'const')
self.const_value = value
self.unique_noise_per_patch = True
def _get_values(
self,
shape: Union[Tuple[int, ...], torch.Size],
value: Optional[torch.Tensor] = None,
dtype: torch.dtype = torch.float32,
device: Optional[Union[str, torch.device]] = None
) -> torch.Tensor:
"""Generate values for erased patches based on the specified mode.
Args:
shape: Shape of patches to erase.
value: Value to use in const (or rand) mode.
dtype: Data type to use.
device: Device to use.
Returns:
Tensor with values for erasing patches.
"""
device = device or self.device
if self.erase_mode == 'pixel':
# only mode with erase shape that includes pixels
return torch.empty(shape, dtype=dtype, device=device).normal_()
else:
shape = (1, 1, shape[-1]) if len(shape) == 3 else (1, shape[-1])
if self.erase_mode == 'const' or value is not None:
erase_value = value or self.const_value
if isinstance(erase_value, (int, float)):
values = torch.full(shape, erase_value, dtype=dtype, device=device)
else:
erase_value = torch.tensor(erase_value, dtype=dtype, device=device)
values = torch.expand_copy(erase_value, shape)
else:
values = torch.empty(shape, dtype=dtype, device=device).normal_()
return values
def _drop_patches(
self,
patches: torch.Tensor,
patch_coord: torch.Tensor,
patch_valid: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Patch Dropout.
Fully drops patches from datastream. Only mode that saves compute BUT requires support
for non-contiguous patches and associated patch coordinate and valid handling.
Args:
patches: Tensor of patches.
patch_coord: Tensor of patch coordinates.
patch_valid: Tensor indicating which patches are valid.
Returns:
Tuple of (patches, patch_coord, patch_valid) with some patches dropped.
"""
# FIXME WIP, not completed. Downstream support in model needed for non-contiguous valid patches
if random.random() > self.erase_prob:
return
# Get indices of valid patches
valid_indices = torch.nonzero(patch_valid, as_tuple=True)[0].tolist()
# Skip if no valid patches
if not valid_indices:
return patches, patch_coord, patch_valid
num_valid = len(valid_indices)
if self.patch_drop_prob:
# patch dropout mode, completely remove dropped patches (FIXME needs downstream support in model)
num_keep = max(1, int(num_valid * (1. - self.patch_drop_prob)))
keep_indices = torch.argsort(torch.randn(1, num_valid, device=self.device), dim=-1)[:, :num_keep]
# maintain patch order, possibly useful for debug / visualization
keep_indices = keep_indices.sort(dim=-1)[0]
patches = patches.gather(1, keep_indices.unsqueeze(-1).expand((-1, -1) + patches.shape[2:]))
return patches, patch_coord, patch_valid
def _erase_patches(
self,
patches: torch.Tensor,
patch_coord: torch.Tensor,
patch_valid: torch.Tensor,
patch_shape: torch.Size,
dtype: torch.dtype = torch.float32,
) -> None:
"""Apply erasing by selecting individual patches randomly.
The simplest mode, aligned on patch boundaries. Behaves similarly to speckle or 'sprinkles'
noise augmentation at patch size.
Args:
patches: Tensor of patches to modify in-place.
patch_coord: Tensor of patch coordinates.
patch_valid: Tensor indicating which patches are valid.
patch_shape: Shape of individual patches.
dtype: Data type for generated values.
"""
if random.random() > self.erase_prob:
return
# Get indices of valid patches
valid_indices = torch.nonzero(patch_valid, as_tuple=True)[0]
num_valid = len(valid_indices)
if num_valid == 0:
return
count = random.randint(self.min_count, self.max_count)
# Determine how many valid patches to erase from RE min/max count and area args
max_erase = min(num_valid, max(1, int(num_valid * count * self.max_area)))
min_erase = max(1, int(num_valid * count * self.min_area))
num_erase = random.randint(min_erase, max_erase)
# Randomly select valid patches to erase
erase_idx = valid_indices[torch.randperm(num_valid, device=patches.device)[:num_erase]]
if self.unique_noise_per_patch and self.erase_mode == 'pixel':
# generate unique noise for the whole selection of patches
fill_shape = (num_erase,) + patch_shape
else:
fill_shape = patch_shape
patches[erase_idx] = self._get_values(fill_shape, dtype=dtype)
def _erase_region(
self,
patches: torch.Tensor,
patch_coord: torch.Tensor,
patch_valid: torch.Tensor,
patch_shape: torch.Size,
dtype: torch.dtype = torch.float32,
) -> None:
"""Apply erasing by selecting rectangular regions of patches randomly.
Closer to the original RandomErasing implementation. Erases
spatially contiguous rectangular regions of patches (aligned with patches).
Args:
patches: Tensor of patches to modify in-place.
patch_coord: Tensor of patch coordinates.
patch_valid: Tensor indicating which patches are valid.
patch_shape: Shape of individual patches.
dtype: Data type for generated values.
"""
if random.random() > self.erase_prob:
return
# Determine grid dimensions from coordinates
valid_coord = patch_coord[patch_valid]
if len(valid_coord) == 0:
return # No valid patches
max_y = valid_coord[:, 0].max().item() + 1
max_x = valid_coord[:, 1].max().item() + 1
grid_h, grid_w = max_y, max_x
total_area = grid_h * grid_w
ys, xs = patch_coord[:, 0], patch_coord[:, 1]
count = random.randint(self.min_count, self.max_count)
for _ in range(count):
# Try to select a valid region to erase (multiple attempts)
for attempt in range(10):
# Sample random area and aspect ratio
target_area = random.uniform(self.min_area, self.max_area) * total_area
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
# Calculate region height and width
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if h > grid_h or w > grid_w:
continue # try again
# Calculate region patch bounds
top = random.randint(0, grid_h - h)
left = random.randint(0, grid_w - w)
bottom, right = top + h, left + w
# Region test
region_mask = (
(ys >= top) & (ys < bottom) &
(xs >= left) & (xs < right) &
patch_valid
)
num_selected = int(region_mask.sum().item())
if not num_selected:
continue # no patch actually falls inside – try again
if self.unique_noise_per_patch and self.erase_mode == 'pixel':
# generate unique noise for the whole region
fill_shape = (num_selected,) + patch_shape
else:
fill_shape = patch_shape
patches[region_mask] = self._get_values(fill_shape, dtype=dtype)
# Successfully applied erasing, exit the loop
break
def __call__(
self,
patches: torch.Tensor,
patch_coord: torch.Tensor,
patch_valid: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Apply random patch erasing.
Args:
patches: Tensor of shape [B, N, P*P, C] or [B, N, Ph, Pw, C].
patch_coord: Tensor of shape [B, N, 2] with (y, x) coordinates.
patch_valid: Boolean tensor of shape [B, N] indicating which patches are valid.
Returns:
Erased patches tensor of same shape as input.
"""
if patches.ndim == 4:
batch_size, num_patches, patch_dim, channels = patches.shape
elif patches.ndim == 5:
batch_size, num_patches, patch_h, patch_w, channels = patches.shape
else:
assert False
patch_shape = patches.shape[2:]
# patch_shape ==> shape of patches to fill (h, w, c) or (h * w, c)
# Create default valid mask if not provided
if patch_valid is None:
patch_valid = torch.ones((batch_size, num_patches), dtype=torch.bool, device=patches.device)
# Skip the first part of the batch if num_splits is set
batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0
# Apply erasing to each batch element
for i in range(batch_start, batch_size):
if self.patch_drop_prob:
assert False, "WIP, not completed"
self._drop_patches(
patches[i],
patch_coord[i],
patch_valid[i],
)
elif self.spatial_mode == 'patch':
# FIXME we could vectorize patch mode across batch, worth the effort?
self._erase_patches(
patches[i],
patch_coord[i],
patch_valid[i],
patch_shape,
patches.dtype
)
elif self.spatial_mode == 'region':
self._erase_region(
patches[i],
patch_coord[i],
patch_valid[i],
patch_shape,
patches.dtype
)
else:
                assert False, f'Unsupported spatial_mode {self.spatial_mode}.'
return patches
def __repr__(self) -> str:
"""Return string representation of PatchRandomErasing.
Returns:
String representation of the object.
"""
fs = self.__class__.__name__ + f'(p={self.erase_prob}, mode={self.erase_mode}'
        fs += f', spatial={self.spatial_mode}, area=({self.min_area}, {self.max_area})'
        fs += f', count=({self.min_count}, {self.max_count}))'
return fs | {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/data/naflex_random_erasing.py",
"license": "Apache License 2.0",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/models/naflexvit.py | """ NaFlex Vision Transformer
An improved version of the Vision Transformer with:
1. Encapsulated embedding and position encoding in a single module
2. Support for linear patch embedding on pre-patchified inputs
3. Support for NaFlex variable aspect, variable resolution
4. Support for FlexiViT variable patch size
5. Support for NaViT fractional/factorized position embedding
Based on ideas from:
- Original Vision Transformer: https://arxiv.org/abs/2010.11929
- FlexiViT: https://arxiv.org/abs/2212.08013
- NaViT: https://arxiv.org/abs/2307.06304
- NaFlex (SigLip-2): https://arxiv.org/abs/2502.14786
Hacked together by / Copyright 2025, Ross Wightman, Hugging Face
"""
import logging
import math
from dataclasses import dataclass, fields, replace
from functools import partial
from typing import Callable, Dict, List, Optional, Set, Tuple, Type, Union, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import (
AttentionPoolLatent,
Mlp,
LayerNorm,
PatchDropoutWithIndices,
PatchEmbedInterpolator,
_assert,
to_2tuple,
get_act_layer,
get_norm_layer,
apply_keep_indices_nlc,
disable_compiler,
calculate_drop_path_rates,
)
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function, register_notrace_module
from ._manipulate import checkpoint, named_apply
from ._registry import register_model, generate_default_cfgs
from .eva import EvaBlock
from .vision_transformer import Block, global_pool_nlc
__all__ = ['NaFlexVitCfg', 'NaFlexVit']
_logger = logging.getLogger(__name__)
@dataclass
class NaFlexVitCfg:
"""Configuration for FlexVit model.
This dataclass contains the bulk of model configuration parameters,
with core parameters (img_size, in_chans, num_classes, etc.) remaining
as direct constructor arguments for API compatibility.
"""
# Architecture parameters
patch_size: Union[int, Tuple[int, int]] = 16
embed_dim: int = 768
depth: int = 12
num_heads: int = 12
mlp_ratio: float = 4.0
scale_mlp_norm: bool = False # Apply scaling norm to MLP
# Attention parameters
qkv_bias: bool = True
qk_norm: bool = False
proj_bias: bool = True
attn_drop_rate: float = 0.0
scale_attn_inner_norm: bool = False # Apply scaling norm to attn context
# Regularization
init_values: Optional[float] = None # Layer-scale init values (layer-scale enabled if not None)
drop_rate: float = 0.0 # Dropout rate for classifier
pos_drop_rate: float = 0.0 # Dropout rate for position embeddings
patch_drop_rate: float = 0.0 # Dropout rate for patch tokens
proj_drop_rate: float = 0.0 # Dropout rate for linear projections
drop_path_rate: float = 0.0 # Stochastic depth drop rate
# Prefix token configuration
class_token: bool = False # Use class token
reg_tokens: int = 0 # Number of register tokens
# Position embedding configuration
pos_embed: str = 'learned' # Type of position embedding ('learned', 'factorized', 'rope', 'none')
pos_embed_grid_size: Optional[Tuple[int, int]] = (16, 16) # Grid size for position embedding initialization
pos_embed_interp_mode: str = 'bicubic' # Interpolation mode for position embedding resizing
pos_embed_ar_preserving: bool = False # Whether to preserve aspect ratio during position embedding interpolation
pos_embed_use_grid_sample: bool = False # Whether to use grid_sample for naflex position embedding interpolation
# ROPE specific configuration
rope_type: str = '' # ROPE type: '' or 'none' for no ROPE, 'axial' for standard, 'mixed' for learnable frequencies
rope_temperature: float = 10000.0 # Temperature for ROPE frequency computation
rope_ref_feat_shape: Optional[Tuple[int, int]] = None
rope_grid_offset: float = 0. # Grid offset for non-pixel ROPE mode
rope_grid_indexing: str = 'ij' # Grid indexing mode for ROPE ('ij' or 'xy')
# Image processing
dynamic_img_pad: bool = False # Whether to enable dynamic padding for variable resolution
# Other architecture choices
pre_norm: bool = False # Whether to apply normalization before attention/MLP layers (start of blocks)
final_norm: bool = True # Whether to apply final normalization before pooling and classifier (end of blocks)
fc_norm: Optional[bool] = None # Whether to normalize features before final classifier (after pooling)
# Global pooling setup
global_pool: str = 'map' # Type of global pooling for final sequence
pool_include_prefix: bool = False # Whether to include class/register prefix tokens in global pooling
attn_pool_num_heads: Optional[int] = None # Override num_heads for attention pool
attn_pool_mlp_ratio: Optional[float] = None # Override mlp_ratio for attention pool
# Weight initialization
weight_init: str = '' # Weight initialization scheme
fix_init: bool = True # Apply weight initialization fix (scaling w/ layer index)
# Embedding configuration
embed_proj_type: str = 'linear' # Type of embedding layer ('conv' or 'linear')
input_norm_layer: Optional[str] = None # Normalization layer for embeddings input (before input projection)
embed_norm_layer: Optional[str] = None # Normalization layer for embeddings (after input projection)
# Layer implementations
norm_layer: Optional[str] = None # Normalization layer for transformer blocks
act_layer: Optional[str] = None # Activation layer for MLP blocks
block_fn: Optional[str] = None # Transformer block implementation class name
mlp_layer: Optional[str] = None # MLP implementation class name
attn_layer: Optional[str] = None # Attention layer implementation (e.g., 'attn', 'diff')
# EVA-specific parameters
attn_type: str = 'standard' # Attention type: 'standard', 'eva', 'rope'
swiglu_mlp: bool = False # Use SwiGLU MLP variant
qkv_fused: bool = True # Whether to use fused QKV projections
# Variable patch size support
enable_patch_interpolator: bool = False # Enable dynamic patch size support
def _overlay_kwargs(cfg: NaFlexVitCfg, **kwargs) -> NaFlexVitCfg:
"""Overlay kwargs onto config, replacing config values with provided kwargs."""
# Only update fields that exist in the config
config_fields = set(cfg.__dataclass_fields__.keys())
config_kwargs = {k: v for k, v in kwargs.items() if k in config_fields}
if config_kwargs:
cfg = replace(cfg, **config_kwargs)
return cfg
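# Illustrative sketch (hypothetical kwargs): only keyword arguments that name NaFlexVitCfg fields
# are overlaid; anything else (e.g. num_classes) is ignored here and handled by the model constructor.
def _example_overlay_kwargs() -> None:
    cfg = _overlay_kwargs(NaFlexVitCfg(), depth=24, num_classes=10)
    assert cfg.depth == 24
    assert not hasattr(cfg, 'num_classes')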
def batch_patchify(
x: torch.Tensor,
patch_size: Tuple[int, int],
pad: bool = True,
) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""Patchify a batch of images.
Args:
x: Input tensor of shape [B, C, H, W].
patch_size: Patch dimensions (patch_h, patch_w).
pad: Whether to pad images to be divisible by patch size.
Returns:
Tuple of (patches, grid_size) where patches has shape [B, N, P*P*C]
and grid_size is (num_patches_h, num_patches_w).
"""
B, C, H, W = x.shape
ph, pw = patch_size
# Ensure the image is divisible by patch size
if pad and (H % ph != 0 or W % pw != 0):
pad_h = (ph - H % ph) % ph
pad_w = (pw - W % pw) % pw
        x = F.pad(x, (0, pad_w, 0, pad_h))
        H, W = x.shape[-2:]
    nh, nw = H // ph, W // pw
patches = x.view(B, C, nh, ph, nw, pw).permute(0, 2, 4, 3, 5, 1).reshape(B, nh * nw, ph * pw * C)
    # FIXME confirm we want 'channels last' in the patch channel layout, e.g. ph, pw, C instead of C, ph, pw
return patches, (nh, nw)
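# Illustrative usage sketch for batch_patchify (hypothetical 224x224 RGB input, 16x16 patches);
# this helper only demonstrates the expected shapes and is not called anywhere in the module.
def _example_batch_patchify() -> None:
    imgs = torch.randn(2, 3, 224, 224)
    patches, (nh, nw) = batch_patchify(imgs, (16, 16))
    # 224 / 16 = 14 patches per side -> (14, 14) grid, 196 tokens of dim 16 * 16 * 3 = 768
    assert (nh, nw) == (14, 14)
    assert patches.shape == (2, 14 * 14, 16 * 16 * 3)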
def calculate_naflex_grid_sizes(_coord: torch.Tensor):
# Calculate the appropriate grid size from coords
max_y = _coord[:, :, 0].amax(dim=1) + 1
max_x = _coord[:, :, 1].amax(dim=1) + 1
return [(int(h.item()), int(w.item())) for h, w in zip(max_y, max_x)]
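# Illustrative sketch (hypothetical coordinates, not used by the model): grid sizes are recovered
# from the max (y, x) coordinate per sample, so padding coordinates of (0, 0) do not affect them.
def _example_calculate_naflex_grid_sizes() -> None:
    coords = torch.tensor([
        [[0, 0], [0, 1], [1, 0], [1, 1]],  # sample 0: 2x2 grid
        [[0, 0], [1, 0], [2, 0], [0, 0]],  # sample 1: 3x1 grid plus one padding coordinate
    ])
    assert calculate_naflex_grid_sizes(coords) == [(2, 2), (3, 1)]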
class NaFlexRopeIterator:
"""Iterator for generating batched ROPE embeddings for mixed mode with multiple grid sizes."""
def __init__(
self,
rope_module,
size_to_indices: Dict[Tuple[int, int], List[int]],
unique_sizes: List[Tuple[int, int]],
batch_size: int,
seq_len: int,
device: torch.device,
dtype: torch.dtype,
):
self.rope = rope_module
self.size_to_indices = size_to_indices
self.unique_sizes = unique_sizes
self.batch_size = batch_size
self.seq_len = seq_len
self.dtype = dtype
self.device = device
self.depth = rope_module.depth
self.num_heads = rope_module.num_heads
self.head_dim = 2 * rope_module.dim // rope_module.num_heads
self._depth_idx = 0
# Pre-compute embeddings for each unique size
self._embeddings_per_size = {}
for grid_size in unique_sizes:
# get_embed returns all depths at once for mixed mode
rope_embed = rope_module.get_embed(shape=grid_size)
self._embeddings_per_size[grid_size] = rope_embed
def __iter__(self):
self._depth_idx = 0
return self
@disable_compiler
def __next__(self):
if self._depth_idx >= self.depth:
raise StopIteration
# Create batch tensor for current depth
batch_embed = torch.zeros(
self.batch_size, self.num_heads, self.seq_len, self.head_dim,
dtype=self.dtype, device=self.device
)
# Fill in embeddings for each unique grid size
for grid_size in self.unique_sizes:
h, w = grid_size
actual_len = h * w
batch_indices = self.size_to_indices[grid_size]
# Get pre-computed embeddings for this size at current depth
embed = self._embeddings_per_size[grid_size][self._depth_idx] # [num_heads, H*W, dim]
# Assign to batch indices
for bi in batch_indices:
batch_embed[bi, :, :actual_len, :] = embed[:, :actual_len, :]
self._depth_idx += 1
return batch_embed
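# Illustrative usage sketch (hypothetical `rope`, `blocks`, `x` objects): the iterator is consumed
# once per transformer depth, yielding one [B, num_heads, seq_len, head_dim] tensor per block, e.g.
#   rope_iter = NaFlexRopeIterator(rope, size_to_indices, unique_sizes, B, seq_len, device, dtype)
#   for blk, rope_embed in zip(blocks, rope_iter):
#       x = blk(x, rope=rope_embed)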
def get_block_fn(cfg: NaFlexVitCfg) -> Callable:
"""Get appropriate block function based on configuration.
Returns a partially applied block constructor with EVA-specific
or conflicting parameters pre-configured if needed.
"""
# Check if we need EVA block features
use_eva_features = (
cfg.attn_type in ('eva', 'rope') or
cfg.rope_type not in ('', 'none') or # Any ROPE type requires EVA blocks
cfg.swiglu_mlp
)
if use_eva_features:
# Determine attention type based on rope_type if not explicitly set
attn_type = cfg.attn_type
if attn_type == 'standard' and cfg.rope_type not in ('', 'none'):
attn_type = 'rope'
num_prefix_tokens = (1 if cfg.class_token else 0) + cfg.reg_tokens
return partial(
EvaBlock,
attn_type=attn_type,
swiglu_mlp=cfg.swiglu_mlp,
scale_mlp=cfg.scale_mlp_norm,
scale_attn_inner=cfg.scale_attn_inner_norm,
qkv_fused=cfg.qkv_fused,
num_prefix_tokens=num_prefix_tokens,
)
else:
# Standard ViT block
block_fn = cfg.block_fn or Block
block_kwargs = {}
if cfg.scale_mlp_norm or cfg.scale_attn_inner_norm:
# param names differ between EVA vs non-EVA block types
block_kwargs['scale_mlp_norm'] = cfg.scale_mlp_norm
block_kwargs['scale_attn_norm'] = cfg.scale_attn_inner_norm
if cfg.attn_layer:
block_kwargs['attn_layer'] = cfg.attn_layer
if block_kwargs:
block_fn = partial(block_fn, **block_kwargs)
return block_fn
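# Illustrative sketch (hypothetical configs): any ROPE type, EVA attention, or SwiGLU MLP routes
# block construction through EvaBlock; otherwise the standard ViT Block (optionally wrapped in a
# partial with extra kwargs) is used.
def _example_get_block_fn() -> None:
    assert get_block_fn(NaFlexVitCfg()) is Block  # standard ViT block, no extra kwargs
    assert isinstance(get_block_fn(NaFlexVitCfg(rope_type='axial')), partial)  # EvaBlock partial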
@register_notrace_module
class NaFlexEmbeds(nn.Module):
"""NaFlex Embedding module for Vision Transformers.
This module encapsulates the complete embedding process for Vision Transformers,
supporting both standard and NaFlex (NaViT + FlexiViT) functionality:
1. Patch embedding (via Conv2d or Linear)
2. Class and register token preparation
3. Position embedding addition with interpolation support
4. Pre-normalization (if requested)
5. Dropout application
NaFlex capabilities include:
- Variable aspect ratio and resolution via patch coordinates
- Patch type indicators for handling padding tokens in attention
- Flexible position embedding interpolation for arbitrary grid sizes
- Support for factorized position embeddings
The patch embedding can be one of two types:
- Conv2d-based (default): For standard image inputs [B, C, H, W]
- Linear-based: For pre-patchified inputs [B, N, P*P*C]
Args:
patch_size: Size of patches for patch embedding
in_chans: Number of input image channels
embed_dim: Dimensionality of patch embedding
proj_type: Type of embedding projection layer ('conv' or 'linear')
input_norm_layer: Normalization layer applied to input (linear mode only)
proj_norm_layer: Normalization layer applied after projection
pos_embed: Type of position embedding ('learned', 'factorized', 'none')
pos_drop_rate: Dropout rate for position embeddings
class_token: Whether to include a class token
reg_tokens: Number of register tokens to include
bias: Whether to use bias in projection layers
dynamic_img_pad: Whether to enable dynamic padding for variable resolution
pos_embed_grid_size: Grid size for position embedding initialization
pos_embed_interp_mode: Interpolation mode for position embedding resizing
pos_embed_ar_preserving: Whether to preserve aspect ratio during position embedding interpolation
default_img_size: Default image size for position embedding grid calculation
"""
def __init__(
self,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
embed_dim: int = 768,
proj_type: Optional[str] = None,
proj_bias: bool = True,
class_token: bool = True,
reg_tokens: int = 0,
dynamic_img_pad: bool = False,
default_img_size: Optional[Union[int, Tuple[int, int]]] = None,
pos_embed: str = 'learned',
pos_embed_grid_size: Optional[Tuple[int, int]] = (14, 14),
pos_embed_interp_mode: str = 'bicubic',
pos_embed_ar_preserving: bool = False,
pos_embed_use_grid_sample: bool = False,
input_norm_layer: Optional[Type[nn.Module]] = None,
proj_norm_layer: Union[bool, Optional[Type[nn.Module]]] = None,
norm_layer: Optional[Type[nn.Module]] = None,
pos_drop_rate: float = 0.,
enable_patch_interpolator: bool = False,
device=None,
dtype=None,
) -> None:
"""Initialize NaFlexEmbeds module.
Args:
patch_size: Size of patches for patch embedding.
in_chans: Number of input image channels.
embed_dim: Dimensionality of patch embedding.
proj_type: Type of embedding projection layer ('conv' or 'linear').
proj_bias: Whether to use bias in projection layers.
class_token: Whether to include a class token.
reg_tokens: Number of register tokens to include.
dynamic_img_pad: Whether to enable dynamic padding for variable resolution.
default_img_size: Default image size for position embedding grid calculation.
pos_embed: Type of position embedding ('learned', 'factorized', 'none').
pos_embed_grid_size: Grid size for position embedding initialization.
pos_embed_interp_mode: Interpolation mode for position embedding resizing.
pos_embed_ar_preserving: Whether to preserve aspect ratio during interpolation.
input_norm_layer: Normalization layer applied to input (linear mode only).
proj_norm_layer: Normalization layer applied after projection.
norm_layer: Default normalization layer.
pos_drop_rate: Dropout rate for position embeddings.
enable_patch_interpolator: Enable dynamic patch size support.
"""
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.has_class_token = class_token
self.num_reg_tokens = reg_tokens
self.pos_embed_interp_mode = pos_embed_interp_mode
self.pos_embed_ar_preserving = pos_embed_ar_preserving
self.pos_embed_use_grid_sample = pos_embed_use_grid_sample
self.patch_size = to_2tuple(patch_size)
self.in_chans = in_chans
self.embed_dim = embed_dim
self.dynamic_img_pad = dynamic_img_pad
self.enable_patch_interpolator = enable_patch_interpolator
# Calculate number of prefix tokens
self.num_prefix_tokens = 1 if class_token else 0
self.num_prefix_tokens += reg_tokens
# Create class and register tokens
self.cls_token = nn.Parameter(torch.empty(1, 1, embed_dim, **dd)) if class_token else None
self.reg_token = nn.Parameter(torch.empty(1, reg_tokens, embed_dim, **dd)) if reg_tokens else None
# Calculate grid size and number of patches
self.default_img_size: Optional[Tuple[int, int]] = None
self.pos_embed_grid_size: Optional[Tuple[int, int]] = None # Grid size used for learned pos embed init
if pos_embed_grid_size is not None:
# Highest priority, use provided pos_embed_grid_size
self.pos_embed_grid_size = pos_embed_grid_size
elif default_img_size is not None:
# Fallback to calculating grid size from img_size + patch_size if img size provided.
self.default_img_size = to_2tuple(default_img_size)
self.pos_embed_grid_size = tuple([s // p for s, p in zip(self.default_img_size, self.patch_size)])
# Determine patch embedding type (linear or conv2d)
if proj_type == 'linear':
# Create linear projection for pre-patchified inputs
# Input dimension is patch_size^2 * in_chans
patch_dim = self.patch_size[0] * self.patch_size[1] * in_chans
assert not (input_norm_layer is True and norm_layer is None), \
"`norm_layer` must be given when input_norm_layer=True"
input_norm_layer = norm_layer if input_norm_layer is True else (input_norm_layer or None)
self.norm_input = input_norm_layer(patch_dim) if input_norm_layer else None
self.proj = nn.Linear(patch_dim, embed_dim, bias=proj_bias, **dd)
self.flatten = False
self.is_linear = True
else:
# Default to convolutional patch embedding for image inputs
assert not input_norm_layer
self.norm_input = None
self.proj = nn.Conv2d(
in_chans,
embed_dim,
kernel_size=patch_size,
stride=patch_size,
bias=proj_bias,
**dd,
)
self.flatten = True
self.is_linear = False
# Create patch embedding interpolator if enabled
if self.enable_patch_interpolator:
self.patch_interpolator = PatchEmbedInterpolator(
base_patch_size=self.patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
interpolation=pos_embed_interp_mode,
antialias=True,
)
else:
self.patch_interpolator = None
# Create normalization layer after the projection
assert not (proj_norm_layer is True and norm_layer is None), \
"`norm_layer` must be given when proj_norm_layer=True"
proj_norm_layer = norm_layer if proj_norm_layer is True else (proj_norm_layer or None)
self.norm = proj_norm_layer(embed_dim) if proj_norm_layer else nn.Identity()
# Create position embedding if needed - only for patches, never for prefix tokens
if pos_embed in ('factorized', 'learned') and self.pos_embed_grid_size is None:
raise ValueError(
"Cannot initialize position embeddings without grid_size."
"Please provide img_size or pos_embed_grid_size.")
self.pos_embed: Optional[torch.Tensor] = None
self.pos_embed_y: Optional[torch.Tensor] = None
self.pos_embed_x: Optional[torch.Tensor] = None
if not pos_embed or pos_embed == 'none':
self.pos_embed_type = 'none'
elif pos_embed == 'factorized':
assert self.pos_embed_grid_size is not None
h, w = self.pos_embed_grid_size
self.pos_embed_type = 'factorized'
self.pos_embed_y = nn.Parameter(torch.empty(1, h, embed_dim, **dd))
self.pos_embed_x = nn.Parameter(torch.empty(1, w, embed_dim, **dd))
else:
assert self.pos_embed_grid_size is not None
h, w = self.pos_embed_grid_size
self.pos_embed = nn.Parameter(torch.empty(1, h, w, embed_dim, **dd))
self.pos_embed_type = 'learned'
# Dropout layer
self.pos_drop = nn.Dropout(p=pos_drop_rate)
# TODO: skip init when on meta device when safe to do so
self.reset_parameters()
def reset_parameters(self) -> None:
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
if self.reg_token is not None:
nn.init.normal_(self.reg_token, std=1e-6)
if self.pos_embed is not None:
nn.init.normal_(self.pos_embed, std=.02)
if self.pos_embed_y is not None:
nn.init.normal_(self.pos_embed_y, std=.02)
if self.pos_embed_x is not None:
nn.init.normal_(self.pos_embed_x, std=.02)
def feature_info(self, location) -> Dict[str, Any]:
"""Get feature information for feature extraction.
Args:
location: Feature extraction location identifier
Returns:
Dictionary containing feature channel count and reduction factor
"""
return dict(num_chs=self.embed_dim, reduction=self.patch_size)
def feat_ratio(self, as_scalar: bool = True) -> Union[int, Tuple[int, int]]:
"""Get the feature reduction ratio (stride) of the patch embedding.
Args:
as_scalar: Whether to return the maximum dimension as a scalar
Returns:
Feature reduction ratio as scalar or tuple
"""
if as_scalar:
return max(self.patch_size)
else:
return self.patch_size
def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]:
"""Calculate grid (feature) size for given image size.
Takes into account dynamic padding when enabled.
Args:
img_size: Input image size as (height, width)
Returns:
Grid size as (grid_height, grid_width)
"""
if self.dynamic_img_pad:
return math.ceil(img_size[0] / self.patch_size[0]), math.ceil(img_size[1] / self.patch_size[1])
else:
return img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1]
@disable_compiler
def _apply_learned_naflex_pos_embed(
self,
x: torch.Tensor,
patch_coord: torch.Tensor,
) -> None:
"""Apply learned position embeddings to NaFlex batch in-place.
Interpolates learned 2D position embeddings for each sample in the batch
based on their individual grid sizes.
Args:
x: Input tensor to add position embeddings to [B, N, C]
patch_coord: Patch coordinates [B, N, 2] with (y, x) values
"""
# Calculate grid sizes from patch coordinates
naflex_grid_sizes = calculate_naflex_grid_sizes(patch_coord)
orig_h, orig_w = self.pos_embed.shape[1:3]
pos_embed_nchw = self.pos_embed.permute(0, 3, 1, 2).float() # B,C,H,W
def _interp2d(size):
"""
Return a flattened positional-embedding grid at an arbitrary spatial resolution.
Converts the learned 2-D table stored in NCHW format (pos_embed_nchw) into
a (1, H*W, C) sequence that matches the requested size.
"""
if (size[0] == orig_h) and (size[1] == orig_w):
pos_embed_flat = self.pos_embed.reshape(1, orig_h * orig_w, -1)
else:
_interp_size = to_2tuple(max(size)) if self.pos_embed_ar_preserving else size
pos_embed_flat = F.interpolate(
pos_embed_nchw,
size=_interp_size,
mode=self.pos_embed_interp_mode,
align_corners=False,
antialias=True,
)[:, :, :size[0], :size[1]].flatten(2).transpose(1, 2)
return pos_embed_flat.to(dtype=x.dtype)
# Determine unique grid sizes to avoid duplicate interpolation
size_to_indices: Dict[Tuple[int, int], List[int]] = {}
for bi, k in enumerate(naflex_grid_sizes):
# k = h << 16 | w # FIXME can get jit compat with this
size_to_indices.setdefault(k, []).append(bi)
for k, batch_indices in size_to_indices.items():
# h, w = k >> 16, k & 0xFFFF # FIXME can get jit compat with this
# Interpolate only once for this (h, w)
pos_embed_flat = _interp2d(k)
seq_len = min(x.shape[1], pos_embed_flat.shape[1])
x[:, :seq_len].index_add_(
0,
torch.as_tensor(batch_indices, device=x.device),
pos_embed_flat[:, :seq_len].expand(len(batch_indices), -1, -1)
)
@disable_compiler
def _apply_learned_naflex_pos_embed_grid_sample(
self,
x: torch.Tensor,
patch_coord: torch.Tensor,
) -> None:
"""Apply learned position embeddings to NaFlex batch using grid_sample.
Uses F.grid_sample for efficient interpolation of learned 2D position embeddings
based on patch coordinates. Based on proposal by https://github.com/stas-sl
Args:
x: Input tensor to add position embeddings to [B, N, C]
patch_coord: Patch coordinates [B, N, 2] with (y, x) values
"""
device = x.device
B, N, C = x.shape
shapes = patch_coord.max(dim=1).values + 1 # (B, 2) containing [h_i, w_i]
if self.pos_embed_ar_preserving:
L_i = shapes.amax(dim=1) # (B,) max(h_i, w_i)
L_global = L_i.amax()
grid_size_y = grid_size_x = L_global
scale_x = scale_y = L_global / L_i # uniform zoom (B,)
else:
grid_size_y, grid_size_x = shapes.amax(dim=0) # (2,)
scale_y = grid_size_y / shapes[:, 0] # vertical zoom (B,)
scale_x = grid_size_x / shapes[:, 1] # horizontal zoom (B,)
theta = torch.zeros(B, 2, 3, device=device, dtype=torch.float32)
theta[:, 0, 0] = scale_x
theta[:, 1, 1] = scale_y
theta[:, 0, 2] = scale_x - 1 # translate x
theta[:, 1, 2] = scale_y - 1 # translate y
grid = F.affine_grid(theta, (B, C, grid_size_y, grid_size_x), align_corners=False)
pos_embed = F.grid_sample(
self.pos_embed.permute(0, 3, 1, 2).expand(B, -1, -1, -1).float(),
grid,
mode=self.pos_embed_interp_mode,
align_corners=False,
padding_mode='border',
).to(dtype=x.dtype) # (B, C, H_out, W_out)
bi = torch.arange(B, device=device, dtype=torch.long).unsqueeze(1)
x += pos_embed[bi, :, patch_coord[..., 0], patch_coord[..., 1]] # NOTE leave as '+='
def _apply_learned_pos_embed(
self,
x: torch.Tensor,
grid_size: List[int],
) -> None:
"""Apply learned position embeddings to standard 2D batch in-place.
Interpolates learned 2D position embeddings to match the specified grid size.
Args:
x: Input tensor to add position embeddings to [B, H*W, C]
grid_size: Target grid size as [height, width]
"""
orig_h, orig_w = self.pos_embed.shape[1:3]
if grid_size[0] == orig_h and grid_size[1] == orig_w:
# No resize needed, just flatten
pos_embed_flat = self.pos_embed.reshape(1, orig_h * orig_w, -1)
else:
# Resize if needed - directly using F.interpolate
if self.pos_embed_ar_preserving:
L = max(grid_size)
_interp_size = L, L
else:
_interp_size = grid_size
pos_embed_flat = F.interpolate(
self.pos_embed.permute(0, 3, 1, 2).float(), # B,C,H,W
size=_interp_size,
mode=self.pos_embed_interp_mode,
align_corners=False,
antialias=True,
)[:, :, :grid_size[0], :grid_size[1]].flatten(2).transpose(1, 2)
pos_embed_flat = pos_embed_flat.to(dtype=x.dtype)
x.add_(pos_embed_flat)
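    # Illustrative numbers (hypothetical): with a learned 16x16 table and a target grid of (20, 10),
    # aspect-preserving mode interpolates to 20x20 and then crops to [:20, :10]; otherwise the table
    # is interpolated directly to (20, 10). Either way the result is flattened to a (1, 200, C) sequence.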
@disable_compiler
def _apply_factorized_naflex_pos_embed(
self,
x: torch.Tensor,
patch_coord: torch.Tensor,
) -> None:
"""Apply factorized position embeddings to NaFlex batch in-place.
Uses separate Y and X position embedding tables that are interpolated
and combined for each sample's grid size.
Args:
x: Input tensor to add position embeddings to [B, N, C]
patch_coord: Patch coordinates [B, N, 2] with (y, x) values
"""
# Calculate grid sizes from patch coordinates
naflex_grid_sizes = calculate_naflex_grid_sizes(patch_coord)
assert len(naflex_grid_sizes) == x.size(0) # one (H,W) per sample
# Handle each batch element separately with its own grid size
orig_h, orig_w = self.pos_embed_y.shape[1], self.pos_embed_x.shape[1]
# bucket samples that share the same (H, W) so we build each grid once
size_to_indices: Dict[Tuple[int, int], List[int]] = {}
for bi, k in enumerate(naflex_grid_sizes):
size_to_indices.setdefault(k, []).append(bi)
def _interp1d(table: torch.Tensor, new_length: int, orig_length: int) -> torch.Tensor:
"""
Resample a 1-D positional-embedding table to specified length
and return it in (1, L, C) layout, dtype matching x.
"""
if new_length == orig_length:
return table.to(dtype=x.dtype)
return F.interpolate(
table.permute(0, 2, 1).float(), # (1,C,L) → (1,C,L_out)
size=new_length,
mode='linear',
align_corners=False,
).permute(0, 2, 1).to(dtype=x.dtype) # → (1,L_out,C)
for k, batch_indices in size_to_indices.items():
target_h, target_w = k
if self.pos_embed_ar_preserving:
len_y = len_x = max(target_h, target_w)
else:
len_y, len_x = target_h, target_w
pe_y = _interp1d(self.pos_embed_y, len_y, orig_h)[:, :target_h] # (1,H,C)
pe_x = _interp1d(self.pos_embed_x, len_x, orig_w)[:, :target_w] # (1,W,C)
# Broadcast, add and flatten to sequence layout (row major)
pos = pe_y.unsqueeze(2) + pe_x.unsqueeze(1) # (1,H,W,C)
pos = pos.flatten(1, 2)
seq_len = min(x.shape[1], pos.shape[1])
x[:, :seq_len].index_add_(
0,
torch.as_tensor(batch_indices, device=x.device),
pos[:, :seq_len].expand(len(batch_indices), -1, -1)
)
@disable_compiler
def _apply_factorized_naflex_pos_embed_grid_sample(
self,
x: torch.Tensor,
patch_coord: torch.Tensor,
) -> None:
"""Apply factorized position embeddings to NaFlex batch using grid_sample.
Uses F.grid_sample for efficient interpolation of separate Y and X position
embedding tables based on patch coordinates. Based on proposal by https://github.com/stas-sl
Args:
x: Input tensor to add position embeddings to [B, N, C]
patch_coord: Patch coordinates [B, N, 2] with (y, x) values
"""
device = x.device
B, _, C = x.shape
shapes = patch_coord.amax(dim=1) + 1
if self.pos_embed_ar_preserving:
# Aspect ratio preserving mode: use square grid with uniform scaling
L_i = shapes.amax(dim=1) # (B,) max(h_i, w_i)
L_global = L_i.amax()
grid_size_y = grid_size_x = L_global
scale_x = scale_y = L_global / L_i # uniform zoom (B,)
else:
# Standard mode: different scaling for x and y
grid_size_y, grid_size_x = shapes.amax(0)
scale_x = grid_size_x / shapes[:, 1] # horizontal zoom (B,)
scale_y = grid_size_y / shapes[:, 0] # vertical zoom (B,)
def _interp1d(table: torch.Tensor, scale: torch.Tensor, out_length: torch.Tensor) -> torch.Tensor:
pe = table.permute(0, 2, 1).unsqueeze(2).expand(B, -1, -1, -1).float() # (1, L, C) -> (B, C, 1, L)
theta = torch.zeros(B, 2, 3, device=x.device)
theta[:, 0, 0] = scale
theta[:, 0, 2] = scale - 1
theta[:, 1, 1] = 1
grid = F.affine_grid(theta, (B, C, 1, out_length), align_corners=False)
pe = F.grid_sample(pe, grid, mode='bilinear', align_corners=False, padding_mode='border')
return pe.to(x.dtype)
# Interpolate along each axis
pe_x = _interp1d(self.pos_embed_x, scale=scale_x, out_length=grid_size_x)
pe_y = _interp1d(self.pos_embed_y, scale=scale_y, out_length=grid_size_y)
bi = torch.arange(B, device=device, dtype=torch.long).unsqueeze(1)
x += pe_x[bi, :, 0, patch_coord[..., 1]] + pe_y[bi, :, 0, patch_coord[..., 0]]
def _apply_factorized_pos_embed(
self,
x: torch.Tensor,
grid_size: List[int],
) -> None:
"""Apply factorized position embeddings to standard 2D batch in-place.
Uses separate Y and X position embedding tables that are interpolated
and combined for the specified grid size.
Args:
x: Input tensor to add position embeddings to [B, H*W, C]
grid_size: Target grid size as [height, width]
"""
orig_h, orig_w = self.pos_embed_y.shape[1], self.pos_embed_x.shape[1]
target_h, target_w = grid_size
if self.pos_embed_ar_preserving:
len_y = len_x = max(target_h, target_w)
else:
len_y, len_x = target_h, target_w
def _interp1d(table: torch.Tensor, new_length: int, orig_length: int) -> torch.Tensor:
if new_length == orig_length:
return table.to(dtype=x.dtype)
return F.interpolate(
table.permute(0, 2, 1).float(), # (1,L,C) -> (1,C,L)
size=new_length,
mode='linear',
align_corners=False,
).permute(0, 2, 1).to(dtype=x.dtype) # (1,L,C)
# Interpolate embeddings
pe_y = _interp1d(self.pos_embed_y, len_y, orig_h)[:, :target_h] # (1,H,C)
pe_x = _interp1d(self.pos_embed_x, len_x, orig_w)[:, :target_w] # (1,W,C)
# Broadcast, add and flatten to sequence layout (row major)
pos_embed = pe_y.unsqueeze(2) + pe_x.unsqueeze(1) # (1, H, W, C)
pos_embed_flat = pos_embed.flatten(1, 2) # (1, H*W, C)
x.add_(pos_embed_flat)
def forward(
self,
x: torch.Tensor,
patch_coord: Optional[torch.Tensor] = None,
patch_valid: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[Tuple[int, int]]]:
"""Forward pass for patch embedding with position encoding.
Args:
x: Input tensor. Supported formats:
- [B, C, H, W] for conv mode
- [B, N, P*P*C] for pre-patchified linear mode (normal)
- [B, N, Ph, Pw, C] for pre-patchified linear mode (variable patch size)
patch_coord: Optional patch coordinates [B, N, 2] for NaFlex mode.
patch_valid: Optional validity mask for patches [B, N] for NaFlex mode.
Returns:
Tuple of (embedded_tensor, grid_size) where:
- embedded_tensor: [B, num_prefix_tokens + N, embed_dim]
- grid_size: (H, W) tuple for standard mode, None for NaFlex mode
"""
grid_size: Optional[Tuple[int, int]] = None
B = x.shape[0]
if self.is_linear:
# Linear embedding path, works with NaFlex mode or standard 2D mode
if patch_coord is None:
# Standard 2D (B, C, H, W) mode
_assert(x.ndim == 4, 'Expecting 2D image input with input ndim == 4')
x, grid_size = batch_patchify(x, self.patch_size, pad=self.dynamic_img_pad)
else:
# Pre-patchified NaFlex mode
# Variable patch size mode: [B, N, Ph, Pw, C], normal mode: [B, N, P*P*C]
_assert(x.ndim == 5 or x.ndim == 3, 'Expecting patchified input with ndim == 3 or 5.')
# Handle variable patch size projection
if self.enable_patch_interpolator and x.ndim == 5:
_assert(self.norm_input is None, 'input norm not supported with patch resizing')
# Apply projection with interpolation
x = self.patch_interpolator(
x,
self.proj.weight,
self.proj.bias,
patch_size=tuple(x.shape[2:4]), # patch size from [B, N, Ph, Pw, C] shape
is_linear=True,
)
else:
# Standard projection
x = x.flatten(2) # ensure [B, N, P*P*C], flatten Ph*Pw*C if separate
if self.norm_input is not None:
x = self.norm_input(x)
x = self.proj(x)
else:
_assert(x.ndim == 4, 'Convolutional input must be 4D')
if self.dynamic_img_pad:
H, W = x.shape[-2:]
pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
x = F.pad(x, (0, pad_w, 0, pad_h))
x = self.proj(x)
grid_size = x.shape[-2:]
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
# Apply normalization after flattening
x = self.norm(x)
if self.pos_embed_type == 'learned':
if grid_size is not None:
# Standard 2D mode
self._apply_learned_pos_embed(x, grid_size=grid_size)
else:
# NaFlex mode
if self.pos_embed_use_grid_sample:
self._apply_learned_naflex_pos_embed_grid_sample(x, patch_coord=patch_coord)
else:
self._apply_learned_naflex_pos_embed(x, patch_coord=patch_coord)
elif self.pos_embed_type == 'factorized':
if grid_size is not None:
# Standard 2D mode
self._apply_factorized_pos_embed(x, grid_size=grid_size)
else:
# NaFlex mode
if self.pos_embed_use_grid_sample:
self._apply_factorized_naflex_pos_embed_grid_sample(x, patch_coord=patch_coord)
else:
self._apply_factorized_naflex_pos_embed(x, patch_coord=patch_coord)
# Prepare and add class and register tokens
to_cat = []
if self.cls_token is not None:
to_cat.append(self.cls_token.expand(B, -1, -1))
if self.reg_token is not None:
to_cat.append(self.reg_token.expand(B, -1, -1))
# Add tokens to the beginning
if to_cat:
x = torch.cat(to_cat + [x], dim=1)
# Apply dropout
x = self.pos_drop(x)
return x, grid_size
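# Illustrative usage sketch for NaFlexEmbeds in default conv mode (hypothetical 64x64 input,
# 16x16 patches, small embed_dim); shows the class token being prepended and the grid size returned.
def _example_naflex_embeds_conv() -> None:
    embeds = NaFlexEmbeds(patch_size=16, embed_dim=32, pos_embed_grid_size=(4, 4))
    tokens, grid_size = embeds(torch.randn(2, 3, 64, 64))
    assert tuple(grid_size) == (4, 4)
    assert tokens.shape == (2, 1 + 4 * 4, 32)  # 1 class token + 16 patch tokens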
@register_notrace_function
def create_attention_mask(
patch_valid: torch.Tensor,
num_prefix_tokens: int = 0,
symmetric: bool = True,
q_len: Optional[int] = None,
dtype: torch.dtype = torch.float32,
) -> Optional[torch.Tensor]:
"""Creates an attention mask from patch validity information.
Supports two modes controlled by `symmetric`:
1. `symmetric=True` (default): Creates a symmetric mask of shape
[B, 1, seq_len, seq_len]. An attention pair (i, j) is allowed only if
both token i and token j are valid. Suitable for standard self-attention.
2. `symmetric=False`: Creates a potentially non-square mask of shape
[B, 1, q_len, kv_len]. An attention pair (q, k) is allowed only if
the key/value token k is valid. Query token validity is not checked
in the mask itself. Useful for cross-attention or specific self-attention
        implementations where `q_len` can be specified.
Used for NaFlex mode to handle variable token counts and padding tokens.
Args:
patch_valid: Tensor of shape [B, N] with True for valid patches, False for padding.
num_prefix_tokens: Number of prefix tokens (class token, register tokens)
to prepend, which are always considered valid.
symmetric: If True, create a symmetric mask.
If False, create an expanded mask based only on key/value validity.
q_len: Query sequence length override. Only used when `symmetric` is False.
Defaults to the key/value sequence length (`kv_len`) if None.
dtype: Dtype of the output attention mask (e.g., torch.float32).
Returns:
Attention mask tensor. Additive mask (-inf for masked, 0 for unmasked).
Shape is [B, 1, seq_len, seq_len] if symmetric=True,
or [B, 1, q_len, kv_len] if symmetric=False.
"""
if patch_valid is None:
return None
patch_valid = patch_valid.bool() # Ensure boolean type
B, N = patch_valid.shape
kv_len = N # Initial key/value length is the number of patches
# Prepend prefix tokens if any
if num_prefix_tokens > 0:
# Create prefix validity tensor on the same device/dtype base as patch_valid
prefix_valid = patch_valid.new_ones((B, num_prefix_tokens), dtype=torch.bool)
# Concatenate prefix and patch validity. Shape becomes [B, num_prefix_tokens + N]
patch_valid = torch.cat([prefix_valid, patch_valid], dim=1)
kv_len += num_prefix_tokens # Update total key/value sequence length
if symmetric:
# Symmetric mask is True where BOTH query and key are valid
mask_bool = patch_valid.unsqueeze(-1) & patch_valid.unsqueeze(1)
mask_bool = mask_bool.unsqueeze(1) # Add head dimension: [B, 1, seq_len, seq_len]
else:
# Expanded mask
q_len = q_len or kv_len
mask_bool = patch_valid[:, None, None, :].expand(B, 1, q_len, kv_len)
# Create the float mask and apply masking using additive mask convention
mask_float = torch.zeros_like(mask_bool, dtype=dtype)
# Fill with negative infinity where mask_bool is False (masked positions)
mask_float.masked_fill_(~mask_bool, torch.finfo(dtype).min)
return mask_float
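# Illustrative sketch (hypothetical 1x3 patch batch, last patch is padding): prefix tokens are
# always treated as valid, valid positions stay 0, and padded key positions get the dtype minimum.
def _example_create_attention_mask() -> None:
    patch_valid = torch.tensor([[True, True, False]])
    mask = create_attention_mask(patch_valid, num_prefix_tokens=1)
    assert mask.shape == (1, 1, 4, 4)  # [B, 1, seq_len, seq_len] with seq_len = 1 prefix + 3 patches
    assert mask[0, 0, 0, 0] == 0  # attention between valid tokens is unmasked
    assert mask[0, 0, 0, 3] == torch.finfo(torch.float32).min  # attention to the padded patch is masked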
@register_notrace_function
def global_pool_naflex(
x: torch.Tensor,
patch_valid: Optional[torch.Tensor] = None,
pool_type: str = 'token',
num_prefix_tokens: int = 1,
reduce_include_prefix: bool = False,
) -> torch.Tensor:
"""Global pooling with NaFlex support for masked tokens.
Applies global pooling while respecting patch validity masks to exclude
padding tokens from pooling operations.
Args:
x: Input tensor with shape [B, N, C]
patch_valid: Optional validity mask for patches [B, N-num_prefix_tokens]
pool_type: Type of pooling ('token', 'avg', 'avgmax', 'max')
num_prefix_tokens: Number of prefix tokens (class/register)
reduce_include_prefix: Whether to include prefix tokens in pooling reduction
Returns:
Pooled tensor with shape [B, C]
"""
if patch_valid is None or pool_type not in ('avg', 'avgmax', 'max'):
# Fall back to standard pooling
x = global_pool_nlc(
x,
pool_type=pool_type,
num_prefix_tokens=num_prefix_tokens,
reduce_include_prefix=reduce_include_prefix,
)
return x
# For NaFlex mode, we need to apply masked pooling to exclude padding tokens
if num_prefix_tokens > 0:
if reduce_include_prefix:
# Include prefix tokens in pooling - they are always considered valid
# patch_valid only covers patch tokens, so create combined validity mask
prefix_valid = patch_valid.new_ones(x.shape[0], num_prefix_tokens)
patch_valid = torch.cat([prefix_valid, patch_valid], dim=1)
else:
# Exclude prefix tokens from pooling (default behavior)
x = x[:, num_prefix_tokens:]
patch_valid_float = patch_valid.to(x.dtype)
if pool_type == 'avg':
# Compute masked average pooling, sum valid tokens and divide by count of valid tokens
masked_sums = (x * patch_valid_float.unsqueeze(-1)).sum(dim=1)
valid_counts = patch_valid_float.sum(dim=1, keepdim=True).clamp(min=1)
pooled = masked_sums / valid_counts
return pooled
elif pool_type == 'avgmax':
# For avgmax, compute masked average and masked max
masked_sums = (x * patch_valid_float.unsqueeze(-1)).sum(dim=1)
valid_counts = patch_valid_float.sum(dim=1, keepdim=True).clamp(min=1)
masked_avg = masked_sums / valid_counts
# For max pooling we set masked positions to large negative value
masked_x = x.clone()
masked_x[~patch_valid] = torch.finfo(masked_x.dtype).min
masked_max = masked_x.amax(dim=1)
# Combine average and max
return 0.5 * (masked_avg + masked_max)
elif pool_type == 'max':
# For max pooling we set masked positions to large negative value
masked_x = x.clone()
masked_x[~patch_valid] = torch.finfo(masked_x.dtype).min
return masked_x.amax(dim=1)
else:
assert False
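# Illustrative sketch (hypothetical values): masked average pooling excludes padding tokens, so with
# one class token and a padded third patch only the two valid patch tokens contribute to the mean.
def _example_global_pool_naflex() -> None:
    x = torch.tensor([[[10.0], [1.0], [3.0], [100.0]]])  # [B=1, 1 cls + 3 patches, C=1]
    patch_valid = torch.tensor([[True, True, False]])  # last patch is padding
    pooled = global_pool_naflex(x, patch_valid, pool_type='avg', num_prefix_tokens=1)
    assert pooled.shape == (1, 1)
    assert torch.allclose(pooled, torch.tensor([[2.0]]))  # mean of 1.0 and 3.0 only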
class NaFlexVit(nn.Module):
"""NaFlexVit: Vision Transformer with NaFlex support for flexible input handling.
A flexible implementation of Vision Transformer that supports:
- Standard image classification with various pooling strategies
- NaFlex functionality for variable aspect ratios and resolutions
- Linear patch embedding for pre-patchified inputs
- Multiple position embedding strategies (learned, factorized, rope)
- Comprehensive attention masking for efficient batch processing
- Encapsulated embedding and position encoding in FlexEmbeds module
- Compatible with standard ViT checkpoints through checkpoint filtering
"""
def __init__(
self,
cfg: Optional[NaFlexVitCfg] = None,
in_chans: int = 3,
num_classes: int = 1000,
img_size: Optional[Union[int, Tuple[int, int]]] = None,
device=None,
dtype=None,
**kwargs,
) -> None:
"""Initialize NaFlexVit model.
Args:
cfg: Model configuration. If None, uses default NaFlexVitCfg.
in_chans: Number of input image channels.
num_classes: Number of classification classes.
img_size: Input image size (for backwards compatibility with classic vit).
**kwargs: Additional config parameters to override cfg values.
"""
super().__init__()
dd = {'device': device, 'dtype': dtype}
# Initialize config
cfg = cfg or NaFlexVitCfg()
if kwargs:
cfg = _overlay_kwargs(cfg, **kwargs)
# Validate configuration
assert cfg.global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map')
assert cfg.class_token or cfg.global_pool != 'token'
assert cfg.pos_embed in ('', 'none', 'learned', 'factorized')
# Resolve layer implementations
norm_layer = get_norm_layer(cfg.norm_layer) or LayerNorm
embed_norm_layer = get_norm_layer(cfg.embed_norm_layer)
act_layer = get_act_layer(cfg.act_layer) or nn.GELU
block_fn = get_block_fn(cfg)
mlp_layer = cfg.mlp_layer or Mlp # TODO: Support configurable mlp_layer via string lookup
# Store instance variables
self.num_classes = num_classes
self.in_chans = in_chans
self.global_pool = cfg.global_pool
self.num_features = self.head_hidden_size = self.embed_dim = cfg.embed_dim # for consistency with other models
self.num_prefix_tokens = 1 if cfg.class_token else 0
self.num_prefix_tokens += cfg.reg_tokens
self.num_reg_tokens = cfg.reg_tokens
self.has_class_token = cfg.class_token
self.pool_include_prefix = cfg.pool_include_prefix
self.grad_checkpointing = False
# Initialize embedding module (includes patch, position embedding, and class/reg tokens)
# FlexEmbeds is always used - handles both linear and conv embedding
self.embeds = NaFlexEmbeds(
patch_size=cfg.patch_size,
in_chans=in_chans,
embed_dim=cfg.embed_dim,
proj_type=cfg.embed_proj_type,
proj_bias=not cfg.pre_norm, # disable bias if pre-norm is used (e.g. CLIP)
class_token=cfg.class_token,
reg_tokens=cfg.reg_tokens,
default_img_size=img_size,
dynamic_img_pad=cfg.dynamic_img_pad,
pos_embed=cfg.pos_embed,
pos_embed_grid_size=cfg.pos_embed_grid_size,
pos_embed_interp_mode=cfg.pos_embed_interp_mode,
pos_embed_ar_preserving=cfg.pos_embed_ar_preserving,
pos_embed_use_grid_sample=cfg.pos_embed_use_grid_sample,
proj_norm_layer=embed_norm_layer,
pos_drop_rate=cfg.pos_drop_rate,
enable_patch_interpolator=getattr(cfg, 'enable_patch_interpolator', False),
**dd,
)
self.norm_pre = norm_layer(cfg.embed_dim, **dd) if cfg.pre_norm else nn.Identity()
# ROPE position embeddings at model level
self.rope: Optional[nn.Module] = None
self.rope_is_mixed = False
if cfg.rope_type and cfg.rope_type != 'none':
from timm.layers.pos_embed_sincos import RotaryEmbeddingCat, RotaryEmbeddingMixed
if cfg.rope_type == 'mixed':
self.rope = RotaryEmbeddingMixed(
cfg.embed_dim,
depth=cfg.depth,
num_heads=cfg.num_heads,
temperature=cfg.rope_temperature,
feat_shape=None, # Dynamic shapes for NaFlex
grid_indexing=cfg.rope_grid_indexing,
**dd,
)
self.rope_is_mixed = True
elif cfg.rope_type == 'axial':
self.rope = RotaryEmbeddingCat(
cfg.embed_dim // cfg.num_heads,
temperature=cfg.rope_temperature,
in_pixels=False,
feat_shape=None, # Dynamic shapes for NaFlex
ref_feat_shape=cfg.rope_ref_feat_shape,
grid_offset=cfg.rope_grid_offset,
grid_indexing=cfg.rope_grid_indexing,
**dd,
)
self.rope_is_mixed = False
else:
raise ValueError(f"Unknown rope_type: {cfg.rope_type}")
# Patch dropout
if cfg.patch_drop_rate > 0:
self.patch_drop = PatchDropoutWithIndices(
cfg.patch_drop_rate,
num_prefix_tokens=self.num_prefix_tokens,
)
else:
self.patch_drop = None
# Transformer blocks
dpr = calculate_drop_path_rates(cfg.drop_path_rate, cfg.depth) # stochastic depth decay rule
# Create transformer blocks
self.blocks = nn.Sequential(*[
block_fn(
dim=cfg.embed_dim,
num_heads=cfg.num_heads,
mlp_ratio=cfg.mlp_ratio,
qkv_bias=cfg.qkv_bias,
qk_norm=cfg.qk_norm,
proj_bias=cfg.proj_bias,
init_values=cfg.init_values,
proj_drop=cfg.proj_drop_rate,
attn_drop=cfg.attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
mlp_layer=mlp_layer,
depth=i,
**dd,
)
for i in range(cfg.depth)
])
# Feature info for downstream tasks
patch_reduction = self.embeds.feat_ratio(as_scalar=True)
self.feature_info = [
dict(module=f'blocks.{i}', num_chs=cfg.embed_dim, reduction=patch_reduction)
for i in range(cfg.depth)
]
self.norm = norm_layer(cfg.embed_dim, **dd) if cfg.final_norm and not cfg.fc_norm else nn.Identity()
# Classifier Head
if cfg.global_pool == 'map':
self.attn_pool = AttentionPoolLatent(
self.embed_dim,
num_heads=cfg.attn_pool_num_heads or cfg.num_heads,
mlp_ratio=cfg.attn_pool_mlp_ratio or cfg.mlp_ratio,
norm_layer=norm_layer,
act_layer=act_layer,
**dd,
)
else:
self.attn_pool = None
# Handle fc_norm default value
fc_norm = cfg.fc_norm
if fc_norm is None:
fc_norm = cfg.global_pool == 'avg'
self.fc_norm = norm_layer(cfg.embed_dim, **dd) if cfg.final_norm and fc_norm else nn.Identity()
self.head_drop = nn.Dropout(cfg.drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes, **dd) if num_classes > 0 else nn.Identity()
self.weight_init_mode = cfg.weight_init
self.fix_init = cfg.fix_init
# TODO: skip init when on meta device when safe to do so
self.init_weights(cfg.weight_init, needs_reset=False)
def fix_init_weight(self) -> None:
"""Apply initialization weight fix with layer-wise scaling."""
def rescale(param: torch.Tensor, _layer_id: int) -> None:
with torch.no_grad():
param.div_(math.sqrt(2.0 * _layer_id))
for layer_id, layer in enumerate(self.blocks):
if hasattr(layer, 'attn'):
rescale(layer.attn.proj.weight, layer_id + 1)
if hasattr(layer, 'mlp'):
rescale(layer.mlp.fc2.weight, layer_id + 1)
if hasattr(layer, 'attn_out_proj'):
rescale(layer.attn_out_proj.weight, layer_id + 1)
if hasattr(layer, 'mlp_out_proj'):
rescale(layer.mlp_out_proj.weight, layer_id + 1)
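    # Illustrative numbers (hypothetical 12-block model): block 0 (layer_id + 1 == 1) is divided by
    # sqrt(2) ~= 1.41, block 11 by sqrt(2 * 12) ~= 4.90, so residual branch outputs start smaller in
    # deeper blocks.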
def init_weights(self, mode: str = '', needs_reset: bool = True) -> None:
"""Initialize model weights according to specified scheme.
Args:
mode: Initialization mode ('jax', 'jax_nlhb', 'moco', or '')
needs_reset: If True, call reset_parameters() on modules (default for after to_empty()).
If False, skip reset_parameters() (for __init__ where modules already self-initialized).
"""
mode = mode or self.weight_init_mode
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
named_apply(get_init_weights_vit(mode, head_bias, needs_reset=needs_reset), self)
if self.fix_init:
self.fix_init_weight()
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path: str, prefix: str = '') -> None:
# Custom loading for the new model structure
from .vision_transformer import _load_weights as _orig_load_weights
def _load_weights_adapter(model, checkpoint_path, prefix=''):
"""Adapter function to handle the different model structure"""
state_dict = torch.load(checkpoint_path, map_location='cpu')
if isinstance(state_dict, dict) and 'state_dict' in state_dict:
state_dict = state_dict['state_dict']
# Map original keys to new structure
for k in list(state_dict.keys()):
if k.startswith('cls_token'):
state_dict['embeds.' + k] = state_dict.pop(k)
elif k.startswith('reg_token'):
state_dict['embeds.' + k] = state_dict.pop(k)
elif k.startswith('pos_embed'):
state_dict['embeds.' + k] = state_dict.pop(k)
elif k.startswith('patch_embed'):
state_dict['embeds.' + k[12:]] = state_dict.pop(k)
return _orig_load_weights(model, state_dict, prefix)
_load_weights_adapter(self, checkpoint_path, prefix)
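    # Illustrative key mapping (hypothetical checkpoint keys): 'cls_token' -> 'embeds.cls_token',
    # 'pos_embed' -> 'embeds.pos_embed', 'patch_embed.proj.weight' -> 'embeds.proj.weight'.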
@torch.jit.ignore
def no_weight_decay(self) -> Set:
"""Get set of parameter names that should not have weight decay applied.
Returns:
Set of parameter names to skip during weight decay
"""
skip_list = {'embeds.pos_embed', 'embeds.cls_token', 'embeds.reg_token'}
if self.rope and hasattr(self.rope, 'no_weight_decay'):
skip_list.update(self.rope.no_weight_decay())
return skip_list
@torch.jit.ignore
def group_matcher(self, coarse: bool = False) -> Dict:
"""Get parameter group matcher for optimizer parameter grouping.
Args:
coarse: Whether to use coarse-grained grouping
Returns:
Dictionary mapping group names to regex patterns
"""
return dict(
stem=r'^embeds', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True) -> None:
"""Enable or disable gradient checkpointing for memory efficiency.
Args:
enable: Whether to enable gradient checkpointing
"""
self.grad_checkpointing = enable
if hasattr(self.embeds, 'patch_embed') and hasattr(self.embeds.patch_embed, 'set_grad_checkpointing'):
self.embeds.patch_embed.set_grad_checkpointing(enable)
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
"""Get the classification head module.
Returns:
Classification head module
"""
return self.head
@disable_compiler
def _generate_rope_naflex(
self,
x: torch.Tensor,
patch_coord: torch.Tensor,
) -> Union[torch.Tensor, List[torch.Tensor], Any]:
"""Generate ROPE position embeddings for NaFlex batch with variable grid sizes.
Args:
x: Input tensor [B, N, C]
patch_coord: Patch coordinates [B, N, 2] with (y, x) values
Returns:
ROPE embeddings:
- Axial mode: Tensor of shape [B, 1, N, dim*2]
                - Mixed mode: iterator yielding one tensor of shape [B, num_heads, N, dim] per depth layer
"""
# Calculate grid sizes for each sample
naflex_grid_sizes = calculate_naflex_grid_sizes(patch_coord)
# Build ROPE embeddings for each unique grid size
size_to_indices = {}
unique_sizes = []
for bi, grid_size in enumerate(naflex_grid_sizes):
if grid_size not in size_to_indices:
size_to_indices[grid_size] = []
unique_sizes.append(grid_size)
size_to_indices[grid_size].append(bi)
B, N, C = x.shape
seq_len = N - self.num_prefix_tokens
if self.rope_is_mixed:
            # Use an iterator for mixed mode, yielding one [batch_size, num_heads, seq_len, dim] tensor per depth
return NaFlexRopeIterator(
self.rope,
size_to_indices,
unique_sizes,
B,
seq_len,
                device=x.device,
                dtype=x.dtype,
)
# Axial mode: [batch_size, seq_len, dim*2]
rope_embeds = torch.zeros(B, seq_len, self.rope.dim * 2, dtype=x.dtype, device=x.device)
if hasattr(self.rope, 'get_batch_embeds'):
# Batch mode - generate unique embeds from one grid and then assign
unique_embeds = self.rope.get_batch_embeds(unique_sizes)
for grid_size, embed, batch_indices in zip(unique_sizes, unique_embeds, size_to_indices.values()):
h, w = grid_size
actual_len = h * w
for bi in batch_indices:
rope_embeds[bi, :actual_len] = embed[:actual_len]
else:
# Generate each unique size separately and assign
for grid_size, bi in size_to_indices.items():
rope_embed = self.rope.get_embed(shape=grid_size)
h, w = grid_size
actual_len = h * w
rope_embeds[bi, :actual_len] = rope_embed[:actual_len]
rope_embeds = rope_embeds.unsqueeze(1)
return rope_embeds
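    # Illustrative shapes (hypothetical): with two samples whose grids are (8, 12) and (10, 10) and a
    # padded patch sequence of length 100, axial mode returns a single [2, 1, 100, dim * 2] tensor with
    # the first 96 / 100 rows filled per sample, while mixed mode returns an iterator yielding one
    # [2, num_heads, 100, head_dim] tensor per transformer depth.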
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
"""Reset the classification head with new number of classes and pooling.
Args:
num_classes: Number of classes for new classification head
global_pool: Optional new global pooling type
"""
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map')
if global_pool == 'map' and self.attn_pool is None:
assert False, "Cannot currently add attention pooling in reset_classifier()."
elif global_pool != 'map' and self.attn_pool is not None:
self.attn_pool = None # remove attention pooling
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _forward_embeds(
self,
x,
patch_coord,
patch_valid,
attn_mask,
) -> Dict[str, torch.Tensor]:
""" Forward pass through patch / abs pos / rope pos embeds and patch dropout
"""
naflex_mode = patch_coord is not None
# patch embed, abs pos embed, returns global grid size as calculated from 'standard' NCHW batches
x, grid_size = self.embeds(
x,
patch_coord=patch_coord,
patch_valid=patch_valid,
)
# Generate ROPE embeddings at model level
rope_embeds = None
if self.rope is not None:
if patch_coord is not None:
# NaFlex mode - variable grid sizes
rope_embeds = self._generate_rope_naflex(x, patch_coord)
elif grid_size is not None:
# Standard mode - fixed grid size
rope_embeds = self.rope.get_embed(shape=grid_size)
else:
assert False, 'Expected one of patch_coord or grid_size to be valid'
# Apply patch dropout with coordinated updates
keep_indices: Optional[torch.Tensor] = None
if self.training and self.patch_drop is not None:
x, keep_indices = self.patch_drop(x)
# keep_indices excludes prefix tokens, can use directly on patch_valid & rope embeds
if patch_valid is not None:
patch_valid = patch_valid.gather(1, keep_indices)
if rope_embeds is not None and not self.rope_is_mixed:
# Update ROPE embeddings to match dropped tokens (only for axial mode)
# Batch dim already present in NaFlex mode, but will be added in standard mode.
rope_embeds = apply_keep_indices_nlc(x, rope_embeds, keep_indices, pos_embed_has_batch=naflex_mode)
if not naflex_mode:
# B, N, dim -> B, 1, N, dim. Need head dim added for standard mode, already added in NaFlex.
rope_embeds = rope_embeds.unsqueeze(1)
# Create attention mask from patch_valid after patch dropout applied
if attn_mask is None:
attn_mask = create_attention_mask(
patch_valid,
num_prefix_tokens=self.num_prefix_tokens,
dtype=x.dtype
)
x = self.norm_pre(x)
return {
'patches': x,
'patch_valid': patch_valid,
'rope_embeds': rope_embeds,
'attn_mask': attn_mask,
            'keep_indices': keep_indices,
            'naflex_mode': naflex_mode,
}
def forward_intermediates(
self,
x: Union[torch.Tensor, Dict[str, torch.Tensor]],
indices: Optional[Union[int, List[int]]] = None,
return_prefix_tokens: bool = False,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
output_dict: bool = False,
patch_coord: Optional[torch.Tensor] = None,
patch_valid: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]], Dict[str, Any]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
return_prefix_tokens: Return both prefix and spatial intermediate tokens
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
output_dict: Return outputs as a dictionary with 'image_features' and 'image_intermediates' keys
patch_coord: Optional patch coordinates [B, N, 2] for NaFlex mode
patch_valid: Optional patch type indicators (1=patch, 0=padding) for NaFlex
attn_mask: Optional attention mask for masked attention
Returns:
A tuple with (final_features, intermediates), a list of intermediate features, or a dictionary containing
'image_features' and 'image_intermediates' (and optionally 'image_intermediates_prefix')
"""
# FIXME unfinished / untested
assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
reshape = output_fmt == 'NCHW'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
if isinstance(x, Dict):
# Handle dictionary input from NaFlex collator
patch_coord = x['patch_coord']
patch_valid = x['patch_valid']
patches = x['patches']
assert False, 'WIP, patch mode needs more work'
else:
patches = x
height, width = x.shape[-2:]
H, W = self.embeds.dynamic_feat_size((height, width))
# Forward pass through patch and abs position embedding
embeds = self._forward_embeds(
patches,
patch_coord=patch_coord,
patch_valid=patch_valid,
attn_mask=attn_mask,
)
x = embeds['patches']
rope_embeds = embeds.get('rope_embeds', None)
keep_indices = embeds.get('keep_indices', None)
attn_mask = embeds.get('attn_mask', None)
# Forward pass through blocks
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
do_checkpointing = self.grad_checkpointing and not torch.jit.is_scripting()
if self.rope_is_mixed and rope_embeds is not None:
# Mixed mode with per-layer embeddings (list or iterator)
for i, (blk, rope_embed) in enumerate(zip(self.blocks, rope_embeds)):
# Apply patch dropout to rope_embed if needed
if self.training and self.patch_drop is not None and keep_indices is not None:
# Apply patch dropout to rope_embed if needed (batch dim already present in naflex mode)
rope_embed = apply_keep_indices_nlc(
x,
rope_embed,
keep_indices,
pos_embed_has_batch=embeds.get('naflex_mode', False),
)
if do_checkpointing:
x = checkpoint(blk, x, rope=rope_embed, attn_mask=attn_mask)
else:
x = blk(x, rope=rope_embed, attn_mask=attn_mask)
if i in take_indices:
# normalize intermediates with final norm layer if enabled
intermediates.append(self.norm(x) if norm else x)
else:
for i, blk in enumerate(blocks):
# Axial ROPE mode with shared embeddings
if rope_embeds is not None:
if do_checkpointing:
x = checkpoint(blk, x, rope=rope_embeds, attn_mask=attn_mask)
else:
x = blk(x, rope=rope_embeds, attn_mask=attn_mask)
else:
if do_checkpointing:
x = checkpoint(blk, x, attn_mask=attn_mask)
else:
x = blk(x, attn_mask=attn_mask)
if i in take_indices:
# normalize intermediates with final norm layer if enabled
intermediates.append(self.norm(x) if norm else x)
# Process intermediates
if self.num_prefix_tokens:
# split prefix (e.g. class, distill) and spatial feature tokens
prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates]
intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates]
else:
prefix_tokens = None
if reshape:
# reshape to BCHW output format
intermediates = [
y.reshape(y.shape[0], H, W, -1).permute(0, 3, 1, 2).contiguous()
for y in intermediates
]
# FIXME always use dict for NaFlex mode to return masks and more?
# For dictionary output
if output_dict:
result_dict = {}
# Intermediates are always included
result_dict['image_intermediates'] = intermediates
if prefix_tokens is not None and return_prefix_tokens:
result_dict['image_intermediates_prefix'] = prefix_tokens
# Only include features if not intermediates_only
if not intermediates_only:
x_final = self.norm(x)
result_dict['image_features'] = x_final
return result_dict
# For non-dictionary output, maintain the original behavior
if not torch.jit.is_scripting() and return_prefix_tokens and prefix_tokens is not None:
# return_prefix not supported in torchscript due to poor type handling
intermediates = list(zip(intermediates, prefix_tokens))
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def forward_features(
self,
patches: torch.Tensor,
patch_coord: Optional[torch.Tensor] = None,
patch_valid: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
"""
"""
naflex_mode = patch_coord is not None
# Pass through patch & abs position embedding module with patch coordinate/type support
embeds = self._forward_embeds(
patches,
patch_coord=patch_coord,
patch_valid=patch_valid,
attn_mask=attn_mask,
)
x = embeds['patches']
rope_embeds = embeds.get('rope_embeds', None)
keep_indices = embeds.get('keep_indices', None)
attn_mask = embeds.get('attn_mask', None)
# Apply transformer blocks with masked attention and/or ROPE if provided
do_checkpointing = self.grad_checkpointing and not torch.jit.is_scripting()
if self.rope_is_mixed and rope_embeds is not None:
# Mixed mode with per-layer embeddings (list or iterator)
for i, (blk, rope_embed) in enumerate(zip(self.blocks, rope_embeds)):
if self.training and self.patch_drop is not None and keep_indices is not None:
# Apply patch dropout to rope_embed if needed (batch dim already present in naflex mode)
rope_embed = apply_keep_indices_nlc(
x,
rope_embed,
keep_indices,
pos_embed_has_batch=naflex_mode,
)
if do_checkpointing:
x = checkpoint(blk, x, rope=rope_embed, attn_mask=attn_mask)
else:
x = blk(x, rope=rope_embed, attn_mask=attn_mask)
elif rope_embeds is not None:
# Axial ROPE mode with shared embeddings
for blk in self.blocks:
if do_checkpointing:
x = checkpoint(blk, x, rope=rope_embeds, attn_mask=attn_mask)
else:
x = blk(x, rope=rope_embeds, attn_mask=attn_mask)
else:
for blk in self.blocks:
if do_checkpointing:
x = checkpoint(blk, x, attn_mask=attn_mask)
else:
x = blk(x, attn_mask=attn_mask)
x = self.norm(x)
if naflex_mode:
return {
'patches': x,
'patch_valid': embeds.get('patch_valid', None),
}
return x
def _pool(
self,
x: torch.Tensor,
pool_type: Optional[str] = None,
patch_valid: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if self.attn_pool is not None:
attn_mask = create_attention_mask(
patch_valid,
num_prefix_tokens=self.num_prefix_tokens if self.pool_include_prefix else 0,
symmetric=False,
q_len=1,
dtype=x.dtype,
)
if not self.pool_include_prefix:
x = x[:, self.num_prefix_tokens:]
x = self.attn_pool(x, attn_mask=attn_mask)
return x
pool_type = self.global_pool if pool_type is None else pool_type
x = global_pool_naflex(
x,
patch_valid,
pool_type=pool_type,
num_prefix_tokens=self.num_prefix_tokens,
reduce_include_prefix=self.pool_include_prefix,
)
return x
def forward_head(
self,
patches: torch.Tensor,
pre_logits: bool = False,
patch_valid: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x = self._pool(patches, patch_valid=patch_valid)
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(
self,
x: Union[torch.Tensor, Dict[str, torch.Tensor]],
patch_coord: Optional[torch.Tensor] = None,
patch_valid: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Forward pass with optional NaFlex support.
Args:
x: Input tensor. Supported formats:
- [B, C, H, W] standard image input
- [B, N, P*P*C] pre-patchified tensor (flattened patches)
- [B, N, Ph, Pw, C] pre-patchified tensor (variable patch size)
- Dict from NaFlex collator
patch_coord: Optional patch coordinates [B, N, 2] for NaFlex mode.
patch_valid: Optional patch validity indicators for NaFlex.
attn_mask: Optional attn mask to override defaults generated from patch_valid
Returns:
Model output tensor.
"""
input_is_dict = isinstance(x, Dict)
naflex_mode = input_is_dict or patch_coord is not None
if naflex_mode:
if input_is_dict:
# Handle dictionary input from NaFlex collator, dict inputs take priority over args
patches = x['patches']
patch_valid = x.get('patch_valid', patch_valid)
patch_coord = x.get('patch_coord', patch_coord)
attn_mask = x.get('attn_mask', attn_mask)
else:
patches = x
_assert(patch_coord is not None, "patch_coord is required in naflex mode")
_assert(patch_valid is not None, "patch_valid is required in naflex mode")
features = self.forward_features(
patches=patches,
patch_valid=patch_valid,
patch_coord=patch_coord,
attn_mask=attn_mask,
)
# Pass patches & patch_valid to forward_head for masked pooling
x = self.forward_head(**features)
else:
x = self.forward_features(x)
x = self.forward_head(x)
return x
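# Example (illustrative sketch, not part of the original module): two ways to call the
# model per the forward() docstring above. Model name and shapes are assumptions chosen
# for demonstration only.
#
#   model = naflexvit_base_patch16_gap(pretrained=False)
#   # 1) standard image tensor input
#   out = model(torch.randn(1, 3, 384, 384))
#   # 2) NaFlex dict input with explicit patch coordinates and validity mask
#   yy, xx = torch.meshgrid(torch.arange(12), torch.arange(16), indexing='ij')
#   batch = {
#       'patches': torch.randn(1, 192, 16 * 16 * 3),                    # [B, N, P*P*C]
#       'patch_coord': torch.stack([yy, xx], dim=-1).reshape(1, -1, 2),
#       'patch_valid': torch.ones(1, 192, dtype=torch.bool),
#   }
#   out = model(batch)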
def _debug_dump_patches(x):
# DEBUG, reconstruct patches & save
patch_coord = x['patch_coord']
patch_valid = x['patch_valid']
patches = x['patches']
for i in range(len(patches)):
patch = patches[i][patch_valid[i]]
h = (patch_coord[i, :, 0].max() + 1).item()
w = (patch_coord[i, :, 1].max() + 1).item()
patch = patch.reshape(h, w, 16, 16, 3).permute(4, 0, 2, 1, 3)
patch = patch.reshape(3, h*16, w*16)
from torchvision.utils import save_image
save_image(patch, f'patch_{i}.jpg', normalize=True)
def get_init_weights_vit(mode: str = 'jax', head_bias: float = 0.0, needs_reset: bool = True) -> Callable:
"""Function imported from vision_transformer.py to maintain compatibility"""
from .vision_transformer import (
init_weights_vit_jax,
init_weights_vit_moco,
init_weights_vit_timm,
init_weights_reset_parameters,
)
if 'jax' in mode:
return partial(init_weights_vit_jax, head_bias=head_bias, needs_reset=needs_reset)
elif 'moco' in mode:
return partial(init_weights_vit_moco, needs_reset=needs_reset)
else:
return partial(init_weights_vit_timm, needs_reset=needs_reset)
def checkpoint_filter_fn(state_dict: Dict[str, Any], model: NaFlexVit) -> Dict[str, Any]:
"""Handle state dict conversion from original ViT to the new version with combined embedding."""
# Handle CombinedEmbed module pattern
out_dict = {}
for k, v in state_dict.items():
# Convert tokens and embeddings to combined_embed structure
if k == 'pos_embed':
# Handle position embedding format conversion - from (1, N, C) to (1, H, W, C)
if hasattr(model.embeds, 'pos_embed') and v.ndim == 3:
num_cls_token = 0
num_reg_token = 0
if 'reg_token' in state_dict:
num_reg_token = state_dict['reg_token'].shape[1]
if 'cls_token' in state_dict:
num_cls_token = state_dict['cls_token'].shape[1]
num_prefix_tokens = num_cls_token + num_reg_token
# Original format is (1, N, C), need to reshape to (1, H, W, C)
num_patches = v.shape[1]
num_patches_no_prefix = num_patches - num_prefix_tokens
grid_size_no_prefix = math.sqrt(num_patches_no_prefix)
grid_size = math.sqrt(num_patches)
if (grid_size_no_prefix != grid_size
and (grid_size_no_prefix.is_integer() and not grid_size.is_integer())
):
# make a decision, did the pos_embed of the original include the prefix tokens?
num_patches = num_patches_no_prefix
cls_token_emb = v[:, 0:num_cls_token]
if cls_token_emb.numel():
state_dict['cls_token'] += cls_token_emb
reg_token_emb = v[:, num_cls_token:num_prefix_tokens]
if reg_token_emb.numel():
state_dict['reg_token'] += reg_token_emb
v = v[:, num_prefix_tokens:]
grid_size = grid_size_no_prefix
grid_size = int(grid_size)
# Check if it's a perfect square for a standard grid
if grid_size * grid_size == num_patches:
# Reshape from (1, N, C) to (1, H, W, C)
v = v.reshape(1, grid_size, grid_size, v.shape[2])
else:
# Not a square grid, we need to get the actual dimensions
if hasattr(model.embeds.patch_embed, 'grid_size'):
h, w = model.embeds.patch_embed.grid_size
if h * w == num_patches:
# We have the right dimensions
v = v.reshape(1, h, w, v.shape[2])
else:
# Dimensions don't match, use interpolation
_logger.warning(
f"Position embedding size mismatch: checkpoint={num_patches}, model={(h * w)}. "
f"Using default initialization and will resize in forward pass."
)
# Keep v as is, the forward pass will handle resizing
out_dict['embeds.pos_embed'] = v
elif k == 'cls_token':
out_dict['embeds.cls_token'] = v
elif k == 'reg_token':
out_dict['embeds.reg_token'] = v
# Convert patch_embed.X to embeds.X
elif k.startswith('patch_embed.'):
suffix = k[12:]
if suffix == 'proj.weight':
v = v.permute(0, 2, 3, 1).flatten(1)
new_key = 'embeds.' + suffix
out_dict[new_key] = v
else:
out_dict[k] = v
return out_dict
def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 384, 384),
'pool_size': None,
'crop_pct': 1.0,
'interpolation': 'bicubic',
'mean': IMAGENET_INCEPTION_MEAN,
'std': IMAGENET_INCEPTION_STD,
'first_conv': 'embeds.proj',
'classifier': 'head',
'license': 'apache-2.0',
**kwargs,
}
default_cfgs = generate_default_cfgs({
'naflexvit_base_patch16_gap.e300_s576_in1k': _cfg(
hf_hub_id='timm/',
),
'naflexvit_base_patch16_par_gap.e300_s576_in1k': _cfg(
hf_hub_id='timm/',
),
'naflexvit_base_patch16_parfac_gap.e300_s576_in1k': _cfg(
hf_hub_id='timm/',
),
'naflexvit_base_patch16_map.untrained': _cfg(),
'naflexvit_so150m2_patch16_reg1_gap.untrained': _cfg(),
'naflexvit_so150m2_patch16_reg1_map.untrained': _cfg(),
# SigLIP-2 NaFlex vit encoder weights
'naflexvit_base_patch16_siglip.v2_webli': _cfg(
hf_hub_id='timm/',
num_classes=0),
'naflexvit_so400m_patch16_siglip.v2_webli': _cfg(
hf_hub_id='timm/',
num_classes=0),
})
def _create_naflexvit(variant: str, pretrained: bool = False, **kwargs) -> NaFlexVit:
out_indices = kwargs.pop('out_indices', 3)
cfg = kwargs.pop('cfg', NaFlexVitCfg())
cfg_field_names = {f.name for f in fields(NaFlexVitCfg)}
# pop in-place so the original kwargs is emptied of cfg-specific keys
cfg_updates = {k: kwargs.pop(k) for k in list(kwargs) if k in cfg_field_names}
if cfg_updates:
cfg = _overlay_kwargs(cfg, **cfg_updates)
model = build_model_with_cfg(
NaFlexVit, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
cfg=cfg,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
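# Example (illustrative sketch): kwargs matching NaFlexVitCfg fields are popped and
# overlaid onto the cfg, while the rest are forwarded to the model constructor. The
# specific field values below are assumptions for demonstration only.
#
#   import timm
#   model = timm.create_model(
#       'naflexvit_base_patch16_gap',
#       pretrained=False,
#       pos_embed_grid_size=(24, 24),   # NaFlexVitCfg field -> merged into cfg
#       num_classes=10,                 # non-cfg kwarg -> passed through to the model
#   )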
def _create_naflexvit_from_classic(
variant: str,
pretrained: bool = False,
**kwargs,
) -> NaFlexVit:
"""Create FlexVit model from classic VisionTransformer configuration.
This function handles the parameter mapping and configuration logic needed
to create FlexVit models that are compatible with classic VisionTransformer
configurations and pretrained weights.
Args:
variant: Model variant name
pretrained: Whether to load pretrained weights
**kwargs: Classic VisionTransformer parameters
Returns:
FlexVit model instance
"""
# Remove VisionTransformer-specific parameters that don't apply to FlexVit
kwargs.pop('no_embed_class', None)
kwargs.pop('dynamic_img_size', None)
# Handle global pooling and fc_norm defaults that differ between ViT and FlexVit
gp = kwargs.pop('global_pool', 'token') # Original ViTs default to cls token pooling
fc_norm = kwargs.pop('fc_norm', None) # Original ViTs default fc_norm on when it's unset and avg pooling is used
if fc_norm is None and gp == 'avg':
fc_norm = True
# Set FlexVit-specific defaults that differ from VisionTransformer
flex_kwargs = {
'pos_embed_grid_size': None, # rely on img_size (// patch_size) that will be passed through
'class_token': kwargs.get('class_token', True),
'global_pool': gp,
'fc_norm': fc_norm,
'scale_mlp_norm': kwargs.pop('scale_mlp_norm', False),
'scale_attn_inner_norm': kwargs.pop('scale_attn_norm', False),
**kwargs # User overrides take precedence
}
return _create_naflexvit(variant, pretrained, **flex_kwargs)
def _create_naflexvit_from_eva(
variant: str,
pretrained: bool = False,
**kwargs,
) -> NaFlexVit:
"""Create NaFlexVit model from EVA configuration.
This function handles the parameter mapping and configuration logic needed
to create NaFlexVit models that are compatible with EVA configurations
and pretrained weights.
Args:
variant: Model variant name
pretrained: Whether to load pretrained weights
**kwargs: EVA model parameters
Returns:
NaFlexVit model instance
"""
# Handle EVA's unique parameters & block args
kwargs.pop('no_embed_class', None) # EVA specific, not used in NaFlexVit (always no-embed)
# Map EVA's rope parameters
use_rot_pos_emb = kwargs.pop('use_rot_pos_emb', False)
rope_mixed_mode = kwargs.pop('rope_mixed_mode', False)
rope_temperature = kwargs.pop('rope_temperature', 10000.)
rope_grid_offset = kwargs.pop('rope_grid_offset', 0.)
rope_grid_indexing = kwargs.pop('rope_grid_indexing', 'ij')
if use_rot_pos_emb:
rope_type = 'mixed' if rope_mixed_mode else 'axial'
else:
rope_type = 'none'
# Handle norm/pool resolution logic to mirror EVA
gp = kwargs.pop('global_pool', 'avg')
use_pre_transformer_norm = kwargs.pop('use_pre_transformer_norm', False)
use_post_transformer_norm = kwargs.pop('use_post_transformer_norm', True)
use_fc_norm = kwargs.pop('use_fc_norm', None)
if use_fc_norm is None:
use_fc_norm = gp == 'avg' # default on if avg pool used
# Set NaFlexVit-specific parameters
naflex_kwargs = {
'pos_embed_grid_size': None, # rely on img_size (// patch_size)
'class_token': kwargs.get('class_token', True),
'reg_tokens': kwargs.pop('num_reg_tokens', kwargs.get('reg_tokens', 0)),
'global_pool': gp,
'pre_norm': use_pre_transformer_norm,
'final_norm': use_post_transformer_norm,
'fc_norm': use_fc_norm,
'pos_embed': 'learned' if kwargs.pop('use_abs_pos_emb', True) else 'none',
'rope_type': rope_type,
'rope_temperature': rope_temperature,
'rope_grid_offset': rope_grid_offset,
'rope_grid_indexing': rope_grid_indexing,
'rope_ref_feat_shape': kwargs.get('ref_feat_shape', None),
'attn_type': kwargs.pop('attn_type', 'eva'),
'swiglu_mlp': kwargs.pop('swiglu_mlp', False),
'qkv_fused': kwargs.pop('qkv_fused', True),
'scale_mlp_norm': kwargs.pop('scale_mlp', False),
'scale_attn_inner_norm': kwargs.pop('scale_attn_inner', False),
**kwargs # Pass remaining kwargs through
}
return _create_naflexvit(variant, pretrained, **naflex_kwargs)
@register_model
def naflexvit_base_patch16_gap(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-Base with NaFlex functionality and global average pooling.
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
init_values=1e-5,
global_pool='avg',
reg_tokens=4,
fc_norm=True,
)
model = _create_naflexvit('naflexvit_base_patch16_gap', pretrained=pretrained, cfg=cfg, **kwargs)
return model
@register_model
def naflexvit_base_patch16_par_gap(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-Base with NaFlex functionality, aspect preserving pos embed, global average pooling.
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
init_values=1e-5,
pos_embed_ar_preserving=True,
global_pool='avg',
reg_tokens=4,
fc_norm=True,
)
model = _create_naflexvit('naflexvit_base_patch16_par_gap', pretrained=pretrained, cfg=cfg, **kwargs)
return model
@register_model
def naflexvit_base_patch16_parfac_gap(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-Base with NaFlex functionality, aspect preserving & factorized pos embed, global average pooling.
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
init_values=1e-5,
pos_embed_ar_preserving=True,
pos_embed='factorized',
global_pool='avg',
reg_tokens=4,
fc_norm=True,
)
model = _create_naflexvit('naflexvit_base_patch16_parfac_gap', pretrained=pretrained, cfg=cfg, **kwargs)
return model
@register_model
def naflexvit_base_patch16_map(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-Base with NaFlex functionality and MAP attention pooling.
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
init_values=1e-5,
global_pool='map',
reg_tokens=1,
)
model = _create_naflexvit('naflexvit_base_patch16_map', pretrained=pretrained, cfg=cfg, **kwargs)
return model
@register_model
def naflexvit_so150m2_patch16_reg1_gap(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-SO150M2 with NaFlex functionality for variable aspect ratios and resolutions.
This model supports:
1. Variable aspect ratios and resolutions via patch coordinates
2. Position embedding interpolation for arbitrary grid sizes
3. Explicit patch coordinates and valid token masking
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=832,
depth=21,
num_heads=13,
mlp_ratio=34/13,
init_values=1e-5,
qkv_bias=False,
reg_tokens=1,
global_pool='avg',
fc_norm=True,
)
model = _create_naflexvit('naflexvit_so150m2_patch16_reg1_gap', pretrained=pretrained, cfg=cfg, **kwargs)
return model
@register_model
def naflexvit_so150m2_patch16_reg1_map(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-SO150M2 with NaFlex functionality for variable aspect ratios and resolutions.
This model supports:
1. Variable aspect ratios and resolutions via patch coordinates
2. Position embedding interpolation for arbitrary grid sizes
3. Explicit patch coordinates and valid token masking
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=832,
depth=21,
num_heads=13,
mlp_ratio=34/13,
init_values=1e-5,
qkv_bias=False,
reg_tokens=1,
global_pool='map',
)
model = _create_naflexvit('naflexvit_so150m2_patch16_reg1_map', pretrained=pretrained, cfg=cfg, **kwargs)
return model
@register_model
def naflexvit_base_patch16_siglip(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-Base with NaFlex functionality and SigLIP-style configuration.
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
act_layer='gelu_tanh',
global_pool='map',
)
model = _create_naflexvit('naflexvit_base_patch16_siglip', pretrained=pretrained, cfg=cfg, **kwargs)
return model
@register_model
def naflexvit_so400m_patch16_siglip(pretrained: bool = False, **kwargs) -> NaFlexVit:
"""ViT-SO400M with NaFlex functionality for variable aspect ratios and resolutions.
"""
cfg = NaFlexVitCfg(
patch_size=16,
embed_dim=1152,
depth=27,
num_heads=16,
mlp_ratio=3.7362,
act_layer='gelu_tanh',
global_pool='map',
)
model = _create_naflexvit('naflexvit_so400m_patch16_siglip', pretrained=pretrained, cfg=cfg, **kwargs)
return model
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/models/naflexvit.py",
"license": "Apache License 2.0",
"lines": 1964,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/models/fasternet.py | """FasterNet
Run, Don't Walk: Chasing Higher FLOPS for Faster Neural Networks
- paper: https://arxiv.org/abs/2303.03667
- code: https://github.com/JierunChen/FasterNet
@article{chen2023run,
title={Run, Don't Walk: Chasing Higher FLOPS for Faster Neural Networks},
author={Chen, Jierun and Kao, Shiu-hong and He, Hao and Zhuo, Weipeng and Wen, Song and Lee, Chul-Ho and Chan, S-H Gary},
journal={arXiv preprint arXiv:2303.03667},
year={2023}
}
Modifications by / Copyright 2025 Ryan Hou & Ross Wightman, original copyrights below
"""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from functools import partial
from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, Linear, DropPath, trunc_normal_, LayerType, calculate_drop_path_rates
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['FasterNet']
class Partial_conv3(nn.Module):
def __init__(self, dim: int, n_div: int, forward: str, device=None, dtype=None):
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.dim_conv3 = dim // n_div
self.dim_untouched = dim - self.dim_conv3
self.partial_conv3 = nn.Conv2d(self.dim_conv3, self.dim_conv3, 3, 1, 1, bias=False, **dd)
if forward == 'slicing':
self.forward = self.forward_slicing
elif forward == 'split_cat':
self.forward = self.forward_split_cat
else:
raise NotImplementedError
def forward_slicing(self, x: torch.Tensor) -> torch.Tensor:
# only for inference
x = x.clone() # !!! Keep the original input intact for the residual connection later
x[:, :self.dim_conv3, :, :] = self.partial_conv3(x[:, :self.dim_conv3, :, :])
return x
def forward_split_cat(self, x: torch.Tensor) -> torch.Tensor:
# for training/inference
x1, x2 = torch.split(x, [self.dim_conv3, self.dim_untouched], dim=1)
x1 = self.partial_conv3(x1)
x = torch.cat((x1, x2), 1)
return x
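# Example (illustrative sketch): the partial conv only mixes the first dim // n_div
# channels; the remaining channels pass through untouched. Shapes are assumptions.
#
#   pconv = Partial_conv3(dim=64, n_div=4, forward='split_cat')
#   x = torch.randn(2, 64, 56, 56)
#   y = pconv(x)                                 # -> (2, 64, 56, 56)
#   assert torch.equal(y[:, 16:], x[:, 16:])     # untouched 48 channels are unchanged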
class MLPBlock(nn.Module):
def __init__(
self,
dim: int,
n_div: int,
mlp_ratio: float,
drop_path: float,
layer_scale_init_value: float,
act_layer: Type[nn.Module] = partial(nn.ReLU, inplace=True),
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
pconv_fw_type: str = 'split_cat',
device=None,
dtype=None,
):
dd = {'device': device, 'dtype': dtype}
super().__init__()
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = nn.Sequential(*[
nn.Conv2d(dim, mlp_hidden_dim, 1, bias=False, **dd),
norm_layer(mlp_hidden_dim, **dd),
act_layer(),
nn.Conv2d(mlp_hidden_dim, dim, 1, bias=False, **dd),
])
self.spatial_mixing = Partial_conv3(dim, n_div, pconv_fw_type, **dd)
if layer_scale_init_value > 0:
self.layer_scale = nn.Parameter(
layer_scale_init_value * torch.ones((dim), **dd), requires_grad=True)
else:
self.layer_scale = None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.spatial_mixing(x)
if self.layer_scale is not None:
x = shortcut + self.drop_path(
self.layer_scale.unsqueeze(-1).unsqueeze(-1) * self.mlp(x))
else:
x = shortcut + self.drop_path(self.mlp(x))
return x
class Block(nn.Module):
def __init__(
self,
dim: int,
depth: int,
n_div: int,
mlp_ratio: float,
drop_path: float,
layer_scale_init_value: float,
act_layer: Type[nn.Module] = partial(nn.ReLU, inplace=True),
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
pconv_fw_type: str = 'split_cat',
use_merge: bool = True,
merge_size: Union[int, Tuple[int, int]] = 2,
device=None,
dtype=None,
):
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.grad_checkpointing = False
self.blocks = nn.Sequential(*[
MLPBlock(
dim=dim,
n_div=n_div,
mlp_ratio=mlp_ratio,
drop_path=drop_path[i],
layer_scale_init_value=layer_scale_init_value,
norm_layer=norm_layer,
act_layer=act_layer,
pconv_fw_type=pconv_fw_type,
**dd,
)
for i in range(depth)
])
self.downsample = PatchMerging(
dim=dim // 2,
patch_size=merge_size,
norm_layer=norm_layer,
**dd,
) if use_merge else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class PatchEmbed(nn.Module):
def __init__(
self,
in_chans: int,
embed_dim: int,
patch_size: Union[int, Tuple[int, int]] = 4,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
device=None,
dtype=None,
):
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.proj = nn.Conv2d(in_chans, embed_dim, patch_size, patch_size, bias=False, **dd)
self.norm = norm_layer(embed_dim, **dd)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.norm(self.proj(x))
class PatchMerging(nn.Module):
def __init__(
self,
dim: int,
patch_size: Union[int, Tuple[int, int]] = 2,
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
device=None,
dtype=None,
):
dd = {'device': device, 'dtype': dtype}
super().__init__()
self.reduction = nn.Conv2d(dim, 2 * dim, patch_size, patch_size, bias=False, **dd)
self.norm = norm_layer(2 * dim, **dd)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.norm(self.reduction(x))
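# Example (illustrative sketch): the stem downsamples by `patch_size`, and each merge
# halves the spatial resolution while doubling channels. Shapes are assumptions.
#
#   stem = PatchEmbed(in_chans=3, embed_dim=96, patch_size=4)
#   merge = PatchMerging(dim=96, patch_size=2)
#   x = torch.randn(2, 3, 224, 224)
#   x = stem(x)     # -> (2, 96, 56, 56)
#   x = merge(x)    # -> (2, 192, 28, 28)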
class FasterNet(nn.Module):
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 96,
depths: Union[int, Tuple[int, ...]] = (1, 2, 8, 2),
mlp_ratio: float = 2.,
n_div: int = 4,
patch_size: Union[int, Tuple[int, int]] = 4,
merge_size: Union[int, Tuple[int, int]] = 2,
patch_norm: bool = True,
feature_dim: int = 1280,
drop_rate: float = 0.,
drop_path_rate: float = 0.1,
layer_scale_init_value: float = 0.,
act_layer: Type[nn.Module] = partial(nn.ReLU, inplace=True),
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
pconv_fw_type: str = 'split_cat',
device=None,
dtype=None,
):
super().__init__()
dd = {'device': device, 'dtype': dtype}
assert pconv_fw_type in ('split_cat', 'slicing',)
self.num_classes = num_classes
self.in_chans = in_chans
self.drop_rate = drop_rate
if not isinstance(depths, (list, tuple)):
depths = (depths,) # a single int means the model has only one stage
self.num_stages = len(depths)
self.feature_info = []
self.patch_embed = PatchEmbed(
in_chans=in_chans,
embed_dim=embed_dim,
patch_size=patch_size,
norm_layer=norm_layer if patch_norm else nn.Identity,
**dd,
)
# stochastic depth decay rule
dpr = calculate_drop_path_rates(drop_path_rate, depths, stagewise=True)
# build layers
stages_list = []
for i in range(self.num_stages):
dim = int(embed_dim * 2 ** i)
stage = Block(
dim=dim,
depth=depths[i],
n_div=n_div,
mlp_ratio=mlp_ratio,
drop_path=dpr[i],
layer_scale_init_value=layer_scale_init_value,
norm_layer=norm_layer,
act_layer=act_layer,
pconv_fw_type=pconv_fw_type,
use_merge=False if i == 0 else True,
merge_size=merge_size,
**dd,
)
stages_list.append(stage)
self.feature_info += [dict(num_chs=dim, reduction=2**(i+2), module=f'stages.{i}')]
self.stages = nn.Sequential(*stages_list)
# building last several layers
self.num_features = prev_chs = int(embed_dim * 2 ** (self.num_stages - 1))
self.head_hidden_size = out_chs = feature_dim # 1280
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=False, **dd)
self.act = act_layer()
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(out_chs, num_classes, bias=True, **dd) if num_classes > 0 else nn.Identity()
self._initialize_weights()
def _initialize_weights(self):
for name, m in self.named_modules():
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self) -> Set:
return set()
@torch.jit.ignore
def group_matcher(self, coarse: bool = False) -> Dict[str, Any]:
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^conv_head', (99999,)),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.classifier
def reset_classifier(self, num_classes: int, global_pool: str = 'avg', device=None, dtype=None):
dd = {'device': device, 'dtype': dtype}
self.num_classes = num_classes
# cannot meaningfully change pooling of efficient head after creation
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
self.classifier = Linear(self.head_hidden_size, num_classes, **dd) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
List of intermediate feature tensors, or a tuple of (final features, intermediates).
"""
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.patch_embed(x)
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx in take_indices:
intermediates.append(x)
if intermediates_only:
return intermediates
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
x = self.stages(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act(x)
x = self.flatten(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
return x if pre_logits else self.classifier(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
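# Example (illustrative sketch): building a registered variant and extracting per-stage
# features; variant name and shapes are assumptions for demonstration only.
#
#   model = fasternet_t0(pretrained=False)
#   x = torch.randn(1, 3, 224, 224)
#   logits = model(x)                                                  # -> (1, 1000)
#   feats = model.forward_intermediates(x, indices=4, intermediates_only=True)
#   # feats is a list of 4 NCHW tensors, one per stage (reductions 4, 8, 16, 32)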
def checkpoint_filter_fn(state_dict: Dict[str, torch.Tensor], model: nn.Module) -> Dict[str, torch.Tensor]:
# if 'avgpool_pre_head' in state_dict:
# return state_dict
#
# out_dict = {
# 'conv_head.weight': state_dict.pop('avgpool_pre_head.1.weight'),
# 'classifier.weight': state_dict.pop('head.weight'),
# 'classifier.bias': state_dict.pop('head.bias')
# }
#
# stage_mapping = {
# 'stages.1.': 'stages.1.downsample.',
# 'stages.2.': 'stages.1.',
# 'stages.3.': 'stages.2.downsample.',
# 'stages.4.': 'stages.2.',
# 'stages.5.': 'stages.3.downsample.',
# 'stages.6.': 'stages.3.'
# }
#
# for k, v in state_dict.items():
# for old_prefix, new_prefix in stage_mapping.items():
# if k.startswith(old_prefix):
# k = k.replace(old_prefix, new_prefix)
# break
# out_dict[k] = v
return state_dict
def _cfg(url: str = '', **kwargs: Any) -> Dict[str, Any]:
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 1.0, 'interpolation': 'bicubic', 'test_crop_pct': 0.9,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'classifier',
'paper_ids': 'arXiv:2303.03667',
'paper_name': "Run, Don't Walk: Chasing Higher FLOPS for Faster Neural Networks",
'origin_url': 'https://github.com/JierunChen/FasterNet',
'license': 'apache-2.0',
**kwargs
}
default_cfgs = generate_default_cfgs({
'fasternet_t0.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_t0-epoch.281-val_acc1.71.9180.pth',
),
'fasternet_t1.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_t1-epoch.291-val_acc1.76.2180.pth',
),
'fasternet_t2.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_t2-epoch.289-val_acc1.78.8860.pth',
),
'fasternet_s.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_s-epoch.299-val_acc1.81.2840.pth',
),
'fasternet_m.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_m-epoch.291-val_acc1.82.9620.pth',
),
'fasternet_l.in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/JierunChen/FasterNet/releases/download/v1.0/fasternet_l-epoch.299-val_acc1.83.5060.pth',
),
})
def _create_fasternet(variant: str, pretrained: bool = False, **kwargs: Any) -> FasterNet:
model = build_model_with_cfg(
FasterNet, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs,
)
return model
@register_model
def fasternet_t0(pretrained: bool = False, **kwargs: Any) -> FasterNet:
model_args = dict(embed_dim=40, depths=(1, 2, 8, 2), drop_path_rate=0.0, act_layer=nn.GELU)
return _create_fasternet('fasternet_t0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fasternet_t1(pretrained: bool = False, **kwargs: Any) -> FasterNet:
model_args = dict(embed_dim=64, depths=(1, 2, 8, 2), drop_path_rate=0.02, act_layer=nn.GELU)
return _create_fasternet('fasternet_t1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fasternet_t2(pretrained: bool = False, **kwargs: Any) -> FasterNet:
model_args = dict(embed_dim=96, depths=(1, 2, 8, 2), drop_path_rate=0.05)
return _create_fasternet('fasternet_t2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fasternet_s(pretrained: bool = False, **kwargs: Any) -> FasterNet:
model_args = dict(embed_dim=128, depths=(1, 2, 13, 2), drop_path_rate=0.1)
return _create_fasternet('fasternet_s', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fasternet_m(pretrained: bool = False, **kwargs: Any) -> FasterNet:
model_args = dict(embed_dim=144, depths=(3, 4, 18, 3), drop_path_rate=0.2)
return _create_fasternet('fasternet_m', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def fasternet_l(pretrained: bool = False, **kwargs: Any) -> FasterNet:
model_args = dict(embed_dim=192, depths=(3, 4, 18, 3), drop_path_rate=0.3)
return _create_fasternet('fasternet_l', pretrained=pretrained, **dict(model_args, **kwargs))
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/models/fasternet.py",
"license": "Apache License 2.0",
"lines": 437,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/pytorch-image-models:timm/layers/pool1d.py | import torch
def global_pool_nlc(
x: torch.Tensor,
pool_type: str = 'token',
num_prefix_tokens: int = 1,
reduce_include_prefix: bool = False,
):
"""Apply global pooling to tensor in NLC format.
Args:
x: Input tensor in (batch, length, channels) format.
pool_type: Pooling type - 'token', 'avg', 'max', 'avgmax', or empty string.
num_prefix_tokens: Number of prefix tokens (e.g., class token) to exclude from pooling.
reduce_include_prefix: Whether to include prefix tokens in reduction.
Returns:
Pooled tensor.
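Example (illustrative; shapes are assumed):
>>> x = torch.randn(2, 197, 768)  # 1 class token + 196 patch tokens
>>> global_pool_nlc(x, pool_type='token').shape
torch.Size([2, 768])
>>> global_pool_nlc(x, pool_type='avg', num_prefix_tokens=1).shape
torch.Size([2, 768])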
"""
if not pool_type:
return x
if pool_type == 'token':
x = x[:, 0] # class token
else:
x = x if reduce_include_prefix else x[:, num_prefix_tokens:]
if pool_type == 'avg':
x = x.mean(dim=1)
elif pool_type == 'avgmax':
x = 0.5 * (x.amax(dim=1) + x.mean(dim=1))
elif pool_type == 'max':
x = x.amax(dim=1)
else:
assert not pool_type, f'Unknown pool type {pool_type}'
return x
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/layers/pool1d.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/pytorch-image-models:timm/layers/attention.py | from typing import Final, Optional, Type
import torch
from torch import nn as nn
from torch.nn import functional as F
from ._fx import register_notrace_function
from .config import use_fused_attn
from .pos_embed_sincos import apply_rot_embed_cat
@torch.fx.wrap
@register_notrace_function
def maybe_add_mask(scores: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
return scores if attn_mask is None else scores + attn_mask
class Attention(nn.Module):
"""Standard Multi-head Self Attention module with QKV projection.
This module implements the standard multi-head attention mechanism used in transformers.
It supports both the fused attention implementation (scaled_dot_product_attention) for
efficiency when available, and a manual implementation otherwise. The module includes
options for QK normalization, attention dropout, and projection dropout.
"""
fused_attn: Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
attn_head_dim: Optional[int] = None,
dim_out: Optional[int] = None,
qkv_bias: bool = False,
qk_norm: bool = False,
scale_norm: bool = False,
proj_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
norm_layer: Optional[Type[nn.Module]] = None,
device=None,
dtype=None,
) -> None:
"""Initialize the Attention module.
Args:
dim: Input dimension of the token embeddings.
num_heads: Number of attention heads.
attn_head_dim: Dimension of each attention head. If None, computed as dim // num_heads.
dim_out: Output dimension. If None, same as dim.
qkv_bias: Whether to use bias in the query, key, value projections.
qk_norm: Whether to apply normalization to query and key vectors.
scale_norm: Whether to apply normalization to attention output before projection.
proj_bias: Whether to use bias in the output projection.
attn_drop: Dropout rate applied to the attention weights.
proj_drop: Dropout rate applied after the output projection.
norm_layer: Normalization layer constructor for QK normalization if enabled.
"""
super().__init__()
dd = {'device': device, 'dtype': dtype}
dim_out = dim_out or dim
head_dim = attn_head_dim
if head_dim is None:
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
head_dim = dim // num_heads
if qk_norm or scale_norm:
assert norm_layer is not None, 'norm_layer must be provided if qk_norm or scale_norm is True'
self.num_heads = num_heads
self.head_dim = head_dim
self.attn_dim = num_heads * head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, self.attn_dim * 3, bias=qkv_bias, **dd)
self.q_norm = norm_layer(head_dim, **dd) if qk_norm else nn.Identity()
self.k_norm = norm_layer(head_dim, **dd) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.norm = norm_layer(self.attn_dim, **dd) if scale_norm else nn.Identity()
self.proj = nn.Linear(self.attn_dim, dim_out, bias=proj_bias, **dd)
self.proj_drop = nn.Dropout(proj_drop)
def forward(
self,
x: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q, k = self.q_norm(q), self.k_norm(k)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = maybe_add_mask(attn, attn_mask)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, self.attn_dim)
x = self.norm(x)
x = self.proj(x)
x = self.proj_drop(x)
return x
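# Example (illustrative sketch): self-attention over a token sequence, optionally with
# an additive float mask (0 keeps a key, -inf drops it). Shapes are assumptions.
#
#   attn = Attention(dim=768, num_heads=12, qkv_bias=True)
#   x = torch.randn(2, 197, 768)
#   y = attn(x)                                    # -> (2, 197, 768)
#   mask = torch.zeros(2, 1, 197, 197)
#   mask[..., -10:] = float('-inf')                # mask out the last 10 keys
#   y = attn(x, attn_mask=mask)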
class AttentionRope(nn.Module):
""" A Self Attention module with ROPE support.
Includes options for:
* QK normalization option
* Attention output (scale) normalization
* Fused or unfused QKV projection support
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
dim_out: Optional[int] = None,
qkv_bias: bool = True,
qkv_fused: bool = True,
num_prefix_tokens: int = 1,
attn_drop: float = 0.,
proj_drop: float = 0.,
attn_head_dim: Optional[int] = None,
norm_layer: Type[nn.Module] = None,
qk_norm: bool = False,
scale_norm: bool = False,
proj_bias: bool = True,
rotate_half: bool = False,
device=None,
dtype=None,
):
"""Initialize the Attention module.
Args:
dim: Input dimension of the token embeddings
num_heads: Number of attention heads
dim_out: Output dimension. If None, same as dim.
qkv_bias: Whether to add a bias term to the query, key, and value projections
qkv_fused: Whether to use fused QKV projection (single linear) or separate projections
num_prefix_tokens: Number of reg/cls tokens at the beginning of the sequence that
should not have position embeddings applied
attn_drop: Dropout rate for attention weights
proj_drop: Dropout rate for the output projection
attn_head_dim: Dimension of each attention head. If None, computed as dim // num_heads.
norm_layer: Normalization layer constructor to use for QK and scale normalization
qk_norm: Enable normalization of query (Q) and key (K) vectors with norm_layer
scale_norm: Enable normalization (scaling) of attention output with norm_layer
proj_bias: Whether to use bias in the output projection
rotate_half: Use 'half' ROPE layout instead of default 'interleaved'
"""
super().__init__()
dd = {'device': device, 'dtype': dtype}
dim_out = dim_out or dim
head_dim = attn_head_dim
if head_dim is None:
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
head_dim = dim // num_heads
if scale_norm or qk_norm:
assert norm_layer is not None, 'norm_layer must be provided if qk_norm or scale_norm is True'
self.num_heads = num_heads
self.head_dim = head_dim
self.attn_dim = head_dim * num_heads
self.scale = head_dim ** -0.5
self.num_prefix_tokens = num_prefix_tokens
self.fused_attn = use_fused_attn()
self.rotate_half = rotate_half
if qkv_fused:
self.qkv = nn.Linear(dim, self.attn_dim * 3, bias=qkv_bias, **dd)
self.q_proj = self.k_proj = self.v_proj = None
else:
self.qkv = None
self.q_proj = nn.Linear(dim, self.attn_dim, bias=qkv_bias, **dd)
self.k_proj = nn.Linear(dim, self.attn_dim, bias=qkv_bias, **dd)
self.v_proj = nn.Linear(dim, self.attn_dim, bias=qkv_bias, **dd)
self.q_norm = norm_layer(head_dim, **dd) if qk_norm else nn.Identity()
self.k_norm = norm_layer(head_dim, **dd) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.norm = norm_layer(self.attn_dim, **dd) if scale_norm else nn.Identity()
self.proj = nn.Linear(self.attn_dim, dim_out, bias=proj_bias, **dd)
self.proj_drop = nn.Dropout(proj_drop)
def forward(
self,
x,
rope: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
):
"""Forward pass for the attention module.
Args:
x: Input tensor of shape (batch_size, sequence_length, embedding_dim)
rope: Rotary position embeddings tensor for position-aware attention
attn_mask: Optional attention mask to apply during attention computation
Returns:
Tensor of shape (batch_size, sequence_length, dim_out)
"""
B, N, C = x.shape
if self.qkv is not None:
qkv = self.qkv(x)
qkv = qkv.reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim
else:
q = self.q_proj(x).reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
k = self.k_proj(x).reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
v = self.v_proj(x).reshape(B, N, self.num_heads, self.head_dim).transpose(1, 2)
q, k = self.q_norm(q), self.k_norm(k)
if rope is not None:
npt = self.num_prefix_tokens
half = getattr(self, 'rotate_half', False)
q = torch.cat([q[:, :, :npt, :], apply_rot_embed_cat(q[:, :, npt:, :], rope, half=half)], dim=2).type_as(v)
k = torch.cat([k[:, :, :npt, :], apply_rot_embed_cat(k[:, :, npt:, :], rope, half=half)], dim=2).type_as(v)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_mask,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = maybe_add_mask(attn, attn_mask)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, self.attn_dim)
x = self.norm(x)
x = self.proj(x)
x = self.proj_drop(x)
return x
| {
"repo_id": "huggingface/pytorch-image-models",
"file_path": "timm/layers/attention.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/smolagents:src/smolagents/serialization.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Safe serialization module for remote executor communication.
Provides JSON-based serialization with optional pickle fallback for types
that cannot be safely serialized.
**Security Note:** Pickle deserialization can execute arbitrary code. This module
defaults to safe JSON-only serialization. Only enable pickle fallback
(allow_insecure_serializer=True) if you fully trust the execution environment.
"""
import base64
import json
import pickle
from io import BytesIO
from typing import Any
__all__ = ["SerializationError", "SafeSerializer"]
class SerializationError(Exception):
"""Raised when a type cannot be safely serialized."""
pass
class SafeSerializer:
"""JSON-based serializer with type markers for safe serialization.
Supports:
- Basic: str, int, float, bool, None, list, dict
- Extended: tuple, set, frozenset, bytes, complex, datetime/date/time/timedelta
- Optional: numpy.ndarray, PIL.Image, dataclasses, Decimal, Path
The serializer uses a prefix system to distinguish between formats:
- "safe:" prefix for JSON-serialized data
- "pickle:" prefix for pickle-serialized data (when allowed)
"""
SAFE_PREFIX = "safe:"
# Cache for optional type classes (avoids repeated import attempts)
_optional_types_cache: dict = {}
@classmethod
def _get_optional_type(cls, module: str, attr: str):
"""Get optional type class with caching to avoid repeated imports."""
key = f"{module}.{attr}"
if key not in cls._optional_types_cache:
try:
mod = __import__(module, fromlist=[attr])
cls._optional_types_cache[key] = getattr(mod, attr)
except (ImportError, AttributeError):
cls._optional_types_cache[key] = None
return cls._optional_types_cache[key]
@staticmethod
def to_json_safe(obj: Any) -> Any:
"""Convert Python objects to JSON-serializable format with type markers.
Args:
obj: Object to convert.
Returns:
JSON-serializable representation.
Raises:
SerializationError: If the object cannot be safely serialized.
"""
# Fast path: use exact type check for primitives (most common case)
obj_type = type(obj)
if obj_type is str or obj_type is int or obj_type is float or obj_type is bool or obj is None:
return obj
# Fast path: list (very common for return values)
if obj_type is list:
return [SafeSerializer.to_json_safe(item) for item in obj]
# Fast path: tuple (common for multiple return values)
if obj_type is tuple:
return {"__type__": "tuple", "data": [SafeSerializer.to_json_safe(item) for item in obj]}
# Fast path: dict (common, check string keys)
if obj_type is dict:
if all(type(k) is str for k in obj):
return {k: SafeSerializer.to_json_safe(v) for k, v in obj.items()}
return {
"__type__": "dict_with_complex_keys",
"data": [[SafeSerializer.to_json_safe(k), SafeSerializer.to_json_safe(v)] for k, v in obj.items()],
}
# Other builtin types - exact type checks
if obj_type is set:
return {"__type__": "set", "data": [SafeSerializer.to_json_safe(item) for item in obj]}
if obj_type is frozenset:
return {"__type__": "frozenset", "data": [SafeSerializer.to_json_safe(item) for item in obj]}
if obj_type is bytes:
return {"__type__": "bytes", "data": base64.b64encode(obj).decode()}
if obj_type is complex:
return {"__type__": "complex", "real": obj.real, "imag": obj.imag}
# Use type module/name for lazy-loaded types (avoids import until needed)
type_module = getattr(obj_type, "__module__", "")
type_name = obj_type.__name__
# datetime module types (check module first to skip unrelated types quickly)
if type_module == "datetime":
if type_name == "datetime":
return {"__type__": "datetime", "data": obj.isoformat()}
if type_name == "date":
return {"__type__": "date", "data": obj.isoformat()}
if type_name == "time":
return {"__type__": "time", "data": obj.isoformat()}
if type_name == "timedelta":
return {"__type__": "timedelta", "total_seconds": obj.total_seconds()}
# decimal.Decimal
if type_module == "decimal" and type_name == "Decimal":
return {"__type__": "Decimal", "data": str(obj)}
# pathlib.Path (and subclasses like PosixPath, WindowsPath)
if type_module.startswith("pathlib") and "Path" in type_name:
return {"__type__": "Path", "data": str(obj)}
# PIL.Image - use cached import
pil_image_cls = SafeSerializer._get_optional_type("PIL.Image", "Image")
if pil_image_cls is not None and isinstance(obj, pil_image_cls):
buffer = BytesIO()
obj.save(buffer, format="PNG")
return {"__type__": "PIL.Image", "data": base64.b64encode(buffer.getvalue()).decode()}
# numpy types - use cached import
if type_module == "numpy" or type_module.startswith("numpy."):
np_ndarray = SafeSerializer._get_optional_type("numpy", "ndarray")
if np_ndarray is not None and obj_type is np_ndarray:
return {"__type__": "ndarray", "data": obj.tolist(), "dtype": str(obj.dtype)}
np_integer = SafeSerializer._get_optional_type("numpy", "integer")
np_floating = SafeSerializer._get_optional_type("numpy", "floating")
if (np_integer and isinstance(obj, np_integer)) or (np_floating and isinstance(obj, np_floating)):
return obj.item()
# dataclass - check last as is_dataclass() has overhead
import dataclasses
if dataclasses.is_dataclass(obj) and not isinstance(obj, type):
return {
"__type__": "dataclass",
"class_name": type_name,
"module": type_module,
"data": {f.name: SafeSerializer.to_json_safe(getattr(obj, f.name)) for f in dataclasses.fields(obj)},
}
raise SerializationError(f"Cannot safely serialize object of type {type_name}")
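# Example (illustrative): a few of the type markers produced above; values are easy to
# verify by hand and use only types handled in this method.
#
#   SafeSerializer.to_json_safe((1, 2))    -> {"__type__": "tuple", "data": [1, 2]}
#   SafeSerializer.to_json_safe(b"ab")     -> {"__type__": "bytes", "data": "YWI="}
#   SafeSerializer.to_json_safe({1: "a"})  -> {"__type__": "dict_with_complex_keys", "data": [[1, "a"]]}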
@staticmethod
def from_json_safe(obj: Any) -> Any:
"""
Convert JSON-safe format back to Python objects.
Args:
obj: JSON-safe representation
Returns:
Original Python object
"""
if isinstance(obj, dict):
if "__type__" in obj:
obj_type = obj["__type__"]
if obj_type == "bytes":
return base64.b64decode(obj["data"])
elif obj_type == "PIL.Image":
try:
import PIL.Image
img_bytes = base64.b64decode(obj["data"])
return PIL.Image.open(BytesIO(img_bytes))
except ImportError:
return {"__type__": "PIL.Image", "data": obj["data"]}
elif obj_type == "set":
return set(SafeSerializer.from_json_safe(item) for item in obj["data"])
elif obj_type == "tuple":
return tuple(SafeSerializer.from_json_safe(item) for item in obj["data"])
elif obj_type == "complex":
return complex(obj["real"], obj["imag"])
elif obj_type == "frozenset":
return frozenset(SafeSerializer.from_json_safe(item) for item in obj["data"])
elif obj_type == "dict_with_complex_keys":
return {SafeSerializer.from_json_safe(k): SafeSerializer.from_json_safe(v) for k, v in obj["data"]}
elif obj_type == "datetime":
from datetime import datetime
return datetime.fromisoformat(obj["data"])
elif obj_type == "date":
from datetime import date
return date.fromisoformat(obj["data"])
elif obj_type == "time":
from datetime import time
return time.fromisoformat(obj["data"])
elif obj_type == "timedelta":
from datetime import timedelta
return timedelta(seconds=obj["total_seconds"])
elif obj_type == "Decimal":
from decimal import Decimal
return Decimal(obj["data"])
elif obj_type == "Path":
from pathlib import Path
return Path(obj["data"])
elif obj_type == "ndarray":
try:
import numpy as np
return np.array(obj["data"], dtype=obj["dtype"])
except ImportError:
return obj["data"] # Return as list if numpy not available
elif obj_type == "dataclass":
# For dataclasses, we return a dict representation
# since we can't reconstruct the actual class without access to it
return {
"__dataclass__": obj["class_name"],
"__module__": obj["module"],
**{k: SafeSerializer.from_json_safe(v) for k, v in obj["data"].items()},
}
return {k: SafeSerializer.from_json_safe(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [SafeSerializer.from_json_safe(item) for item in obj]
return obj
@staticmethod
def dumps(obj: Any, allow_pickle: bool = False) -> str:
"""
Serialize object to string.
Args:
obj: Object to serialize
allow_pickle: If False (default), use ONLY safe JSON serialization (error if fails).
If True, try safe first, fallback to pickle with warning.
Returns:
str: Serialized string ("safe:..." for JSON, "pickle:..." for pickle)
Raises:
SerializationError: If allow_pickle=False and object cannot be safely serialized
"""
if not allow_pickle:
# Safe ONLY mode - no pickle fallback
json_safe = SafeSerializer.to_json_safe(obj) # Raises SerializationError if fails
return SafeSerializer.SAFE_PREFIX + json.dumps(json_safe)
else:
# Try safe first, fallback to pickle
try:
json_safe = SafeSerializer.to_json_safe(obj)
return SafeSerializer.SAFE_PREFIX + json.dumps(json_safe)
except SerializationError:
# Warn about insecure pickle usage
import warnings
warnings.warn(
"Falling back to insecure pickle serialization. "
"This is a security risk and will be removed in a future version. "
"Consider using only safe serializable types (primitives, lists, dicts, "
"numpy arrays, PIL images, datetime objects, dataclasses).",
FutureWarning,
stacklevel=2,
)
# Fallback to pickle (with prefix)
try:
return "pickle:" + base64.b64encode(pickle.dumps(obj)).decode()
except (pickle.PicklingError, TypeError, AttributeError) as e:
raise SerializationError(f"Cannot serialize object: {e}") from e
@staticmethod
def loads(data: str, allow_pickle: bool = False) -> Any:
"""
Deserialize string with format detection.
Args:
data: Serialized string (with "safe:" or "pickle:" prefix)
allow_pickle: If False (default), reject pickle data (strict safe mode).
If True, accept both safe and pickle formats.
Returns:
Deserialized object
Raises:
SerializationError: If pickle data received but allow_pickle=False
"""
if data.startswith(SafeSerializer.SAFE_PREFIX):
json_data = json.loads(data[len(SafeSerializer.SAFE_PREFIX) :])
return SafeSerializer.from_json_safe(json_data)
elif data.startswith("pickle:"):
# Explicit pickle prefix
if not allow_pickle:
raise SerializationError(
"Pickle data rejected: allow_pickle=False requires safe-only data. "
"This data is pickle-serialized. To deserialize it, set "
"allow_pickle=True (not recommended for untrusted data)."
)
# Warn about insecure pickle deserialization
import warnings
warnings.warn(
"Deserializing pickle data. This is a security risk if the data is untrusted.",
FutureWarning,
stacklevel=2,
)
return pickle.loads(base64.b64decode(data[7:]))
else:
# No prefix - legacy format, assume pickle
if not allow_pickle:
raise SerializationError(
"Pickle data rejected: allow_pickle=False requires safe-only data. "
"This data appears to be pickle-serialized (legacy format). To deserialize it, set "
"allow_pickle=True (not recommended for untrusted data)."
)
# Warn about insecure pickle deserialization
import warnings
warnings.warn(
"Deserializing pickle data. This is a security risk if the data is untrusted.",
FutureWarning,
stacklevel=2,
)
return pickle.loads(base64.b64decode(data))
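# Example (illustrative sketch): round-tripping supported types in the default safe,
# JSON-only mode; an unsupported object raises SerializationError unless pickle is
# explicitly allowed. The types used below are assumptions for demonstration only.
#
#   import datetime
#   payload = {"when": datetime.datetime(2024, 1, 1), "values": (1, 2, 3)}
#   blob = SafeSerializer.dumps(payload)           # "safe:..." JSON string
#   restored = SafeSerializer.loads(blob)
#   assert restored["values"] == (1, 2, 3)
#
#   class NotSerializable: ...
#   SafeSerializer.dumps(NotSerializable())        # raises SerializationError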
@staticmethod
def _extract_method_body(method) -> str:
"""Extract method body without the def line and dedent it."""
import inspect
import textwrap
source = inspect.getsource(method)
lines = source.split("\n")
# Skip the def line and docstring
body_start = 0
for i, line in enumerate(lines):
if '"""' in line and i > 0:
# Find end of docstring
if line.count('"""') == 2:
body_start = i + 1
break
for j in range(i + 1, len(lines)):
if '"""' in lines[j]:
body_start = j + 1
break
break
elif line.strip() and not line.strip().startswith("def ") and not line.strip().startswith("@"):
body_start = i
break
body = "\n".join(lines[body_start:])
return textwrap.dedent(body)
@staticmethod
def get_safe_serializer_code() -> str:
"""
Returns the SafeSerializer class definition as string for injection into sandbox.
This generates a standalone version from the actual implementation to avoid duplication.
"""
import inspect
# Generate to_json_safe from actual implementation
to_json_safe_source = inspect.getsource(SafeSerializer.to_json_safe)
# Make it standalone (remove @staticmethod, change self references)
to_json_safe_source = to_json_safe_source.replace("@staticmethod\n ", "")
to_json_safe_source = to_json_safe_source.replace("SafeSerializer.to_json_safe", "to_json_safe")
# Generate from_json_safe from actual implementation
from_json_safe_source = inspect.getsource(SafeSerializer.from_json_safe)
from_json_safe_source = from_json_safe_source.replace("@staticmethod\n ", "")
from_json_safe_source = from_json_safe_source.replace("SafeSerializer.from_json_safe", "from_json_safe")
return f'''
class SerializationError(Exception):
"""Raised when a type cannot be safely serialized."""
pass
class SafeSerializer:
"""Safe JSON-based serializer for sandbox use."""
SAFE_PREFIX = "safe:"
{to_json_safe_source}
{from_json_safe_source}
@staticmethod
def dumps(obj, allow_pickle=False):
import json
import base64
import pickle
if not allow_pickle:
# Safe ONLY - no pickle fallback
json_safe = to_json_safe(obj) # Raises SerializationError if fails
return SafeSerializer.SAFE_PREFIX + json.dumps(json_safe)
else:
# Try safe first, fallback to pickle if allowed
try:
json_safe = to_json_safe(obj)
return SafeSerializer.SAFE_PREFIX + json.dumps(json_safe)
except SerializationError:
try:
return "pickle:" + base64.b64encode(pickle.dumps(obj)).decode()
except (pickle.PicklingError, TypeError, AttributeError) as e:
raise SerializationError(f"Cannot serialize object: {{e}}") from e
@staticmethod
def loads(data, allow_pickle=False):
import json
import base64
import pickle
if data.startswith(SafeSerializer.SAFE_PREFIX):
json_data = json.loads(data[len(SafeSerializer.SAFE_PREFIX):])
return from_json_safe(json_data)
elif data.startswith("pickle:"):
if not allow_pickle:
raise SerializationError("Pickle data rejected: allow_pickle=False")
return pickle.loads(base64.b64decode(data[7:]))
else:
# Legacy format (no prefix) - assume pickle
if not allow_pickle:
raise SerializationError("Pickle data rejected: allow_pickle=False")
return pickle.loads(base64.b64decode(data))
'''
@staticmethod
def get_deserializer_code(allow_pickle: bool) -> str:
"""
Generate deserializer function for remote execution with setting baked in.
This generates code from the actual implementation to avoid duplication.
Args:
allow_pickle: Whether to allow pickle deserialization
Returns:
Python code string with _deserialize function
"""
import inspect
import textwrap
# Build a standalone _from_json_safe function from the source of from_json_safe.
from_json_safe_source = inspect.getsource(SafeSerializer.from_json_safe)
from_json_safe_source = textwrap.dedent(from_json_safe_source)
if from_json_safe_source.startswith("@staticmethod\n"):
from_json_safe_source = from_json_safe_source[len("@staticmethod\n") :]
from_json_safe_source = from_json_safe_source.replace("def from_json_safe(", "def _from_json_safe(")
from_json_safe_source = from_json_safe_source.replace("SafeSerializer.from_json_safe", "_from_json_safe")
if allow_pickle:
prefixed_pickle_branch = [
" import pickle",
" return pickle.loads(base64.b64decode(data[7:]))",
]
legacy_pickle_branch = [
" import pickle",
" return pickle.loads(base64.b64decode(data))",
]
else:
prefixed_pickle_branch = [
' raise SerializationError("Pickle data rejected: allow_pickle=False")',
]
legacy_pickle_branch = [
' raise SerializationError("Pickle data rejected: allow_pickle=False")',
]
lines = [
"import base64",
"from io import BytesIO",
"from typing import Any",
"",
"class SerializationError(Exception):",
" pass",
"",
from_json_safe_source.rstrip(),
"",
"def _deserialize(data):",
" import json",
' if isinstance(data, str) and data.startswith("safe:"):',
" json_data = json.loads(data[5:])",
" return _from_json_safe(json_data)",
' elif isinstance(data, str) and data.startswith("pickle:"):',
*prefixed_pickle_branch,
" else:",
" # No safe prefix - legacy format, assume pickle",
*legacy_pickle_branch,
"",
]
return "\n".join(lines)
| {
"repo_id": "huggingface/smolagents",
"file_path": "src/smolagents/serialization.py",
"license": "Apache License 2.0",
"lines": 435,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
huggingface/smolagents:tests/test_serialization.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Comprehensive tests for SafeSerializer covering edge cases, error handling,
performance, and integration scenarios.
"""
import base64
import json
import pickle
import warnings
from datetime import datetime, timedelta
from decimal import Decimal
from pathlib import Path
import pytest
from smolagents.serialization import SafeSerializer, SerializationError
# Module-level class for pickle tests (local classes can't be pickled)
class PicklableCustomClass:
"""A simple class that can be pickled."""
def __init__(self):
self.value = 42
class TestSafeSerializationSecurity:
"""Test that safe mode properly blocks pickle."""
def test_safe_mode_blocks_custom_classes(self):
"""Verify custom classes cannot be serialized in safe mode."""
class CustomClass:
def __init__(self):
self.value = 42
obj = CustomClass()
# Should raise SerializationError in safe mode
with pytest.raises(SerializationError, match="Cannot safely serialize"):
SafeSerializer.dumps(obj, allow_pickle=False)
def test_safe_mode_blocks_pickle_deserialization(self):
"""Verify pickle data is rejected in safe mode."""
# Create pickle data (no "safe:" prefix)
pickle_data = base64.b64encode(pickle.dumps({"test": "data"})).decode()
# Should raise error in safe mode
with pytest.raises(SerializationError, match="Pickle data rejected"):
SafeSerializer.loads(pickle_data, allow_pickle=False)
def test_pickle_fallback_with_warning(self):
"""Verify pickle fallback works but warns in legacy mode."""
obj = PicklableCustomClass()
# Should work but emit warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
serialized = SafeSerializer.dumps(obj, allow_pickle=True)
# Check warning was raised
assert len(w) == 1
assert issubclass(w[0].category, FutureWarning)
assert "insecure pickle" in str(w[0].message).lower()
# Should deserialize successfully (with warning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = SafeSerializer.loads(serialized, allow_pickle=True)
assert result.value == 42
assert len(w) == 1
assert "pickle data" in str(w[0].message).lower()
class TestSafeSerializationRoundtrip:
"""Test that safe types serialize and deserialize correctly."""
def test_primitives(self):
"""Test basic Python types."""
test_cases = [
None,
True,
False,
42,
3.14,
"hello",
b"bytes",
complex(1, 2),
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
assert serialized.startswith("safe:")
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_collections(self):
"""Test collections."""
test_cases = [
[1, 2, 3],
{"key": "value", "nested": {"a": 1}},
(1, 2, 3),
{1, 2, 3},
frozenset([1, 2, 3]),
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_datetime_types(self):
"""Test datetime module types."""
now = datetime.now()
test_cases = [
now,
now.date(),
now.time(),
timedelta(days=1, hours=2, minutes=3),
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_special_types(self):
"""Test Decimal and Path."""
test_cases = [
Decimal("3.14159"),
Path("/tmp/test.txt"),
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_complex_nested_structure(self):
"""Test deeply nested structures."""
obj = {
"primitives": [1, 2.5, "string", None, True],
"collections": {
"list": [1, 2, 3],
"tuple": (4, 5, 6),
"set": {7, 8, 9},
},
"datetime": datetime.now(),
"path": Path("/tmp"),
"bytes": b"binary data",
}
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
assert serialized.startswith("safe:")
result = SafeSerializer.loads(serialized, allow_pickle=False)
# Check structure is preserved
assert result["primitives"] == obj["primitives"]
assert result["collections"]["list"] == obj["collections"]["list"]
assert result["datetime"] == obj["datetime"]
assert result["path"] == obj["path"]
assert result["bytes"] == obj["bytes"]
class TestBackwardCompatibility:
"""Test that legacy pickle data can still be read when explicitly allowed."""
def test_read_legacy_pickle_data(self):
"""Verify we can read old pickle data when allow_insecure=True."""
# Simulate legacy pickle data (no "safe:" prefix)
legacy_data = {"key": "value", "number": 42}
pickle_encoded = base64.b64encode(pickle.dumps(legacy_data)).decode()
# Should work with allow_pickle=True
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = SafeSerializer.loads(pickle_encoded, allow_pickle=True)
assert result == legacy_data
assert len(w) == 1 # Warning emitted
assert "pickle data" in str(w[0].message).lower()
def test_safe_data_is_preferred(self):
"""Verify safe serialization is used even when pickle is allowed."""
# Basic dict should use safe serialization
obj = {"key": [1, 2, 3]}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
serialized = SafeSerializer.dumps(obj, allow_pickle=True)
# Should use safe format (no warning)
assert serialized.startswith("safe:")
assert len(w) == 0 # No warning because safe was used
class TestDefaultBehavior:
"""Test that defaults are secure."""
def test_dumps_defaults_to_safe(self):
"""Verify dumps defaults to safe mode."""
obj = {"key": "value"}
        # Call without the allow_pickle parameter - should default to safe mode (allow_pickle=False)
serialized = SafeSerializer.dumps(obj)
assert serialized.startswith("safe:")
# Should be deserializable in safe mode
result = SafeSerializer.loads(serialized)
assert result == obj
def test_loads_defaults_to_safe(self):
"""Verify loads defaults to safe mode."""
# Create safe data
obj = {"key": "value"}
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
        # Call without the allow_pickle parameter - should default to safe mode (allow_pickle=False)
result = SafeSerializer.loads(serialized)
assert result == obj
# Create pickle data
pickle_data = base64.b64encode(pickle.dumps(obj)).decode()
# Should reject pickle data by default
with pytest.raises(SerializationError, match="Pickle data rejected"):
SafeSerializer.loads(pickle_data)
class TestEdgeCases:
"""Test edge cases and boundary conditions."""
def test_empty_data(self):
"""Test serialization of empty collections."""
test_cases = [
[],
{},
(),
set(),
frozenset(),
"",
b"",
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_nested_empty_structures(self):
"""Test deeply nested empty structures."""
obj = {
"empty_list": [],
"empty_dict": {},
"nested": {
"empty_tuple": (),
"empty_set": set(),
"deeply_nested": {"still_empty": []},
},
}
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_very_large_numbers(self):
"""Test handling of very large integers and floats."""
test_cases = [
10**100, # Very large int
-(10**100), # Very large negative int
1.7976931348623157e308, # Near max float
2.2250738585072014e-308, # Near min positive float
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_special_float_values(self):
"""Test special float values (infinity, nan)."""
import math
# Note: NaN != NaN, so we handle it specially
test_cases = [
(float("inf"), float("inf")),
(float("-inf"), float("-inf")),
]
for obj, expected in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == expected
# NaN special case
nan_obj = float("nan")
serialized = SafeSerializer.dumps(nan_obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert math.isnan(result)
def test_unicode_strings(self):
"""Test handling of various unicode strings."""
test_cases = [
"Hello 世界", # Mixed ASCII and Chinese
"🚀🎉💎", # Emojis
"Ñoño", # Accented characters
"\u0000", # Null character
"Line1\nLine2\tTabbed", # Escape sequences
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_very_long_strings(self):
"""Test handling of very long strings."""
long_string = "a" * 1_000_000 # 1MB string
serialized = SafeSerializer.dumps(long_string, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == long_string
def test_deeply_nested_structures(self):
"""Test deeply nested data structures."""
# Create nested structure
obj = {"level": 0}
current = obj
for i in range(1, 100): # 100 levels deep
current["nested"] = {"level": i}
current = current["nested"]
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_dict_with_tuple_keys(self):
"""Test dictionaries with tuple keys."""
obj = {
(1, 2): "tuple_key",
(3, 4, 5): "longer_tuple",
("a", "b"): "string_tuple",
}
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_dict_with_integer_keys(self):
"""Test dictionaries with non-string keys."""
obj = {
1: "one",
2: "two",
100: "hundred",
}
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_mixed_collection_types(self):
"""Test mixed collection types in one structure."""
obj = {
"list": [1, 2, 3],
"tuple": (4, 5, 6),
"set": {7, 8, 9},
"frozenset": frozenset([10, 11, 12]),
"nested_list": [[1, 2], [3, 4]],
"list_of_tuples": [(1, 2), (3, 4)],
}
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
# Compare each field
assert result["list"] == obj["list"]
assert result["tuple"] == obj["tuple"]
assert result["set"] == obj["set"]
assert result["frozenset"] == obj["frozenset"]
assert result["nested_list"] == obj["nested_list"]
assert result["list_of_tuples"] == obj["list_of_tuples"]
class TestErrorHandling:
"""Test error handling and malformed data."""
def test_invalid_json_data(self):
"""Test handling of invalid JSON data."""
with pytest.raises((json.JSONDecodeError, SerializationError)):
SafeSerializer.loads("safe:invalid json", allow_pickle=False)
def test_corrupted_safe_prefix(self):
"""Test handling of data with safe prefix but invalid JSON."""
with pytest.raises((json.JSONDecodeError, SerializationError)):
SafeSerializer.loads("safe:{broken", allow_pickle=False)
def test_missing_type_field(self):
"""Test handling of malformed type markers."""
# Valid JSON but missing required fields
malformed = "safe:" + json.dumps({"data": [1, 2, 3]}) # Missing __type__
# Should still work as regular dict
result = SafeSerializer.loads(malformed, allow_pickle=False)
assert result == {"data": [1, 2, 3]}
def test_unknown_type_marker(self):
"""Test handling of unknown type markers."""
unknown_type = "safe:" + json.dumps({"__type__": "unknown_type", "data": "something"})
# Should return as dict with type marker
result = SafeSerializer.loads(unknown_type, allow_pickle=False)
assert "__type__" in result
def test_invalid_base64_in_bytes(self):
"""Test handling of invalid base64 in bytes type."""
invalid_bytes = "safe:" + json.dumps({"__type__": "bytes", "data": "not-valid-base64!!!"})
with pytest.raises(Exception): # Will raise base64 decode error
SafeSerializer.loads(invalid_bytes, allow_pickle=False)
def test_serialization_of_none_type(self):
"""Test that None type is handled correctly."""
obj = None
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result is None
def test_serialization_of_function(self):
"""Test that functions cannot be serialized safely."""
def my_function():
pass
with pytest.raises(SerializationError):
SafeSerializer.dumps(my_function, allow_pickle=False)
def test_serialization_of_class(self):
"""Test that classes cannot be serialized safely."""
class MyClass:
pass
with pytest.raises(SerializationError):
SafeSerializer.dumps(MyClass, allow_pickle=False)
def test_serialization_of_module(self):
"""Test that modules cannot be serialized safely."""
import os
with pytest.raises(SerializationError):
SafeSerializer.dumps(os, allow_pickle=False)
class TestTypeCoverage:
"""Test all supported types comprehensively."""
def test_all_datetime_types(self):
"""Test all datetime module types."""
from datetime import date, datetime, time
test_cases = [
datetime(2024, 1, 1, 12, 30, 45),
date(2024, 1, 1),
time(12, 30, 45),
timedelta(days=5, hours=3, minutes=30, seconds=15),
datetime.min,
datetime.max,
date.min,
date.max,
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_decimal_precision(self):
"""Test Decimal type with various precisions."""
from decimal import getcontext
# Set high precision
getcontext().prec = 50
test_cases = [
Decimal("3.14159265358979323846264338327950288419716939937510"),
Decimal("0.1") + Decimal("0.2"), # Famous float precision issue
Decimal("1e-100"),
Decimal("1e100"),
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_pathlib_types(self):
"""Test various Path types."""
test_cases = [
Path("/tmp/test.txt"),
Path("relative/path/file.py"),
Path("/"),
Path("."),
Path(".."),
Path("/path/with spaces/file.txt"),
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_complex_numbers(self):
"""Test complex number handling."""
test_cases = [
complex(1, 2),
complex(0, 0),
complex(-5, 10),
complex(3.14, 2.71),
1 + 2j,
-5 + 10j,
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_bytes_types(self):
"""Test various bytes objects."""
test_cases = [
b"hello",
b"\x00\x01\x02\x03",
b"Binary\xff\xfe\xfd",
bytes(range(256)), # All byte values
b"", # Empty bytes
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
class TestNumpySupport:
"""Test numpy array serialization (optional, skip if not installed)."""
def test_numpy_array(self):
"""Test numpy array roundtrip."""
pytest.importorskip("numpy")
import numpy as np
arr = np.array([[1, 2], [3, 4]], dtype=np.float32)
serialized = SafeSerializer.dumps(arr, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
np.testing.assert_array_equal(result, arr)
assert result.dtype == arr.dtype
def test_numpy_scalars(self):
"""Test numpy scalar types."""
pytest.importorskip("numpy")
import numpy as np
test_cases = [
np.int32(42),
np.float64(3.14),
]
for obj in test_cases:
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj.item()
def test_numpy_various_dtypes(self):
"""Test numpy arrays with various dtypes."""
pytest.importorskip("numpy")
import numpy as np
# Test numeric dtypes (non-complex)
dtypes = [
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float16,
np.float32,
np.float64,
np.bool_,
]
for dtype in dtypes:
arr = np.array([1, 2, 3], dtype=dtype)
serialized = SafeSerializer.dumps(arr, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
np.testing.assert_array_equal(result, arr)
assert result.dtype == arr.dtype
# Complex dtypes need special handling - test separately
# Note: numpy complex arrays are not fully supported in safe mode
# as they require custom complex number serialization
def test_numpy_multidimensional(self):
"""Test multidimensional numpy arrays."""
pytest.importorskip("numpy")
import numpy as np
test_cases = [
np.array([[1, 2], [3, 4]]), # 2D
np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]), # 3D
np.zeros((10, 10, 10)), # Large 3D
np.ones((5, 5)), # 2D ones
]
for arr in test_cases:
serialized = SafeSerializer.dumps(arr, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
np.testing.assert_array_equal(result, arr)
def test_numpy_empty_array(self):
"""Test empty numpy array."""
pytest.importorskip("numpy")
import numpy as np
arr = np.array([])
serialized = SafeSerializer.dumps(arr, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
np.testing.assert_array_equal(result, arr)
class TestPILSupport:
"""Test PIL Image serialization (optional, skip if not installed)."""
def test_pil_image(self):
"""Test PIL Image roundtrip."""
pytest.importorskip("PIL")
from PIL import Image
# Create a simple test image
img = Image.new("RGB", (10, 10), color="red")
serialized = SafeSerializer.dumps(img, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert isinstance(result, Image.Image)
assert result.size == img.size
assert result.mode == img.mode
def test_pil_various_modes(self):
"""Test PIL images in various modes."""
pytest.importorskip("PIL")
from PIL import Image
modes = ["RGB", "RGBA", "L", "1"] # Color, Alpha, Grayscale, Binary
for mode in modes:
img = Image.new(mode, (10, 10), color="red" if mode != "1" else 1)
serialized = SafeSerializer.dumps(img, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert isinstance(result, Image.Image)
assert result.mode == img.mode
assert result.size == img.size
def test_pil_various_sizes(self):
"""Test PIL images of various sizes."""
pytest.importorskip("PIL")
from PIL import Image
sizes = [(1, 1), (100, 100), (500, 300)]
for size in sizes:
img = Image.new("RGB", size, color="blue")
serialized = SafeSerializer.dumps(img, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result.size == img.size
class TestDataclasses:
"""Test dataclass serialization."""
def test_simple_dataclass(self):
"""Test simple dataclass."""
from dataclasses import dataclass
@dataclass
class Person:
name: str
age: int
person = Person(name="Alice", age=30)
serialized = SafeSerializer.dumps(person, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
# Result is a dict representation
assert result["__dataclass__"] == "Person"
assert result["name"] == "Alice"
assert result["age"] == 30
def test_nested_dataclass(self):
"""Test nested dataclasses."""
from dataclasses import dataclass
@dataclass
class Address:
street: str
city: str
@dataclass
class Person:
name: str
address: Address
person = Person(name="Bob", address=Address(street="123 Main St", city="NYC"))
serialized = SafeSerializer.dumps(person, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result["name"] == "Bob"
assert result["address"]["street"] == "123 Main St"
class TestPerformance:
"""Performance tests for large data."""
def test_large_list(self):
"""Test serialization of large list."""
large_list = list(range(100_000))
serialized = SafeSerializer.dumps(large_list, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == large_list
def test_large_dict(self):
"""Test serialization of large dictionary."""
large_dict = {f"key_{i}": i for i in range(10_000)}
serialized = SafeSerializer.dumps(large_dict, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == large_dict
def test_deeply_nested_performance(self):
"""Test performance with deeply nested structures."""
obj = {"level": 0}
current = obj
for i in range(1, 100): # 100 levels (avoid recursion limit)
current["nested"] = {"level": i}
current = current["nested"]
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
class TestPrefixHandling:
"""Test handling of different prefix formats."""
def test_safe_prefix_detection(self):
"""Test detection of safe: prefix."""
obj = {"test": "data"}
serialized = SafeSerializer.dumps(obj, allow_pickle=False)
assert serialized.startswith("safe:")
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == obj
def test_pickle_prefix_with_allow_pickle(self):
"""Test pickle: prefix when pickle is allowed."""
# Create an object that needs pickle
obj = PicklableCustomClass()
serialized = SafeSerializer.dumps(obj, allow_pickle=True)
# Should have pickle prefix
assert serialized.startswith("pickle:")
result = SafeSerializer.loads(serialized, allow_pickle=True)
assert result.value == 42
def test_legacy_format_detection(self):
"""Test detection and handling of legacy format (no prefix)."""
# Simulate legacy pickle data (no prefix)
legacy_data = {"key": "value"}
legacy_encoded = base64.b64encode(pickle.dumps(legacy_data)).decode()
# Should work with allow_pickle=True
result = SafeSerializer.loads(legacy_encoded, allow_pickle=True)
assert result == legacy_data
class TestRealWorldScenarios:
"""Test real-world usage scenarios."""
def test_agent_variables_scenario(self):
"""Test typical agent variables scenario."""
        np = pytest.importorskip("numpy")
        Image = pytest.importorskip("PIL.Image")
# Typical variables an agent might use
variables = {
"search_results": ["result1", "result2", "result3"],
"config": {
"temperature": 0.7,
"max_tokens": 100,
"model": "gpt-4",
},
"data_array": np.array([1.0, 2.0, 3.0]),
"image": Image.new("RGB", (50, 50)),
"timestamp": datetime.now(),
"status": "running",
"counter": 42,
}
serialized = SafeSerializer.dumps(variables, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result["search_results"] == variables["search_results"]
assert result["config"] == variables["config"]
assert result["status"] == variables["status"]
assert result["counter"] == variables["counter"]
def test_final_answer_scenario(self):
"""Test typical final answer serialization."""
final_answers = [
"Simple string answer",
{"answer": "structured", "confidence": 0.95},
["multiple", "results", "returned"],
42,
3.14159,
True,
]
for answer in final_answers:
serialized = SafeSerializer.dumps(answer, allow_pickle=False)
result = SafeSerializer.loads(serialized, allow_pickle=False)
assert result == answer
class TestGeneratedDeserializerCode:
"""Regression tests for generated deserializer code used by remote executors."""
def test_generated_deserializer_executes_for_safe_payload(self):
code = SafeSerializer.get_deserializer_code(allow_pickle=False)
namespace = {}
exec(code, namespace, namespace)
payload = SafeSerializer.dumps(
{
"count": 3,
"items": (1, 2, 3),
"raw": b"bytes",
},
allow_pickle=False,
)
result = namespace["_deserialize"](payload)
assert result == {"count": 3, "items": (1, 2, 3), "raw": b"bytes"}
def test_generated_deserializer_handles_pickle_prefix_when_enabled(self):
code = SafeSerializer.get_deserializer_code(allow_pickle=True)
namespace = {}
exec(code, namespace, namespace)
payload = "pickle:" + base64.b64encode(pickle.dumps({"hello": "world"})).decode()
result = namespace["_deserialize"](payload)
assert result == {"hello": "world"}
class TestConcurrency:
"""Test thread safety and concurrent access."""
def test_concurrent_serialization(self):
"""Test concurrent serialization operations."""
import threading
results = []
errors = []
def serialize_data(data, index):
try:
serialized = SafeSerializer.dumps(data, allow_pickle=False)
deserialized = SafeSerializer.loads(serialized, allow_pickle=False)
results.append((index, deserialized == data))
except Exception as e:
errors.append((index, e))
threads = []
test_data = [{"thread": i, "data": list(range(100))} for i in range(10)]
for i, data in enumerate(test_data):
thread = threading.Thread(target=serialize_data, args=(data, i))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
assert len(errors) == 0, f"Errors occurred: {errors}"
assert len(results) == 10
assert all(success for _, success in results)
| {
"repo_id": "huggingface/smolagents",
"file_path": "tests/test_serialization.py",
"license": "Apache License 2.0",
"lines": 727,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/smolagents:docs/source/es/_config.py | # docstyle-ignore
INSTALL_CONTENT = """
# Installation
! pip install smolagents
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/smolagents.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| {
"repo_id": "huggingface/smolagents",
"file_path": "docs/source/es/_config.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
huggingface/smolagents:tests/test_vision_web_browser.py | """Test XPath injection vulnerability fix in vision_web_browser.py"""
from unittest.mock import Mock, patch
import pytest
from smolagents.vision_web_browser import _escape_xpath_string, search_item_ctrl_f
@pytest.fixture
def mock_driver():
"""Mock Selenium WebDriver"""
driver = Mock()
driver.find_elements.return_value = [Mock()] # Mock found elements
driver.execute_script.return_value = None
return driver
class TestXPathEscaping:
"""Test XPath string escaping functionality"""
@pytest.mark.parametrize(
"input_text,expected_pattern",
[
("normal text", "'normal text'"),
("text with 'quote'", "\"text with 'quote'\""),
('text with "quote"', "'text with \"quote\"'"),
("text with one single'quote", '"text with one single\'quote"'),
('text with one double"quote', "'text with one double\"quote'"),
(
"text with both 'single' and \"double\" quotes",
"concat('text with both ', \"'\", 'single', \"'\", ' and \"double\" quotes')",
),
("", "''"),
("'", '"\'"'),
('"', "'\"'"),
],
)
def test_escape_xpath_string_basic(self, input_text, expected_pattern):
"""Test basic XPath escaping cases"""
result = _escape_xpath_string(input_text)
assert result == expected_pattern
@pytest.mark.parametrize(
"input_text",
[
"text with both 'single' and \"double\" quotes",
'it\'s a "test" case',
"'mixed\" quotes'",
],
)
def test_escape_xpath_string_mixed_quotes(self, input_text):
"""Test XPath escaping with mixed quotes uses concat()"""
result = _escape_xpath_string(input_text)
assert result.startswith("concat(")
assert result.endswith(")")
@pytest.mark.parametrize(
"malicious_input",
[
"')] | //script[@src='evil.js'] | foo[contains(text(), '",
"') or 1=1 or ('",
"')] | //user[contains(@role,'admin')] | foo[contains(text(), '",
"') and substring(//user[1]/password,1,1)='a",
],
)
def test_escape_prevents_injection(self, malicious_input):
"""Test that malicious XPath injection attempts are safely escaped"""
result = _escape_xpath_string(malicious_input)
# Should either be wrapped in quotes or use concat()
assert (
(result.startswith("'") and result.endswith("'"))
or (result.startswith('"') and result.endswith('"'))
or result.startswith("concat(")
)
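# Sketch of the escaping strategy these tests exercise (an assumption about the
# implementation in vision_web_browser.py, which is not reproduced in this file):
#   - no single quotes in the text      -> wrap it in single quotes:  'text'
#   - otherwise, no double quotes in it -> wrap it in double quotes:  "text"
#   - both quote kinds present          -> split on ' and rebuild via XPath concat(),
#     emitting "'" for each literal single quote, so user input can never close the
#     surrounding string literal and inject new predicates.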
class TestSearchItemCtrlF:
"""Test the search_item_ctrl_f function with XPath injection protection"""
@pytest.mark.parametrize(
"search_text",
[
"normal search",
"search with 'quotes'",
'search with "quotes"',
"')] | //script[@src='evil.js'] | foo[contains(text(), '",
"') or 1=1 or ('",
],
)
def test_search_item_prevents_injection(self, search_text, mock_driver):
"""Test that search_item_ctrl_f prevents XPath injection"""
with patch("smolagents.vision_web_browser.driver", mock_driver, create=True):
# Call the function
result = search_item_ctrl_f(search_text)
# Verify driver.find_elements was called
mock_driver.find_elements.assert_called_once()
# Get the actual XPath query that was generated
call_args = mock_driver.find_elements.call_args
xpath_query = call_args[0][1] # Second positional argument
# Verify the query doesn't contain unescaped injection
if "')] | //" in search_text:
# For injection attempts, verify they're properly escaped
# The query should either use concat() or be properly quoted
is_concat = "concat(" in xpath_query
is_properly_quoted = xpath_query.count('"') >= 2 or xpath_query.count("'") >= 2
assert is_concat or is_properly_quoted, f"XPath injection not prevented: {xpath_query}"
# Verify we got a result
assert "Found" in result
def test_search_item_nth_result(self, mock_driver):
"""Test nth_result parameter works correctly"""
mock_driver.find_elements.return_value = [Mock(), Mock(), Mock()] # 3 elements
with patch("smolagents.vision_web_browser.driver", mock_driver, create=True):
result = search_item_ctrl_f("test", nth_result=2)
# Should find 3 matches and focus on element 2
assert "Found 3 matches" in result
assert "Focused on element 2 of 3" in result
def test_search_item_not_found(self, mock_driver):
"""Test exception when nth_result exceeds available matches"""
mock_driver.find_elements.return_value = [Mock()] # Only 1 element
with patch("smolagents.vision_web_browser.driver", mock_driver, create=True):
with pytest.raises(Exception, match="Match n°3 not found"):
search_item_ctrl_f("test", nth_result=3)
| {
"repo_id": "huggingface/smolagents",
"file_path": "tests/test_vision_web_browser.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/smolagents:examples/structured_output_tool.py | # How to run with uv:
# uv run structured_output_tool.py
#
# Modify the smolagents dependency to point to the local smolagents repo or
# remove `@ file:///<path-to-smolagents>`
#
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "smolagents[mcp,litellm] @ file:///<path-to-smolagents>",
# "pydantic",
# ]
# ///
from textwrap import dedent
from mcp import StdioServerParameters
from smolagents import CodeAgent, InferenceClientModel, LiteLLMModel, MCPClient # noqa: F401
def weather_server_script() -> str:
"""Return an inline MCP server script that exposes a weather tool."""
return dedent(
'''
from pydantic import BaseModel, Field
from mcp.server.fastmcp import FastMCP
mcp = FastMCP("Weather Service")
class WeatherInfo(BaseModel):
location: str = Field(description="The location name")
temperature: float = Field(description="Temperature in Celsius")
conditions: str = Field(description="Weather conditions")
humidity: int = Field(description="Humidity percentage", ge=0, le=100)
@mcp.tool(
name="get_weather_info",
description="Get weather information for a location as structured data.",
)
def get_weather_info(city: str) -> WeatherInfo:
"""Get weather information for a city."""
return WeatherInfo(
location=city,
temperature=22.5,
conditions="partly cloudy",
humidity=65
)
mcp.run()
'''
)
def main() -> None:
# Configure your inference model
# model = InferenceClientModel()
model = LiteLLMModel(
model_id="mistral/mistral-small-latest",
# model_id="openai/gpt-4o-mini",
)
# Start the Weather MCP server from an inline script in this same file
serverparams = StdioServerParameters(command="python", args=["-c", weather_server_script()])
# Bridge MCP tools into SmolAgents with structured outputs enabled
with MCPClient(
serverparams,
structured_output=True,
) as tools:
agent = CodeAgent(tools=tools, model=model)
# Example query that encourages tool use and unit conversion
agent.run("What is the temperature in Tokyo in Fahrenheit?")
if __name__ == "__main__":
main()
| {
"repo_id": "huggingface/smolagents",
"file_path": "examples/structured_output_tool.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/smolagents:tests/test_telemetry.py | # coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Source: https://github.com/Arize-ai/openinference/blob/main/python/instrumentation/openinference-instrumentation-smolagents/tests/openinference/instrumentation/smolagents/test_instrumentor.py
from typing import Generator
import pytest
from .utils.markers import require_run_all
# Add this at the module level to skip all tests if OpenTelemetry is not available
pytest.importorskip("opentelemetry", reason="requires opentelemetry")
pytest.importorskip(
"openinference.instrumentation.smolagents", reason="requires openinference.instrumentation.smolagents"
)
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry import trace as trace_api
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from smolagents.models import InferenceClientModel
@pytest.fixture
def in_memory_span_exporter() -> InMemorySpanExporter:
return InMemorySpanExporter()
@pytest.fixture
def tracer_provider(in_memory_span_exporter: InMemorySpanExporter) -> trace_api.TracerProvider:
resource = Resource(attributes={})
tracer_provider = trace_sdk.TracerProvider(resource=resource)
span_processor = SimpleSpanProcessor(span_exporter=in_memory_span_exporter)
tracer_provider.add_span_processor(span_processor=span_processor)
return tracer_provider
@pytest.fixture(autouse=True)
def instrument(
tracer_provider: trace_api.TracerProvider,
in_memory_span_exporter: InMemorySpanExporter,
) -> Generator[None, None, None]:
SmolagentsInstrumentor().instrument(tracer_provider=tracer_provider, skip_dep_check=True)
yield
SmolagentsInstrumentor().uninstrument()
in_memory_span_exporter.clear()
@require_run_all
class TestOpenTelemetry:
def test_model(self, in_memory_span_exporter: InMemorySpanExporter):
model = InferenceClientModel()
_ = model(
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Who won the World Cup in 2018? Answer in one word with no punctuation.",
}
],
}
]
)
spans = in_memory_span_exporter.get_finished_spans()
assert len(spans) == 1
span = spans[0]
assert span.name == "InferenceClientModel.generate"
assert span.status.is_ok
assert span.attributes
| {
"repo_id": "huggingface/smolagents",
"file_path": "tests/test_telemetry.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/smolagents:examples/async_agent/main.py | """
Async CodeAgent Example with Starlette
This example demonstrates how to use a CodeAgent in an async Starlette app,
running the agent in a background thread using anyio.to_thread.run_sync.
"""
import anyio.to_thread
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route
from smolagents import CodeAgent, InferenceClientModel
# Create a simple agent instance (customize as needed)
def get_agent():
# You can set custom model, or tools as needed
return CodeAgent(
model=InferenceClientModel(model_id="Qwen/Qwen3-Next-80B-A3B-Thinking"),
tools=[],
)
async def run_agent_in_thread(task: str):
agent = get_agent()
# The agent's run method is synchronous
result = await anyio.to_thread.run_sync(agent.run, task)
return result
async def run_agent_endpoint(request: Request):
data = await request.json()
task = data.get("task")
if not task:
return JSONResponse({"error": 'Missing "task" in request body.'}, status_code=400)
try:
result = await run_agent_in_thread(task)
return JSONResponse({"result": result})
except Exception as e:
return JSONResponse({"error": str(e)}, status_code=500)
routes = [
Route("/run-agent", run_agent_endpoint, methods=["POST"]),
]
app = Starlette(debug=True, routes=routes)
| {
"repo_id": "huggingface/smolagents",
"file_path": "examples/async_agent/main.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
huggingface/smolagents:examples/plan_customization/plan_customization.py | """
Plan Customization Example
This example demonstrates how to use step callbacks to interrupt the agent after
plan creation, allow user interaction to approve or modify the plan, and then
resume execution while preserving agent memory.
Key concepts demonstrated:
1. Step callbacks to interrupt after PlanningStep
2. Extracting and modifying the current plan
3. Resuming execution with reset=False to preserve memory
4. User interaction for plan approval/modification
"""
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, PlanningStep
def display_plan(plan_content):
"""Display the plan in a formatted way"""
print("\n" + "=" * 60)
print("🤖 AGENT PLAN CREATED")
print("=" * 60)
print(plan_content)
print("=" * 60)
def get_user_choice():
"""Get user's choice for plan approval"""
while True:
choice = input("\nChoose an option:\n1. Approve plan\n2. Modify plan\n3. Cancel\nYour choice (1-3): ").strip()
if choice in ["1", "2", "3"]:
return int(choice)
print("Invalid choice. Please enter 1, 2, or 3.")
def get_modified_plan(original_plan):
"""Allow user to modify the plan"""
print("\n" + "-" * 40)
print("MODIFY PLAN")
print("-" * 40)
print("Current plan:")
print(original_plan)
print("-" * 40)
print("Enter your modified plan (press Enter twice to finish):")
lines = []
empty_line_count = 0
while empty_line_count < 2:
line = input()
if line.strip() == "":
empty_line_count += 1
else:
empty_line_count = 0
lines.append(line)
# Remove the last two empty lines
modified_plan = "\n".join(lines[:-2])
return modified_plan if modified_plan.strip() else original_plan
def interrupt_after_plan(memory_step, agent):
"""
Step callback that interrupts the agent after a planning step is created.
This allows for user interaction to review and potentially modify the plan.
"""
if isinstance(memory_step, PlanningStep):
print("\n🛑 Agent interrupted after plan creation...")
# Display the created plan
display_plan(memory_step.plan)
# Get user choice
choice = get_user_choice()
if choice == 1: # Approve plan
print("✅ Plan approved! Continuing execution...")
# Don't interrupt - let the agent continue
return
elif choice == 2: # Modify plan
# Get modified plan from user
modified_plan = get_modified_plan(memory_step.plan)
# Update the plan in the memory step
memory_step.plan = modified_plan
print("\nPlan updated!")
display_plan(modified_plan)
print("✅ Continuing with modified plan...")
# Don't interrupt - let the agent continue with modified plan
return
elif choice == 3: # Cancel
print("❌ Execution cancelled by user.")
agent.interrupt()
return
def main():
"""Run the complete plan customization example"""
print("🚀 Starting Plan Customization Example")
print("=" * 60)
# Create agent with planning enabled and step callback
agent = CodeAgent(
model=InferenceClientModel(),
tools=[DuckDuckGoSearchTool()], # Add a search tool for more interesting plans
planning_interval=5, # Plan every 5 steps for demonstration
step_callbacks={PlanningStep: interrupt_after_plan},
max_steps=10,
verbosity_level=1, # Show agent thoughts
)
# Define a task that will benefit from planning
task = """Search for recent developments in artificial intelligence and provide a summary
of the top 3 most significant breakthroughs in 2024. Include the source of each breakthrough."""
try:
print(f"\n📋 Task: {task}")
print("\n🤖 Agent starting execution...")
# First run - will create plan and potentially get interrupted
result = agent.run(task)
# If we get here, the plan was approved or execution completed
print("\n✅ Task completed successfully!")
print("\n📄 Final Result:")
print("-" * 40)
print(result)
except Exception as e:
if "interrupted" in str(e).lower():
print("\n🛑 Agent execution was cancelled by user.")
print("\nTo resume execution later, you could call:")
print("agent.run(task, reset=False) # This preserves the agent's memory")
# Demonstrate resuming with reset=False
print("\n" + "=" * 60)
print("DEMONSTRATION: Resuming with reset=False")
print("=" * 60)
# Show current memory state
print(f"\n📚 Current memory contains {len(agent.memory.steps)} steps:")
for i, step in enumerate(agent.memory.steps):
step_type = type(step).__name__
print(f" {i + 1}. {step_type}")
# Ask if user wants to see resume demonstration
resume_choice = input("\nWould you like to see resume demonstration? (y/n): ").strip().lower()
if resume_choice == "y":
print("\n🔄 Resuming execution...")
try:
                # Resume without resetting - preserves memory
                resume_result = agent.run(task, reset=False)
                print("\n✅ Task completed after resume!")
                print("\n📄 Final Result:")
                print("-" * 40)
                print(resume_result)
except Exception as resume_error:
print(f"\n❌ Error during resume: {resume_error}")
else:
print(f"\n❌ An error occurred: {e}")
if __name__ == "__main__":
# Run the main example
main()
| {
"repo_id": "huggingface/smolagents",
"file_path": "examples/plan_customization/plan_customization.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
huggingface/smolagents:examples/server/main.py | from anyio import to_thread
from starlette.applications import Starlette
from starlette.responses import HTMLResponse, JSONResponse
from starlette.routing import Route
from smolagents import CodeAgent, InferenceClientModel, MCPClient
# Create an MCP client to connect to the MCP server
mcp_server_parameters = {
"url": "https://evalstate-hf-mcp-server.hf.space/mcp",
"transport": "streamable-http",
}
mcp_client = MCPClient(server_parameters=mcp_server_parameters)
# Create a CodeAgent with a specific model and the tools from the MCP client
agent = CodeAgent(
model=InferenceClientModel(model_id="Qwen/Qwen3-Next-80B-A3B-Thinking"),
tools=mcp_client.get_tools(),
)
# Define the shutdown handler to disconnect the MCP client
async def shutdown():
mcp_client.disconnect()
async def homepage(request):
return HTMLResponse(
r"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Smolagents Demo</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
background-color: #f5f5f5;
}
.container {
background: white;
border-radius: 12px;
padding: 30px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
h1 {
color: #333;
text-align: center;
margin-bottom: 30px;
}
.chat-container {
border: 1px solid #ddd;
border-radius: 8px;
height: 400px;
overflow-y: auto;
padding: 15px;
margin-bottom: 20px;
background-color: #fafafa;
}
.message {
margin-bottom: 15px;
padding: 10px;
border-radius: 6px;
}
.user-message {
background-color: #007bff;
color: white;
margin-left: 50px;
}
.agent-message {
background-color: #e9ecef;
color: #333;
margin-right: 50px;
}
.input-container {
display: flex;
gap: 10px;
}
input[type="text"] {
flex: 1;
padding: 12px;
border: 1px solid #ddd;
border-radius: 6px;
font-size: 16px;
}
button {
padding: 12px 24px;
background-color: #007bff;
color: white;
border: none;
border-radius: 6px;
cursor: pointer;
font-size: 16px;
}
button:hover {
background-color: #0056b3;
}
button:disabled {
background-color: #ccc;
cursor: not-allowed;
}
.loading {
color: #666;
font-style: italic;
}
</style>
</head>
<body>
<div class="container">
<h1>🤖 Smolagents Demo</h1>
<div class="chat-container" id="chat-container">
<div class="message agent-message">
Hello! I'm a code agent with access to MCP tools. Ask me anything!
</div>
</div>
<div class="input-container">
<input type="text" id="message-input" placeholder="Ask me anything..." autofocus>
<button onclick="sendMessage()" id="send-button">Send</button>
</div>
</div>
<script>
const chatContainer = document.getElementById('chat-container');
const messageInput = document.getElementById('message-input');
const sendButton = document.getElementById('send-button');
function addMessage(content, isUser = false) {
const messageDiv = document.createElement('div');
messageDiv.className = `message ${isUser ? 'user-message' : 'agent-message'}`;
messageDiv.textContent = content;
chatContainer.appendChild(messageDiv);
chatContainer.scrollTop = chatContainer.scrollHeight;
}
async function sendMessage() {
const message = messageInput.value.trim();
if (!message) return;
// Add user message
addMessage(message, true);
messageInput.value = '';
sendButton.disabled = true;
sendButton.textContent = 'Sending...';
// Add loading indicator
const loadingDiv = document.createElement('div');
loadingDiv.className = 'message agent-message loading';
loadingDiv.textContent = 'Thinking...';
chatContainer.appendChild(loadingDiv);
chatContainer.scrollTop = chatContainer.scrollHeight;
try {
const response = await fetch('/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ message }),
});
const data = await response.json();
// Remove loading indicator
chatContainer.removeChild(loadingDiv);
// Add agent response
addMessage(data.reply);
} catch (error) {
// Remove loading indicator
chatContainer.removeChild(loadingDiv);
addMessage(`Error: ${error.message}`);
} finally {
sendButton.disabled = false;
sendButton.textContent = 'Send';
messageInput.focus();
}
}
// Send message on Enter key
messageInput.addEventListener('keypress', function(e) {
if (e.key === 'Enter') {
sendMessage();
}
});
</script>
</body>
</html>
"""
)
async def chat(request):
data = await request.json()
message = data.get("message", "").strip()
# Run in a thread to avoid blocking the event loop
result = await to_thread.run_sync(agent.run, message)
# Format the result if it's a complex data structure
reply = str(result)
return JSONResponse({"reply": reply})
app = Starlette(
debug=True,
routes=[
Route("/", homepage),
Route("/chat", chat, methods=["POST"]),
],
on_shutdown=[shutdown], # Register the shutdown handler: disconnect the MCP client
)
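# Example launch command (illustrative; any ASGI server works and the port is arbitrary):
#     uvicorn main:app --host 0.0.0.0 --port 8000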
| {
"repo_id": "huggingface/smolagents",
"file_path": "examples/server/main.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
huggingface/transformers:tests/trainer/test_trainer_accelerator.py | # Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trainer AcceleratorConfig tests: creation from dict/YAML/dataclass, partial overrides,
gradient accumulation settings, custom AcceleratorState, and validation.
"""
import dataclasses
import json
import tempfile
from pathlib import Path
from typing import Any
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from transformers import Trainer, TrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch
from transformers.trainer_pt_utils import AcceleratorConfig
from .trainer_test_utils import (
RegressionModelConfig,
RegressionPreTrainedModel,
RegressionTrainingArguments,
SampleIterableDataset,
)
@require_torch
class TrainerAcceleratorConfigTest(TestCasePlus):
def test_accelerator_config_empty(self):
# Checks that a config can be made with the defaults if not passed
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
# Leaves one option as something *not* basic
args = RegressionTrainingArguments(output_dir=tmp_dir)
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertEqual(trainer.accelerator.split_batches, False)
self.assertEqual(trainer.accelerator.dispatch_batches, None)
self.assertEqual(trainer.accelerator.even_batches, True)
self.assertEqual(trainer.accelerator.use_seedable_sampler, True)
# gradient accumulation kwargs configures gradient_state
self.assertNotIn("sync_each_batch", trainer.accelerator.gradient_state.plugin_kwargs)
def test_accelerator_config_from_dict(self):
# Checks that accelerator kwargs can be passed through
# and the accelerator is initialized respectively
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
accelerator_config: dict[str, Any] = {
"split_batches": True,
"dispatch_batches": True,
"even_batches": False,
"use_seedable_sampler": True,
}
accelerator_config["gradient_accumulation_kwargs"] = {"sync_each_batch": True}
# Leaves all options as something *not* basic
args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config)
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertEqual(trainer.accelerator.split_batches, True)
self.assertEqual(trainer.accelerator.dispatch_batches, True)
self.assertEqual(trainer.accelerator.even_batches, False)
self.assertEqual(trainer.accelerator.use_seedable_sampler, True)
def test_accelerator_config_from_yaml(self):
# Checks that accelerator kwargs can be passed through
# and the accelerator is initialized respectively
with tempfile.TemporaryDirectory() as tmp_dir:
path_file = Path(tmp_dir) / "accelerator_config.json"
with open(path_file, "w") as f:
accelerator_config = {
"split_batches": True,
"dispatch_batches": True,
"even_batches": False,
"use_seedable_sampler": False,
}
json.dump(accelerator_config, f)
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
# Leaves all options as something *not* basic
args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=path_file)
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertEqual(trainer.accelerator.split_batches, True)
self.assertEqual(trainer.accelerator.dispatch_batches, True)
self.assertEqual(trainer.accelerator.even_batches, False)
self.assertEqual(trainer.accelerator.use_seedable_sampler, False)
def test_accelerator_config_from_dataclass(self):
# Checks that accelerator kwargs can be passed through
        # and the accelerator is initialized accordingly
accelerator_config = AcceleratorConfig(
split_batches=True,
dispatch_batches=True,
even_batches=False,
use_seedable_sampler=False,
)
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
with tempfile.TemporaryDirectory() as tmp_dir:
args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config)
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertEqual(trainer.accelerator.split_batches, True)
self.assertEqual(trainer.accelerator.dispatch_batches, True)
self.assertEqual(trainer.accelerator.even_batches, False)
self.assertEqual(trainer.accelerator.use_seedable_sampler, False)
def test_accelerate_config_from_dataclass_grad_accum(self):
# Checks that accelerator kwargs can be passed through
        # and the accelerator is initialized accordingly
grad_acc_kwargs = {
"num_steps": 10,
"adjust_scheduler": False,
"sync_with_dataloader": False,
"sync_each_batch": True,
}
accelerator_config = AcceleratorConfig(
split_batches=True,
dispatch_batches=True,
even_batches=False,
use_seedable_sampler=False,
gradient_accumulation_kwargs=grad_acc_kwargs,
)
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
with tempfile.TemporaryDirectory() as tmp_dir:
args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config)
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertEqual(trainer.args.gradient_accumulation_steps, 10)
def test_accelerator_config_from_partial(self):
# Checks that accelerator kwargs can be passed through
        # and the accelerator is initialized accordingly
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
# Leaves one option as something *not* basic
args = RegressionTrainingArguments(
output_dir=tmp_dir,
accelerator_config={
"split_batches": True,
},
)
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertEqual(trainer.accelerator.split_batches, True)
self.assertEqual(trainer.accelerator.dispatch_batches, None)
self.assertEqual(trainer.accelerator.even_batches, True)
self.assertEqual(trainer.accelerator.use_seedable_sampler, True)
def test_accelerator_custom_state(self):
AcceleratorState._reset_state(reset_partial_state=True)
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(ValueError) as cm:
_ = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config={"use_configured_state": True})
self.assertIn("Please define this beforehand", str(cm.warnings[0].message))
_ = Accelerator()
_ = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config={"use_configured_state": True})
AcceleratorState._reset_state(reset_partial_state=True)
def test_accelerator_config_from_dict_grad_accum_num_steps(self):
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(config)
eval_dataset = SampleIterableDataset()
# case - TrainingArguments.gradient_accumulation_steps == 1
            # - gradient_accumulation_kwargs['num_steps'] == 1
# results in grad accum set to 1
args = RegressionTrainingArguments(
output_dir=tmp_dir,
gradient_accumulation_steps=1,
accelerator_config={
"gradient_accumulation_kwargs": {
"num_steps": 1,
}
},
)
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["num_steps"], 1)
# case - TrainingArguments.gradient_accumulation_steps > 1
            # - gradient_accumulation_kwargs['num_steps'] specified
# results in exception raised
args = RegressionTrainingArguments(
output_dir=tmp_dir,
gradient_accumulation_steps=2,
accelerator_config={
"gradient_accumulation_kwargs": {
"num_steps": 10,
}
},
)
with self.assertRaises(Exception) as context:
trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset)
self.assertTrue("The `AcceleratorConfig`'s `num_steps` is set but" in str(context.exception))
def test_accelerator_config_not_instantiated(self):
        # Checks that passing the AcceleratorConfig class itself (a callable, not an instance)
        # raises a NotImplementedError
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(NotImplementedError) as context:
_ = RegressionTrainingArguments(
output_dir=tmp_dir,
accelerator_config=AcceleratorConfig,
)
self.assertTrue("Tried passing in a callable to `accelerator_config`" in str(context.exception))
# Now test with a custom subclass
@dataclasses.dataclass
class CustomAcceleratorConfig(AcceleratorConfig):
pass
@dataclasses.dataclass
class CustomTrainingArguments(TrainingArguments):
accelerator_config: dict = dataclasses.field(
default=CustomAcceleratorConfig,
)
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertRaises(NotImplementedError) as context:
_ = CustomTrainingArguments(
output_dir=tmp_dir,
)
self.assertTrue("Tried passing in a callable to `accelerator_config`" in str(context.exception))
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/trainer/test_trainer_accelerator.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/trainer/test_trainer_checkpointing.py | # Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trainer checkpoint saving, loading, and resume tests: save intervals, sharded checkpoints,
auto batch size finder, resume with frozen params/gradient accumulation/different batch sizes,
checkpoint sorting and rotation, interrupted training recovery, JIT checkpointing (signal-based
checkpoint management), model/tokenizer/processor saving with best model selection, and Hub
push/tags/revision integration.
"""
import dataclasses
import math
import os
import re
import signal
import subprocess
import sys
import tempfile
import unittest
from pathlib import Path
from typing import Any
from unittest.mock import Mock, patch
import safetensors
import torch
from huggingface_hub import ModelCard, create_branch, list_repo_commits, list_repo_files
from torch import nn
from transformers import (
AutoFeatureExtractor,
AutoImageProcessor,
AutoModelForCausalLM,
AutoProcessor,
AutoTokenizer,
Trainer,
TrainerState,
TrainingArguments,
default_data_collator,
is_torch_available,
)
from transformers.testing_utils import (
ENDPOINT_STAGING,
TOKEN,
USER,
CaptureLogger,
TemporaryHubRepo,
TestCasePlus,
backend_device_count,
evaluate_side_effect_factory,
get_steps_per_epoch,
is_staging_test,
require_accelerate,
require_deepspeed,
require_non_hpu,
require_peft,
require_tensorboard,
require_torch,
require_torch_non_multi_accelerator,
require_torch_up_to_2_accelerators,
require_vision,
run_first,
run_test_using_subprocess,
slow,
torch_device,
)
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
get_last_checkpoint,
rotate_checkpoints,
set_seed,
sort_checkpoints,
)
from transformers.utils import SAFE_WEIGHTS_NAME, logging
from .trainer_test_utils import (
PATH_SAMPLE_TEXT,
AlmostAccuracy,
MockCudaOOMCallback,
RegressionDataset,
RegressionModelConfig,
RegressionPreTrainedModel,
RegressionRandomPreTrainedModel,
RegressionTrainingArguments,
TrainerIntegrationCommon,
get_dataset,
get_language_model_trainer,
get_regression_trainer,
)
if is_torch_available():
from transformers.trainer_jit_checkpoint import CheckpointManager, JITCheckpointCallback
# ---------------------------------------------------------------------------
# Checkpoint save/load tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerCheckpointSaveTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_save_checkpoints(self):
tmp_dir = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(output_dir=tmp_dir, save_steps=5)
trainer.train()
self.check_saved_checkpoints(tmp_dir, 5, int(self.n_epochs * 64 / self.batch_size))
# With a regular model that is not a PreTrainedModel
tmp_dir = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(output_dir=tmp_dir, save_steps=5, pretrained=False)
trainer.train()
self.check_saved_checkpoints(tmp_dir, 5, int(self.n_epochs * 64 / self.batch_size), False)
def test_save_collator_tokenizer_by_default(self):
class FakeCollator:
def __init__(self):
self.tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
self.tokenizer.add_tokens(["<NEW_TOKEN1>", "<NEW_TOKEN2>"])
def __call__(self, features: list[Any], return_tensors="pt") -> dict[str, Any]:
return default_data_collator(features, return_tensors)
data_collator = FakeCollator()
tmp_dir = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(output_dir=tmp_dir, save_steps=5, data_collator=data_collator)
trainer.train()
loaded_tokenizer = AutoTokenizer.from_pretrained(os.path.join(tmp_dir, os.listdir(tmp_dir)[0]))
assert len(loaded_tokenizer) == len(trainer.data_collator.tokenizer), "Failed to load updated tokenizer"
# ---------------------------------------------------------------------------
# Resume from checkpoint tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerResumeTrainingTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
@require_torch_non_multi_accelerator
def test_can_resume_training(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
tmp_dir = self.get_auto_remove_tmp_dir()
kwargs = {
"output_dir": tmp_dir,
"train_len": 128,
"save_steps": 5,
"learning_rate": 0.1,
"logging_steps": 5,
}
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmp_dir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmp_dir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# With a regular model that is not a PreTrainedModel
tmp_dir = self.get_auto_remove_tmp_dir()
kwargs = {
"output_dir": tmp_dir,
"train_len": 128,
"save_steps": 5,
"learning_rate": 0.1,
"pretrained": False,
}
trainer = get_regression_trainer(**kwargs)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmp_dir, "checkpoint-5")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check with a later checkpoint that it also works when we span over one epoch
checkpoint = os.path.join(tmp_dir, "checkpoint-15")
# Reinitialize trainer and load model
trainer = get_regression_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
# Now check failures
# 1. fail to find a bogus checkpoint
tmp_dir = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(output_dir=tmp_dir)
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
self.assertTrue("Can't find a valid checkpoint at" in str(context.exception))
        # 2. fail to find any checkpoint - due to a fresh output_dir
tmp_dir = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(output_dir=tmp_dir)
with self.assertRaises(Exception) as context:
trainer.train(resume_from_checkpoint=True)
self.assertTrue("No valid checkpoint found in output directory" in str(context.exception))
# require_torch_non_multi_accelerator is necessary because this worker blocks runs when using multiple GPUs, making
# the test slower.
@require_torch_non_multi_accelerator
@run_test_using_subprocess
@run_first
@slow
def test_can_resume_training_lm(self):
# Check if it works for a simple language modeling example
training_steps = 10
resume_from_step = 8
with tempfile.TemporaryDirectory() as tmpdir:
kwargs = {
"output_dir": tmpdir,
"fp16": True,
"max_steps": training_steps,
"per_device_train_batch_size": 1,
"learning_rate": 1e-5,
"lr_scheduler_type": "cosine",
"save_strategy": "steps",
"save_steps": 1,
"logging_strategy": "steps",
"logging_steps": 1,
}
trainer = get_language_model_trainer(**kwargs)
trainer.train(resume_from_checkpoint=False)
# Get the parameter length of the model
model_params = torch.cat([p.cpu().flatten() for p in trainer.model.parameters()])
model_param_len = len(model_params)
# Sample uniform indexes and save the values of the parameters (considering an unrolled vector with
# all of them)
indices = torch.randint(0, model_param_len, (1000,))
# Save the values of the parameters for later comparison
model_params_sample = model_params[indices].detach().clone()
state1 = dataclasses.asdict(trainer.state)
# Delete the reference
del model_params, trainer
            # Checks that all checkpoints are there; +1 is needed because the end of the checked range is exclusive
self.check_saved_checkpoints(tmpdir, freq=1, total=training_steps + 1, is_pretrained=True, use_scaler=True)
# Checkpoint at intermediate step
checkpoint = os.path.join(tmpdir, f"checkpoint-{resume_from_step + 1}")
trainer = get_language_model_trainer(**kwargs)
trainer.train(resume_from_checkpoint=checkpoint)
model_params = torch.cat([p.cpu().flatten() for p in trainer.model.parameters()])
# Check that the parameters are the same
self.assertTrue(torch.allclose(model_params[indices], model_params_sample))
state2 = dataclasses.asdict(trainer.state)
self.check_trainer_state_are_the_same(state1, state2)
del model_params, trainer
@unittest.skip(
reason="@muellerzr: Fix once Trainer can take an accelerate configuration. Need to set `seedable_sampler=True`."
)
def test_resume_training_with_randomness(self):
# For more than 1 GPUs, since the randomness is introduced in the model and with DataParallel (which is used
# in this test for more than 2 GPUs), the calls to the torch RNG will happen in a random order (sometimes
# GPU 0 will call first and sometimes GPU 1).
random_torch = not torch.cuda.is_available() or backend_device_count(torch_device) <= 1
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
train_dataset = RegressionDataset(length=128)
eval_dataset = RegressionDataset()
with self.subTest("Test every step"):
config = RegressionModelConfig(a=0, b=2, random_torch=random_torch)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
model = RegressionRandomPreTrainedModel(config)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15"))
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
self.assertAlmostEqual(a, a1, delta=1e-5)
self.assertAlmostEqual(b, b1, delta=1e-5)
with self.subTest("Test every epoch"):
config = RegressionModelConfig(a=0, b=2, random_torch=random_torch)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_strategy="epoch", learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
model = RegressionRandomPreTrainedModel(config)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")]
# There should be one checkpoint per epoch.
self.assertEqual(len(checkpoints), 3)
checkpoint_dir = min(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))
trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir))
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
self.assertAlmostEqual(a, a1, delta=1e-5)
self.assertAlmostEqual(b, b1, delta=1e-5)
def test_resume_training_with_different_batch_size(self):
# Regression test for https://github.com/huggingface/transformers/issues/43708
# When resuming from checkpoint without auto_find_batch_size, user's new batch size should be used
train_dataset = RegressionDataset(length=64)
config = RegressionModelConfig(a=0, b=2)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
# First training run with batch_size=2
args = RegressionTrainingArguments(
tmp_dir,
do_train=True,
max_steps=2,
save_steps=1,
per_device_train_batch_size=2,
auto_find_batch_size=False,
)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
# Verify the checkpoint saved with the effective batch size (per_device * n_gpu)
checkpoint = os.path.join(tmp_dir, "checkpoint-1")
state = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json"))
self.assertEqual(state.train_batch_size, args.train_batch_size)
# Resume with a different batch_size=4 (without auto_find_batch_size)
# The trainer should use the new batch_size, not the checkpoint's
args2 = RegressionTrainingArguments(
tmp_dir,
do_train=True,
max_steps=4,
save_steps=1,
per_device_train_batch_size=4,
auto_find_batch_size=False,
)
trainer2 = Trainer(model, args2, train_dataset=train_dataset)
trainer2.train(resume_from_checkpoint=checkpoint)
# The trainer should be using the new batch size (4), not the checkpoint's (2)
self.assertEqual(trainer2._train_batch_size, 4 * max(trainer2.args.n_gpu, 1))
# regression for this issue: https://github.com/huggingface/transformers/issues/12970
def test_training_with_resume_from_checkpoint_false(self):
train_dataset = RegressionDataset(length=128)
eval_dataset = RegressionDataset()
config = RegressionModelConfig(a=0, b=2)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train(resume_from_checkpoint=False)
@require_torch_up_to_2_accelerators
def test_resume_training_with_shard_checkpoint(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
self.convert_to_sharded_checkpoint(checkpoint)
# Reinitialize trainer
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_torch_up_to_2_accelerators
def test_resume_training_with_checkpoint(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
save_steps=5,
learning_rate=0.1,
)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
self.convert_to_sharded_checkpoint(checkpoint)
# Reinitialize trainer
trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_torch_up_to_2_accelerators
def test_resume_training_with_gradient_accumulation(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train(resume_from_checkpoint=checkpoint)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_torch_up_to_2_accelerators
def test_resume_training_with_frozen_params(self):
# This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
# save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
# won't be the same since the training dataloader is shuffled).
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.model.a.requires_grad_(False)
trainer.train()
(a, b) = trainer.model.a.item(), trainer.model.b.item()
state = dataclasses.asdict(trainer.state)
checkpoint = os.path.join(tmpdir, "checkpoint-5")
# Reinitialize trainer
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.model.a.requires_grad_(False)
trainer.train(resume_from_checkpoint=checkpoint)
self.assertFalse(trainer.model.a.requires_grad)
(a1, b1) = trainer.model.a.item(), trainer.model.b.item()
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
self.check_trainer_state_are_the_same(state, state1)
@require_peft
def test_multiple_peft_adapters(self):
from peft import LoraConfig, get_peft_model
# Tests if resuming from checkpoint works if the model has multiple adapters
MODEL_ID = "hf-internal-testing/tiny-random-LlamaForCausalLM"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
tiny_model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
peft_config = LoraConfig(
r=4,
lora_alpha=16,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
tiny_model = get_peft_model(tiny_model, peft_config, "adapter1")
tiny_model.add_adapter("adapter2", peft_config)
train_dataset = get_dataset(PATH_SAMPLE_TEXT, tokenizer, 100)
tokenizer.pad_token = tokenizer.eos_token
tmp_dir = self.get_auto_remove_tmp_dir()
args = TrainingArguments(
tmp_dir,
per_device_train_batch_size=1,
learning_rate=1e-9,
save_steps=5,
logging_steps=5,
max_steps=10,
use_cpu=True,
)
trainer = Trainer(tiny_model, args, processing_class=tokenizer, train_dataset=train_dataset)
trainer.train()
parameters = dict(tiny_model.named_parameters())
state = dataclasses.asdict(trainer.state)
# Reinitialize trainer
trainer = Trainer(tiny_model, args, processing_class=tokenizer, train_dataset=train_dataset)
checkpoint = os.path.join(tmp_dir, "checkpoint-5")
trainer.train(resume_from_checkpoint=checkpoint)
parameters1 = dict(tiny_model.named_parameters())
state1 = dataclasses.asdict(trainer.state)
self.assertEqual(parameters, parameters1)
self.check_trainer_state_are_the_same(state, state1)
# ---------------------------------------------------------------------------
# Auto batch size finder tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerAutoBatchSizeTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
@slow
@require_non_hpu
@require_accelerate
@require_torch_non_multi_accelerator
def test_auto_batch_size_finder(self):
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "examples", "pytorch", "text-classification")
)
sys.path.append(SRC_DIR)
import run_glue
with tempfile.TemporaryDirectory() as tmpdir:
testargs = f"""
run_glue.py
--model_name_or_path distilbert/distilbert-base-uncased
--task_name mrpc
--do_train
--do_eval
--max_seq_len 128
--per_device_train_batch_size 4096
--learning_rate 2e-5
--num_train_epochs 1
--output_dir {tmpdir}
--auto_find_batch_size 0
""".split()
with self.assertRaises(RuntimeError):
with patch.object(sys, "argv", testargs):
run_glue.main()
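            # Re-run with auto_find_batch_size enabled: the oversized batch size should now be reduced
            # automatically instead of raising a RuntimeError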
testargs[-1] = "1"
with patch.object(sys, "argv", testargs):
run_glue.main()
@require_deepspeed
def test_auto_batch_size_with_deepspeed(self):
train_dataset = RegressionDataset(length=128)
config = RegressionModelConfig(a=0, b=2)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
for stage in [1, 2]:
deepspeed = {
"zero_optimization": {
"stage": stage,
},
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
}
args = RegressionTrainingArguments(
tmp_dir,
do_train=True,
max_steps=2,
save_strategy="no",
per_device_train_batch_size=16,
auto_find_batch_size=True,
deepspeed=deepspeed,
)
trainer = Trainer(model, args, train_dataset=train_dataset, callbacks=[MockCudaOOMCallback()])
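            # MockCudaOOMCallback simulates an OOM on the first attempt, so auto_find_batch_size retries
            # training with a reduced per-device batch size (expected to land at 14 here)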
trainer.train()
self.assertEqual(trainer._train_batch_size, 14)
def test_auto_batch_size_with_resume_from_checkpoint(self):
train_dataset = RegressionDataset(length=128)
config = RegressionModelConfig(a=0, b=2)
model = RegressionRandomPreTrainedModel(config)
tmp_dir = self.get_auto_remove_tmp_dir()
args = RegressionTrainingArguments(
tmp_dir,
do_train=True,
max_steps=2,
save_steps=1,
per_device_train_batch_size=16,
auto_find_batch_size=True,
)
trainer = Trainer(model, args, train_dataset=train_dataset, callbacks=[MockCudaOOMCallback()])
trainer.train()
previous_batch_size = trainer._train_batch_size
# Depends on the number of gpus so it is easier to just check that the batch_size decreased as expected
        self.assertLess(trainer._train_batch_size, 16)
# We can then make a new Trainer
trainer = Trainer(model, args, train_dataset=train_dataset)
# Check we are at 16 to start
self.assertEqual(trainer._train_batch_size, 16 * max(trainer.args.n_gpu, 1))
trainer.train(resume_from_checkpoint=True)
        # We should be back to the previously reduced batch size, picked up from the last Trainer run
self.assertEqual(trainer._train_batch_size, previous_batch_size)
# ---------------------------------------------------------------------------
# Checkpoint sorting, rotation, and logging tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerCheckpointRotationTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_checkpoint_sorting(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# Create fake checkpoints in non-sorted order
for n in [20, 5, 15, 25, 10]:
os.makedirs(os.path.join(tmp_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"))
# Test sorting by step number (oldest first)
sorted_cps = sort_checkpoints(tmp_dir)
values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in sorted_cps]
self.assertEqual(values, [5, 10, 15, 20, 25])
# Test with best_model_checkpoint - moved to second-to-last to protect from deletion
best = os.path.join(tmp_dir, f"{PREFIX_CHECKPOINT_DIR}-5")
sorted_cps = sort_checkpoints(tmp_dir, best_model_checkpoint=best)
values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in sorted_cps]
self.assertEqual(values, [10, 15, 20, 5, 25])
# Test with best_model_checkpoint already at end (stays at end)
best = os.path.join(tmp_dir, f"{PREFIX_CHECKPOINT_DIR}-25")
sorted_cps = sort_checkpoints(tmp_dir, best_model_checkpoint=best)
values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in sorted_cps]
self.assertEqual(values, [5, 10, 15, 20, 25])
def check_checkpoint_deletion(self, trainer, output_dir, expected):
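        """Create dummy checkpoint directories, rotate them, and check which step numbers survive."""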
# Make fake checkpoints
for n in [5, 10, 15, 20, 25]:
os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True)
rotate_checkpoints(
output_dir=output_dir,
save_total_limit=trainer.args.save_total_limit,
best_model_checkpoint=trainer.state.best_model_checkpoint,
)
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")]
values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints]
self.assertSetEqual(set(values), set(expected))
def test_checkpoint_rotation(self):
with tempfile.TemporaryDirectory() as tmp_dir:
# Without best model at end
trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2)
self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25])
# With best model at end
trainer = get_regression_trainer(
output_dir=tmp_dir, eval_strategy="steps", load_best_model_at_end=True, save_total_limit=2
)
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])
# Edge case: we don't always honor save_total_limit=1 if load_best_model_at_end=True to be able to resume
# from checkpoint
trainer = get_regression_trainer(
output_dir=tmp_dir, eval_strategy="steps", load_best_model_at_end=True, save_total_limit=1
)
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25")
self.check_checkpoint_deletion(trainer, tmp_dir, [25])
trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5")
self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25])
def test_compare_trainer_and_checkpoint_args_logging(self):
logger = logging.get_logger()
with tempfile.TemporaryDirectory() as tmpdir, CaptureLogger(logger) as cl:
trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=128,
eval_steps=5,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
save_steps=5,
learning_rate=0.1,
)
trainer.train()
checkpoint = os.path.join(tmpdir, "checkpoint-5")
checkpoint_trainer = get_regression_trainer(
output_dir=tmpdir,
train_len=256,
eval_steps=10,
gradient_accumulation_steps=4,
per_device_train_batch_size=8,
save_steps=10,
learning_rate=0.1,
)
checkpoint_trainer.train(resume_from_checkpoint=checkpoint)
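            # Resuming with different arguments should log the settings that differ from trainer_state.json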
self.assertIn("save_steps: 10 (from args) != 5 (from trainer_state.json)", cl.out)
self.assertIn(
"per_device_train_batch_size: 8 (from args) != 4 (from trainer_state.json)",
cl.out,
)
self.assertIn(
"eval_steps: 10 (from args) != 5 (from trainer_state.json)",
cl.out,
)
# ---------------------------------------------------------------------------
# Interrupted training and batch order tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerInterruptedTrainingTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_resume_from_interrupted_training(self):
"""
Tests resuming training from a checkpoint after a simulated interruption.
"""
# --- Helper classes and functions defined locally for this test ---
class DummyModel(nn.Module):
def __init__(self, input_dim=10, num_labels=2):
super().__init__()
self.linear = nn.Linear(input_dim, num_labels)
def forward(self, input_ids=None, attention_mask=None, labels=None):
logits = self.linear(input_ids.float())
loss = None
if labels is not None:
loss_fn = nn.CrossEntropyLoss()
loss = loss_fn(logits, labels)
return {"loss": loss, "logits": logits}
class DummyDictDataset(torch.utils.data.Dataset):
def __init__(self, input_ids, attention_mask, labels):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.labels = labels
def __len__(self):
return len(self.input_ids)
def __getitem__(self, idx):
return {
"input_ids": self.input_ids[idx],
"attention_mask": self.attention_mask[idx],
"labels": self.labels[idx],
}
def create_dummy_dataset():
"""Creates a dummy dataset for this specific test."""
num_samples = 100
input_dim = 10
dummy_input_ids = torch.rand(num_samples, input_dim)
dummy_attention_mask = torch.ones(num_samples, input_dim)
dummy_labels = torch.randint(0, 2, (num_samples,))
return DummyDictDataset(dummy_input_ids, dummy_attention_mask, dummy_labels)
# 1. Set up a dummy model and dataset
model = DummyModel(input_dim=10, num_labels=2)
dummy_dataset = create_dummy_dataset()
# 2. First training phase (simulating an interruption)
output_dir_initial = self.get_auto_remove_tmp_dir()
training_args_initial = TrainingArguments(
output_dir=output_dir_initial,
num_train_epochs=1,
per_device_train_batch_size=2,
gradient_accumulation_steps=3,
save_strategy="steps",
save_steps=1, # Save at every step
max_steps=2, # Stop after step 2 to simulate interruption
)
trainer_initial = Trainer(
model=model,
args=training_args_initial,
train_dataset=dummy_dataset,
)
trainer_initial.train()
# 3. Verify that a checkpoint was created before the "interruption"
checkpoint_path = os.path.join(output_dir_initial, "checkpoint-2")
self.assertTrue(os.path.exists(checkpoint_path), f"Checkpoint not found at {checkpoint_path}")
# 4. Second training phase (resuming from the checkpoint)
output_dir_resumed = self.get_auto_remove_tmp_dir()
# Total steps for one epoch is ceil(100 / (train_batch_size * 3)).
# We stopped at step 2, so the resumed training should finish the remaining steps.
training_args_resumed = TrainingArguments(
output_dir=output_dir_resumed,
num_train_epochs=1,
per_device_train_batch_size=2,
gradient_accumulation_steps=3,
save_strategy="steps",
save_steps=1,
)
trainer_resumed = Trainer(
model=model,
args=training_args_resumed,
train_dataset=dummy_dataset,
)
# Resume from the interrupted checkpoint and finish the remaining training
trainer_resumed.train(resume_from_checkpoint=checkpoint_path)
# 5. Assertions: Check if the training completed and the final model was saved
# Total steps per epoch = ceil(num_samples / (train_batch_size * grad_accum))
steps_per_epoch = math.ceil(
100 / (training_args_resumed.train_batch_size * training_args_resumed.gradient_accumulation_steps)
)
self.assertEqual(trainer_resumed.state.global_step, steps_per_epoch)
# Check that a checkpoint for the final step exists.
final_checkpoint_path = os.path.join(output_dir_resumed, f"checkpoint-{steps_per_epoch}")
self.assertTrue(os.path.exists(final_checkpoint_path))
# Check if the model weights file exists in the final checkpoint directory.
# Trainer saves non-PreTrainedModel models as `model.safetensors` by default if safetensors is available.
final_model_path = os.path.join(final_checkpoint_path, SAFE_WEIGHTS_NAME)
self.assertTrue(os.path.exists(final_model_path), "Final model checkpoint was not saved!")
@require_torch_non_multi_accelerator
def test_resume_batch_order(self):
"""
Test that verifies dataloader order is reproducible when resuming from partial checkpoints.
Tests resuming from checkpoint 7 (within epoch 1).
"""
# --- Helper classes and functions defined locally for this test ---
class DummyDataset(torch.utils.data.Dataset):
def __init__(self, size: int = 32):
self.size = size
self.data = torch.randn((size, 10))
self.data[:, 0] = torch.arange(0, size) # Encode the data order
self.labels = torch.randint(0, 10, (size,))
def __len__(self) -> int:
return self.size
def __getitem__(self, idx: int):
return {"input_ids": self.data[idx], "labels": self.labels[idx]}
class DummyModel(nn.Module):
def __init__(self, size: int):
super().__init__()
self.fc = nn.Linear(10, 10, bias=False)
# data_order logs the order of data points seen by the model
self.register_buffer("data_order", torch.empty(0, dtype=torch.long))
def load_state_dict(self, state_dict, strict=True):
# Handle data_order buffer size mismatch during checkpoint loading
if "data_order" in state_dict:
saved_data_order = state_dict["data_order"]
if hasattr(self, "data_order") and self.data_order.shape != saved_data_order.shape:
# Resize the buffer to match the saved state
self.data_order = saved_data_order.clone()
return super().load_state_dict(state_dict, strict=strict)
def forward(self, input_ids: torch.Tensor, labels: torch.Tensor = None):
logits = self.fc(input_ids)
loss = None
if labels is not None:
loss_fn = nn.CrossEntropyLoss()
loss = loss_fn(logits, labels)
# Log the data order for verification
data_indices = input_ids[:, 0].int()
self.data_order = torch.cat([self.data_order, data_indices.detach().clone()])
return {"loss": loss, "logits": logits}
# Scenario 1: Run baseline training to completion
# 1.1 Run training to completion
set_seed(42)
train_dataset = DummyDataset(size=10)
model_baseline = DummyModel(size=10)
exp_dir_baseline = self.get_auto_remove_tmp_dir()
args_baseline = TrainingArguments(
output_dir=str(exp_dir_baseline),
seed=42,
learning_rate=0.1,
per_device_train_batch_size=2,
gradient_accumulation_steps=1,
save_strategy="steps",
save_steps=1,
num_train_epochs=3,
optim="sgd",
disable_tqdm=True,
dataloader_num_workers=0, # Ensures that main process loads the data
)
trainer_baseline = Trainer(
model=model_baseline,
args=args_baseline,
train_dataset=train_dataset,
)
trainer_baseline.train()
# 1.2 Get the data order from the last saved checkpoint for the full run
last_checkpoint_path = get_last_checkpoint(exp_dir_baseline)
last_ckpt_num = int(os.path.basename(last_checkpoint_path).split("-")[1]) # Must be 15
baseline_state_dict = safetensors.torch.load_file(
os.path.join(exp_dir_baseline, f"checkpoint-{last_ckpt_num}", "model.safetensors")
)
baseline_data_order = baseline_state_dict["data_order"]
# Scenario 2: Resume training from checkpoint in the middle of the second epoch
# 2.1 Resume training from the second batch of epoch 1 (target_ckpt_num = 7)
# 1 epoch consists of 10 points, so 5 steps with batch size 2
target_ckpt_num = 7
checkpoint_path = os.path.join(exp_dir_baseline, f"checkpoint-{target_ckpt_num - 1}")
set_seed(42)
model_resume = DummyModel(size=10)
exp_dir_resume = self.get_auto_remove_tmp_dir()
args_resume = TrainingArguments(
output_dir=str(exp_dir_resume),
seed=42,
learning_rate=0.1,
per_device_train_batch_size=2,
gradient_accumulation_steps=1,
save_strategy="steps",
save_steps=1,
num_train_epochs=3,
optim="sgd",
disable_tqdm=True,
dataloader_num_workers=0, # Ensures that main process loads the data
)
trainer_resume = Trainer(
model=model_resume,
args=args_resume,
train_dataset=train_dataset,
)
trainer_resume.train(resume_from_checkpoint=checkpoint_path)
# 2.2 Get the data order from the last saved checkpoint for the resumed run
resumed_state_dict = safetensors.torch.load_file(
os.path.join(exp_dir_resume, f"checkpoint-{last_ckpt_num}", "model.safetensors")
)
resumed_data_order = resumed_state_dict["data_order"]
# 3. Compare results: the data order should be identical
self.assertTrue(
torch.equal(baseline_data_order, resumed_data_order),
f"Data order mismatch after checkpoint deletion and resume.\n"
f"Baseline: {baseline_data_order}\n"
f"Resumed: {resumed_data_order}",
)
# ---------------------------------------------------------------------------
# JIT checkpoint tests
# ---------------------------------------------------------------------------
@require_torch
class JITCheckpointTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
import shutil
shutil.rmtree(self.test_dir, ignore_errors=True)
def get_trainer(self, enable_jit=True):
"""Helper method to create a trainer with JIT checkpointing enabled."""
from transformers import Trainer
model_config = RegressionModelConfig(a=1.5, b=2.5)
model = RegressionPreTrainedModel(model_config)
args = TrainingArguments(
output_dir=self.test_dir,
enable_jit_checkpoint=enable_jit,
per_device_train_batch_size=16,
learning_rate=0.1,
logging_steps=1,
num_train_epochs=1,
max_steps=10,
save_steps=10,
)
train_dataset = RegressionDataset(length=64)
return Trainer(model=model, args=args, train_dataset=train_dataset)
def test_checkpoint_manager_initialization(self):
"""Test CheckpointManager initialization with different configurations."""
trainer = self.get_trainer()
# Test with default parameters
manager = CheckpointManager(trainer)
self.assertEqual(manager.trainer, trainer)
self.assertEqual(manager.kill_wait, 3)
self.assertFalse(manager.is_checkpoint_requested)
# Test with custom parameters
manager_custom = CheckpointManager(trainer, kill_wait=5)
self.assertEqual(manager_custom.kill_wait, 5)
def test_signal_handler_setup(self):
"""Test signal handler setup and restoration."""
trainer = self.get_trainer()
manager = CheckpointManager(trainer)
# Store original handler
original_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL)
try:
# Setup JIT signal handler
manager.setup_signal_handler()
# Verify handler is set
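            # signal.signal installs a new handler and returns the previous one, which is how the current
            # handler is read back (and later restored) here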
current_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.assertNotEqual(current_handler, signal.SIG_DFL)
# Verify original handler is stored
self.assertIsNotNone(manager._original_sigterm_handler)
finally:
# Restore original handler
signal.signal(signal.SIGTERM, original_handler)
@patch("threading.Timer")
def test_sigterm_handler_flow(self, mock_timer):
"""Test SIGTERM handler execution flow."""
trainer = self.get_trainer()
manager = CheckpointManager(trainer, kill_wait=2)
# Mock timer to prevent actual threading
mock_timer_instance = Mock()
mock_timer.return_value = mock_timer_instance
# Test first SIGTERM call
self.assertFalse(manager.is_checkpoint_requested)
manager._sigterm_handler(signal.SIGTERM, None)
# Verify checkpoint was NOT immediately requested (timer is used)
self.assertFalse(manager.is_checkpoint_requested)
# Verify timer was created with kill_wait period and correct callback
mock_timer.assert_called_once_with(2, manager._enable_checkpoint)
mock_timer_instance.start.assert_called_once()
# Manually trigger the timer callback to test flag setting
manager._enable_checkpoint()
# Verify checkpoint is now requested
self.assertTrue(manager.is_checkpoint_requested)
# Test second SIGTERM call (should be ignored)
mock_timer.reset_mock()
manager._sigterm_handler(signal.SIGTERM, None)
# Verify no additional timer was created
mock_timer.assert_not_called()
def test_toggle_checkpoint_flag(self):
"""Test the toggle checkpoint flag method."""
trainer = self.get_trainer()
manager = CheckpointManager(trainer)
# Initially should not be requested
self.assertFalse(manager.is_checkpoint_requested)
# Toggle flag
manager._enable_checkpoint()
# Should now be requested
self.assertTrue(manager.is_checkpoint_requested)
def test_execute_jit_checkpoint(self):
"""Test the checkpoint execution logic with sentinel file."""
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
trainer = self.get_trainer()
manager = CheckpointManager(trainer)
# Mock trainer's save checkpoint method
trainer._save_checkpoint = Mock()
trainer.state.global_step = 42
# Set checkpoint requested flag
manager.is_checkpoint_requested = True
# Execute checkpoint
manager.execute_jit_checkpoint()
# Verify checkpoint was called
trainer._save_checkpoint.assert_called_once_with(trainer.model, trial=None)
# Verify checkpoint flag was reset
self.assertFalse(manager.is_checkpoint_requested)
# Verify sentinel file was removed (should be in checkpoint-42 folder)
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-42"
sentinel_file = os.path.join(self.test_dir, checkpoint_folder, "checkpoint-is-incomplete.txt")
self.assertFalse(os.path.exists(sentinel_file))
def test_execute_jit_checkpoint_sentinel_file_cleanup(self):
"""Test that sentinel file is cleaned up after successful checkpoint."""
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
trainer = self.get_trainer()
manager = CheckpointManager(trainer)
# Mock trainer's save checkpoint method
trainer._save_checkpoint = Mock()
trainer.state.global_step = 42
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-42"
sentinel_file = os.path.join(self.test_dir, checkpoint_folder, "checkpoint-is-incomplete.txt")
# Execute checkpoint
manager.execute_jit_checkpoint()
# Verify sentinel file doesn't exist after successful checkpoint
self.assertFalse(os.path.exists(sentinel_file))
def test_execute_jit_checkpoint_with_exception(self):
"""Test checkpoint execution with exception handling."""
trainer = self.get_trainer()
manager = CheckpointManager(trainer)
# Mock trainer's save checkpoint method to raise exception
trainer._save_checkpoint = Mock(side_effect=Exception("Checkpoint failed"))
trainer.state.global_step = 42
# Test that exception is re-raised
with self.assertRaises(Exception) as context:
manager.execute_jit_checkpoint()
self.assertEqual(str(context.exception), "Checkpoint failed")
# Verify checkpoint flag was still reset to avoid multiple attempts
self.assertFalse(manager.is_checkpoint_requested)
def test_jit_checkpoint_callback_initialization(self):
"""Test JITCheckpointCallback initialization."""
callback = JITCheckpointCallback()
self.assertIsNone(callback.trainer)
self.assertIsNone(callback.jit_manager)
def test_jit_checkpoint_callback_set_trainer_enabled(self):
"""Test setting trainer with JIT checkpointing enabled."""
trainer = self.get_trainer(enable_jit=True)
callback = JITCheckpointCallback()
with patch.object(CheckpointManager, "setup_signal_handler") as mock_setup:
callback.set_trainer(trainer)
self.assertEqual(callback.trainer, trainer)
self.assertIsNotNone(callback.jit_manager)
self.assertIsInstance(callback.jit_manager, CheckpointManager)
mock_setup.assert_called_once()
def test_jit_checkpoint_callback_set_trainer_disabled(self):
"""Test setting trainer with JIT checkpointing disabled."""
trainer = self.get_trainer(enable_jit=False)
callback = JITCheckpointCallback()
callback.set_trainer(trainer)
self.assertEqual(callback.trainer, trainer)
self.assertIsNone(callback.jit_manager)
def test_jit_checkpoint_callback_on_pre_optimizer_step(self):
"""Test callback behavior during pre-optimizer step."""
trainer = self.get_trainer()
callback = JITCheckpointCallback()
callback.set_trainer(trainer)
# Mock control object
control = Mock()
control.should_training_stop = False
# Mock execute method
with patch.object(callback.jit_manager, "execute_jit_checkpoint") as mock_execute:
# Test when checkpoint not requested
callback.jit_manager.is_checkpoint_requested = False
callback.on_pre_optimizer_step(trainer.args, trainer.state, control)
self.assertFalse(control.should_training_stop)
mock_execute.assert_not_called()
# Test when checkpoint requested
callback.jit_manager.is_checkpoint_requested = True
callback.on_pre_optimizer_step(trainer.args, trainer.state, control)
self.assertTrue(control.should_training_stop)
mock_execute.assert_called_once()
def test_jit_checkpoint_callback_on_step_begin(self):
"""Test callback behavior at step begin."""
trainer = self.get_trainer()
callback = JITCheckpointCallback()
callback.set_trainer(trainer)
# Mock control object
control = Mock()
control.should_training_stop = False
# Mock execute method
with patch.object(callback.jit_manager, "execute_jit_checkpoint") as mock_execute:
# Test when checkpoint not requested
callback.jit_manager.is_checkpoint_requested = False
callback.on_step_begin(trainer.args, trainer.state, control)
self.assertFalse(control.should_training_stop)
mock_execute.assert_not_called()
# Test when checkpoint requested
callback.jit_manager.is_checkpoint_requested = True
callback.on_step_begin(trainer.args, trainer.state, control)
self.assertTrue(control.should_training_stop)
mock_execute.assert_called_once()
def test_jit_checkpoint_callback_on_step_end(self):
"""Test callback behavior at step end."""
trainer = self.get_trainer()
callback = JITCheckpointCallback()
callback.set_trainer(trainer)
# Mock control object
control = Mock()
control.should_training_stop = False
control.should_save = True
# Mock execute method
with patch.object(callback.jit_manager, "execute_jit_checkpoint") as mock_execute:
# Test when checkpoint not requested
callback.jit_manager.is_checkpoint_requested = False
callback.on_step_end(trainer.args, trainer.state, control)
self.assertFalse(control.should_training_stop)
mock_execute.assert_not_called()
# Reset control
control.should_save = True
# Test when checkpoint requested
callback.jit_manager.is_checkpoint_requested = True
callback.on_step_end(trainer.args, trainer.state, control)
self.assertTrue(control.should_training_stop)
self.assertFalse(control.should_save)
mock_execute.assert_called_once()
def test_jit_checkpoint_callback_on_epoch_end(self):
"""Test callback behavior at epoch end."""
trainer = self.get_trainer()
callback = JITCheckpointCallback()
callback.set_trainer(trainer)
# Mock control object
control = Mock()
control.should_save = True
control.should_training_stop = False
# Mock execute method
with patch.object(callback.jit_manager, "execute_jit_checkpoint") as mock_execute:
# Test when checkpoint not requested
callback.jit_manager.is_checkpoint_requested = False
callback.on_epoch_end(trainer.args, trainer.state, control)
# should_save should remain unchanged when checkpoint not requested
self.assertTrue(control.should_save)
self.assertFalse(control.should_training_stop)
mock_execute.assert_not_called()
# Reset control
control.should_save = True
control.should_training_stop = False
# Test when checkpoint requested
callback.jit_manager.is_checkpoint_requested = True
callback.on_epoch_end(trainer.args, trainer.state, control)
self.assertFalse(control.should_save)
self.assertTrue(control.should_training_stop)
mock_execute.assert_called_once()
def test_jit_checkpoint_callback_on_train_end(self):
"""Test signal handler restoration on training end."""
trainer = self.get_trainer()
callback = JITCheckpointCallback()
# Store original SIGTERM handler
original_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL)
try:
callback.set_trainer(trainer)
# Verify signal handler was set up
self.assertIsNotNone(callback.jit_manager._original_sigterm_handler)
# Mock control object
control = Mock()
# Call on_train_end
callback.on_train_end(trainer.args, trainer.state, control)
# Verify signal handler was restored
current_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.assertEqual(current_handler, callback.jit_manager._original_sigterm_handler)
finally:
# Restore original handler for cleanup
signal.signal(signal.SIGTERM, original_handler)
@patch("threading.Timer")
def test_kill_wait_period(self, mock_timer):
"""Test the kill wait period functionality."""
trainer = self.get_trainer()
manager = CheckpointManager(trainer, kill_wait=5)
mock_timer_instance = Mock()
mock_timer.return_value = mock_timer_instance
manager._sigterm_handler(signal.SIGTERM, None)
# Verify Timer was created with the correct kill_wait period and callback
mock_timer.assert_called_once_with(5, manager._enable_checkpoint)
mock_timer_instance.start.assert_called_once()
def test_integration_with_trainer(self):
"""Test integration of JIT checkpointing with Trainer."""
trainer = self.get_trainer(enable_jit=True)
# Check that JIT callback was added
jit_callbacks = [cb for cb in trainer.callback_handler.callbacks if isinstance(cb, JITCheckpointCallback)]
self.assertEqual(len(jit_callbacks), 1)
jit_callback = jit_callbacks[0]
self.assertIsNotNone(jit_callback.jit_manager)
self.assertEqual(jit_callback.trainer, trainer)
# ---------------------------------------------------------------------------
# Trainer saving tests (tokenizer, image processor, feature extractor, etc.)
# ---------------------------------------------------------------------------
@require_torch
class TrainerSavingTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_trainer_saves_tokenizer(self):
MODEL_ID = "google-bert/bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
trainer = Trainer(
model=RegressionPreTrainedModel(config),
args=TrainingArguments(output_dir=tmp_dir),
processing_class=tokenizer,
)
trainer.save_model()
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
            # Tokenizers don't have a direct to_dict method, and the properties stored in their configs (e.g. the
            # saved tokens) change over time, so we check that two tokenizers are equal by comparing their encoded outputs
test_sentence = "This is a test sentence"
self.assertListEqual(
tokenizer(test_sentence, padding="max_length").input_ids,
reloaded_tokenizer(test_sentence, padding="max_length").input_ids,
)
@require_vision
def test_trainer_saves_image_processor(self):
MODEL_ID = "openai/clip-vit-base-patch32"
image_processor = AutoImageProcessor.from_pretrained(MODEL_ID)
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
trainer = Trainer(
model=RegressionPreTrainedModel(config),
args=TrainingArguments(output_dir=tmp_dir),
processing_class=image_processor,
)
trainer.save_model()
reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
self.assertDictEqual(image_processor.to_dict(), reloaded_image_processor.to_dict())
def test_trainer_saves_feature_extractor(self):
MODEL_ID = "facebook/wav2vec2-base-960h"
feature_extractor = AutoFeatureExtractor.from_pretrained(MODEL_ID)
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
trainer = Trainer(
model=RegressionPreTrainedModel(config),
args=TrainingArguments(output_dir=tmp_dir),
processing_class=feature_extractor,
)
trainer.save_model()
reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
self.assertDictEqual(feature_extractor.to_dict(), reloaded_feature_extractor.to_dict())
@require_vision
def test_trainer_saves_processor(self):
MODEL_ID = "openai/clip-vit-base-patch32"
image_processor = AutoImageProcessor.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
processor = AutoProcessor.from_pretrained(MODEL_ID)
with tempfile.TemporaryDirectory() as tmp_dir:
config = RegressionModelConfig(a=1.5, b=2.5)
trainer = Trainer(
model=RegressionPreTrainedModel(config),
args=TrainingArguments(output_dir=tmp_dir),
processing_class=processor,
)
trainer.save_model()
reloaded_processor = AutoProcessor.from_pretrained(tmp_dir)
reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertDictEqual(reloaded_processor.to_dict(), processor.to_dict())
image_processor_dict = image_processor.to_dict()
reloaded_image_processor_dict = reloaded_image_processor.to_dict()
self.assertDictEqual(image_processor_dict, reloaded_image_processor_dict)
            # Tokenizers have no direct `to_dict` method, and the properties stored in their configs (e.g. the
            # saved tokens) change over time, so we check that the two tokenizers are equal by comparing their
            # encoded outputs instead.
test_sentence = "This is a test sentence"
self.assertListEqual(
tokenizer(test_sentence, padding="max_length").input_ids,
reloaded_tokenizer(test_sentence, padding="max_length").input_ids,
)
# ---------------------------------------------------------------------------
# Best model selection and loading tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerBestModelTest(TestCasePlus, TrainerIntegrationCommon):
"""Tests for best model selection, loading, and checkpoint behavior."""
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_load_best_model_with_save(self):
tmp_dir = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(
output_dir=tmp_dir,
save_steps=5,
eval_strategy="steps",
eval_steps=5,
max_steps=9,
)
trainer.train()
# Check that we have the last known step:
assert os.path.exists(os.path.join(tmp_dir, f"checkpoint-{trainer.state.max_steps}")), (
f"Could not find checkpoint-{trainer.state.max_steps}"
)
# And then check the last step
assert os.path.exists(os.path.join(tmp_dir, "checkpoint-9")), "Could not find checkpoint-9"
        # Now test that using a limit works
        # Should result in:
        # - save at step 5 (but is deleted)
        # - save at step 10 (loaded in at the end when `load_best_model_at_end=True`)
        # - save at step 11
tmp_dir = self.get_auto_remove_tmp_dir()
trainer = get_regression_trainer(
output_dir=tmp_dir,
save_steps=5,
eval_strategy="steps",
eval_steps=5,
load_best_model_at_end=True,
save_total_limit=2,
max_steps=11,
)
trainer.train()
# Check that we have the last known step:
assert os.path.exists(os.path.join(tmp_dir, "checkpoint-11")), "Could not find checkpoint-11"
# And then check the last multiple
assert os.path.exists(os.path.join(tmp_dir, "checkpoint-10")), "Could not find checkpoint-10"
# Finally check that we don't have an old one
assert not os.path.exists(os.path.join(tmp_dir, "checkpoint-5")), "Found checkpoint-5, limit not respected"
        # Finally check that the right model was loaded in: checkpoint-10.
        # The best model corresponds to the last eval step (step 10), not the last
        # checkpoint *saved* (step 11).
model_state = trainer.model.state_dict()
final_model_weights = safetensors.torch.load_file(os.path.join(tmp_dir, "checkpoint-10", "model.safetensors"))
for k, v in model_state.items():
assert torch.allclose(v, final_model_weights[k]), f"{k} is not the same"
def test_save_best_checkpoint(self):
freq = int(64 / self.batch_size)
total = int(self.n_epochs * 64 / self.batch_size)
# Case 1: args.metric_for_best_model == "accuracy".
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_strategy="epoch",
save_strategy="best",
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.metric_for_best_model == "accuracy")
with patch.object(
trainer,
"_evaluate",
side_effect=[
{"eval_loss": 0.03, "eval_accuracy": 0.60, "epoch": 1.0},
{"eval_loss": 0.02, "eval_accuracy": 0.65, "epoch": 2.0},
{"eval_loss": 0.01, "eval_accuracy": 0.64, "epoch": 3.0},
],
):
trainer.train()
self.assertEqual(len(os.listdir(tmpdir)), 2)
self.check_saved_checkpoints(
output_dir=tmpdir,
freq=freq,
total=total,
)
# Case 2: args.metric_for_best_model == "loss".
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_strategy="epoch",
save_strategy="best",
metric_for_best_model="loss",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.metric_for_best_model == "loss")
with patch.object(
trainer,
"_evaluate",
side_effect=[
{"eval_loss": 0.03, "eval_accuracy": 0.60, "epoch": 1.0},
{"eval_loss": 0.02, "eval_accuracy": 0.65, "epoch": 2.0},
{"eval_loss": 0.03, "eval_accuracy": 0.66, "epoch": 3.0},
],
):
trainer.train()
self.assertEqual(len(os.listdir(tmpdir)), 2)
self.check_saved_checkpoints(
output_dir=tmpdir,
freq=freq,
total=total,
)
def test_metric_for_best_model_behavior(self):
# Case 1: Metric name not provided when `save_strategy == "best"`.
# Should raise ValueError.
with tempfile.TemporaryDirectory() as tmpdir:
with self.assertRaises(ValueError) as context:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_strategy="epoch",
save_strategy="best",
compute_metrics=AlmostAccuracy(),
)
self.assertIn("`args.metric_for_best_model` must be provided", str(context.exception))
# Case 2: Metric name not provided when `load_best_model_at_end == True`.
# `metric_for_best_model` should be set to `"loss"` by default.
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_strategy="steps",
save_strategy="steps",
load_best_model_at_end=True,
)
self.assertTrue(trainer.args.metric_for_best_model == "loss")
def test_best_model_checkpoint_behavior(self):
# Case 1. Never evaluated, save_total_limit > 1 and save_steps == 1.
# Both best_metric and best_model_checkpoint should be None.
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
eval_strategy="steps",
save_strategy="steps",
save_steps=1,
metric_for_best_model="accuracy",
greater_is_better=True,
)
trainer.train()
assert trainer.state.best_metric is None
assert trainer.state.best_model_checkpoint is None
assert len(os.listdir(tmpdir)) == trainer.state.global_step
# Case 2. Never evaluated and save_total_limit == 1.
# Both best_metric and best_model_checkpoint should be None.
# Only the last checkpoint should remain.
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
eval_strategy="steps",
save_strategy="steps",
save_steps=1,
metric_for_best_model="accuracy",
greater_is_better=True,
save_total_limit=1,
)
trainer.train()
num_steps = trainer.state.global_step
assert trainer.state.best_metric is None
assert trainer.state.best_model_checkpoint is None
assert len(os.listdir(tmpdir)) == 1
ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{num_steps}")
assert os.path.isdir(ckpt)
assert os.listdir(tmpdir)[0] == f"{PREFIX_CHECKPOINT_DIR}-{num_steps}"
# Case 3. eval_strategy == save_strategy.
# best_model_checkpoint should be at epoch 1.
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
eval_strategy="epoch",
save_strategy="epoch",
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
greater_is_better=True,
load_best_model_at_end=False,
)
with patch.object(
trainer,
"_evaluate",
side_effect=evaluate_side_effect_factory(
[
{"eval_accuracy": 0.59},
{"eval_accuracy": 0.57},
{"eval_accuracy": 0.55},
]
),
):
trainer.train()
steps_per_epoch = get_steps_per_epoch(trainer)
assert trainer.state.best_metric == 0.59
assert trainer.state.best_global_step == steps_per_epoch
best_ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{trainer.state.best_global_step}")
assert trainer.state.best_model_checkpoint == best_ckpt
assert len(os.listdir(tmpdir)) == trainer.state.num_train_epochs
# Case 4. eval_strategy != save_strategy.
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
eval_strategy="epoch",
save_strategy="steps",
save_steps=1,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
greater_is_better=True,
load_best_model_at_end=False,
)
with patch.object(
trainer,
"_evaluate",
side_effect=evaluate_side_effect_factory(
[
{"eval_accuracy": 0.59},
{"eval_accuracy": 0.57},
{"eval_accuracy": 0.55},
]
),
):
trainer.train()
steps_per_epoch = get_steps_per_epoch(trainer)
assert trainer.state.best_metric == 0.59
assert trainer.state.best_global_step == steps_per_epoch
best_ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{trainer.state.best_global_step}")
assert trainer.state.best_model_checkpoint == best_ckpt
assert len(os.listdir(tmpdir)) == trainer.state.global_step
# Case 5. Multiple checkpoints, save_total_limit == 1.
# Best metric is found at step 1 and that checkpoint should be saved.
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
eval_strategy="steps",
eval_steps=1,
save_strategy="steps",
save_steps=1,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
greater_is_better=True,
save_total_limit=1,
)
with patch.object(
trainer,
"_evaluate",
side_effect=evaluate_side_effect_factory(
[
{"eval_accuracy": 0.90},
{"eval_accuracy": 0.80},
{"eval_accuracy": 0.70},
]
),
):
trainer.train()
assert trainer.state.best_metric == 0.90
assert trainer.state.best_global_step == 1
best_ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{trainer.state.best_global_step}")
assert trainer.state.best_model_checkpoint == best_ckpt
assert len(os.listdir(tmpdir)) == 1
# Case 6. Saving happens more often and eval/save mismatch.
# `best_model_checkpoint` should be None due to a step mismatch.
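        # (with eval_steps=3 and save_steps=2, evals land on steps 3, 6, 9, ... while saves land on
        # even steps, so no saved checkpoint ever coincides with the best eval step)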
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
eval_strategy="steps",
eval_steps=3,
save_strategy="steps",
save_steps=2,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
greater_is_better=True,
)
with patch.object(
trainer,
"_evaluate",
side_effect=evaluate_side_effect_factory(
[
{"eval_accuracy": 0.90},
{"eval_accuracy": 0.80},
{"eval_accuracy": 0.70},
]
),
):
trainer.train()
assert trainer.state.best_metric == 0.90
assert trainer.state.best_global_step == 3
assert trainer.state.best_model_checkpoint is None
assert len(os.listdir(tmpdir)) == trainer.state.global_step // 2
def test_load_best_model_at_end(self):
total = int(self.n_epochs * 64 / self.batch_size)
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
eval_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss")
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
eval_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True)
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
metric_for_best_model="accuracy",
compute_metrics=AlmostAccuracy(),
)
self.assertTrue(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total)
self.check_best_model_has_been_loaded(
tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True
)
# Test this works with a non PreTrainedModel
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
eval_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
pretrained=False,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False)
def test_load_best_model_from_safetensors(self):
total = int(self.n_epochs * 64 / self.batch_size)
for pretrained in [False, True]:
with tempfile.TemporaryDirectory() as tmpdir:
trainer = get_regression_trainer(
a=1.5,
b=2.5,
output_dir=tmpdir,
learning_rate=0.1,
eval_steps=5,
eval_strategy="steps",
save_steps=5,
load_best_model_at_end=True,
pretrained=pretrained,
)
self.assertFalse(trainer.args.greater_is_better)
trainer.train()
self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=pretrained)
self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=pretrained)
# ---------------------------------------------------------------------------
# Hub integration tests (push, tags, revision)
# ---------------------------------------------------------------------------
@require_torch
@is_staging_test
class TrainerIntegrationWithHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
def test_push_to_hub(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
output_dir_name = tmp_repo.repo_name
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"{USER}/{output_dir_name}")
model = RegressionPreTrainedModel.from_pretrained(repo_name)
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def test_push_to_hub_in_organization(self):
with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=tmp_dir)
trainer.save_model()
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_model_id=f"valid_org/{output_dir_name}",
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"valid_org/{output_dir_name}")
model = RegressionPreTrainedModel.from_pretrained(f"valid_org/{output_dir_name}")
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item())
def get_commit_history(self, repo):
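        """Return the commit messages of a local git repository, newest first."""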
commit_logs = subprocess.run(
["git", "log"],
capture_output=True,
check=True,
encoding="utf-8",
cwd=repo,
).stdout
commits = commit_logs.split("\n\n")[1::2]
return [commit.strip() for commit in commits]
# TODO: @ydshieh or @SunMarc
@unittest.skip("unknown failure reason, possibly staging hub issue")
def test_push_to_hub_with_saves_each_epoch(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertLogs(level="WARNING") as logs:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
# To avoid any flakiness if the training goes faster than the uploads.
hub_always_push=True,
save_strategy="epoch",
)
trainer.train()
commits = list_repo_commits(f"{USER}/{output_dir_name}", token=self._token)
commits = [c.title for c in commits]
self.assertIn("initial commit", commits)
self.assertIn("Training in progress, epoch 1", commits)
self.assertIn("Training in progress, epoch 2", commits)
# Epochs 3 and 4 are not guaranteed to be present (empty commits)
self.assertTrue(any("Skipping to prevent empty commit." in record.message for record in logs.records))
def test_push_to_hub_with_saves_each_n_steps(self):
num_gpus = max(1, backend_device_count(torch_device))
if num_gpus > 2:
self.skipTest(reason="More than 2 GPUs available")
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
with self.assertLogs(level="WARNING") as logs:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
# To avoid any flakiness if the training goes faster than the uploads.
hub_always_push=True,
save_strategy="steps",
save_steps=5,
)
trainer.train()
commits = list_repo_commits(f"{USER}/{output_dir_name}", token=self._token)
commits = [c.title for c in commits]
self.assertIn("initial commit", commits)
                # Some commits are skipped if nothing has changed
                # We expect 1 commit per 5 steps + 1 commit at the end
nb_empty_commits = len(
[record for record in logs.records if "Skipping to prevent empty commit." in record.message]
)
nb_epoch_commits = len([commit for commit in commits if "Training in progress, step" in commit])
# max_steps depend on the number of available GPUs
max_steps = math.ceil(trainer.args.num_train_epochs * len(trainer.get_train_dataloader()))
nb_expected_commits = len(range(5, max_steps, 5))
# '>=' since final commit might be an empty commit as well (not deterministic)
self.assertGreaterEqual(nb_empty_commits + nb_epoch_commits, nb_expected_commits)
@require_tensorboard
def test_push_to_hub_with_tensorboard_logs(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
hub_token=self._token,
save_strategy="epoch",
report_to=["tensorboard"],
)
trainer.train()
# Push the runs via `push_to_hub()`
trainer.push_to_hub()
files = list_repo_files(f"{USER}/{output_dir_name}", token=self._token)
found_log = False
for f in files:
if len(f.split("runs")) > 1 and "events.out.tfevents" in f:
found_log = True
assert found_log is True, "No tensorboard log found in repo"
def test_push_to_hub_tags(self):
# Checks if `trainer.push_to_hub()` works correctly by adding the desired
# tag without having to pass `tags` in `push_to_hub`
# see:
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
)
trainer.model.add_model_tags(["test-trainer-tags"])
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"{USER}/{output_dir_name}")
model_card = ModelCard.load(repo_name)
self.assertTrue("test-trainer-tags" in model_card.data.tags)
def test_push_to_hub_with_revision(self):
# Checks if `trainer.push_to_hub()` works correctly by adding revision
with TemporaryHubRepo(token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_token=self._token,
)
branch = "v1.0"
create_branch(repo_id=trainer.hub_model_id, branch=branch, token=self._token, exist_ok=True)
push_commit = trainer.push_to_hub(revision=branch)
commits = list_repo_commits(repo_id=trainer.hub_model_id, revision=branch, token=self._token)
self.assertEqual(commits[0].commit_id, push_commit.oid)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/trainer/test_trainer_checkpointing.py",
"license": "Apache License 2.0",
"lines": 1843,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/trainer/test_trainer_evaluation.py | # Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trainer evaluation and prediction tests: evaluate, predict, batched metrics, dynamic shapes,
iterable datasets, early stopping, FP16/BF16 full eval memory, torch.compile, and MRPC/LM eval.
"""
import gc
import tempfile
import numpy as np
from transformers import (
AutoTokenizer,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_count,
get_tests_dir,
require_torch,
require_torch_accelerator,
require_torch_bf16,
require_torch_fp16,
slow,
torch_device,
)
from .trainer_test_utils import (
PATH_SAMPLE_TEXT,
AlmostAccuracy,
AlmostAccuracyBatched,
RegressionDataset,
RegressionDictModel,
TrainerIntegrationCommon,
get_dataset,
get_regression_trainer,
)
if is_torch_available():
import torch
from transformers import (
AutoModelForCausalLM,
AutoModelForSequenceClassification,
GlueDataset,
GlueDataTrainingArguments,
Trainer,
)
# ---------------------------------------------------------------------------
# Core evaluate / predict tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerEvaluationTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_evaluate(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), output_dir=tmp_dir)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(
a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), output_dir=tmp_dir
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With logits preprocess
trainer = get_regression_trainer(
a=1.5,
b=2.5,
compute_metrics=AlmostAccuracy(),
preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
output_dir=tmp_dir,
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_predict(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(a=1.5, b=2.5, output_dir=tmp_dir)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, output_dir=tmp_dir)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
# With more than one output of the model
trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, output_dir=tmp_dir)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
# With more than one output/label of the model
trainer = get_regression_trainer(
a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], output_dir=tmp_dir
)
outputs = trainer.predict(trainer.eval_dataset)
preds = outputs.predictions
labels = outputs.label_ids
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
def test_train_and_predict_loss_parity(self):
"""
Tests that the loss computed during a training_step is the same as the one computed during prediction_step.
for the same inputs
"""
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
# Create a dummy batch of inputs
inputs = {}
inputs["input_ids"] = []
for row_ind in range(4):
seq_len = torch.randint(32, 64, (1,)).item()
x = torch.randint(1, 100, (seq_len,))
inputs["input_ids"].append(x)
inputs["input_ids"] = torch.nn.utils.rnn.pad_sequence(inputs["input_ids"], batch_first=True, padding_value=0)
inputs["labels"] = inputs["input_ids"].clone()
inputs["labels"][inputs["input_ids"] == 0] = -100
num_items_in_batch = inputs["labels"].ne(-100).sum().item()
def custom_loss_func(outputs, labels, num_items_in_batch=None):
logits = outputs["logits"]
loss_fct = torch.nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
if num_items_in_batch is not None:
                return loss / num_items_in_batch  # normalize by the number of label tokens in the batch
return loss
trainer = Trainer(model, train_dataset=None, compute_loss_func=custom_loss_func)
        # run a training step and a prediction step on the same batch and compare the losses
train_loss = trainer.training_step(model, inputs, num_items_in_batch)
predict_loss = trainer.prediction_step(model, inputs, prediction_loss_only=True)[0]
torch.testing.assert_close(train_loss, predict_loss, atol=1e-6, rtol=0)
def test_eval_use_gather_object(self):
train_dataset = RegressionDataset()
eval_dataset = RegressionDataset()
model = RegressionDictModel()
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(tmp_dir, eval_use_gather_object=True)
trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
_ = trainer.evaluate()
_ = trainer.predict(eval_dataset)
# ---------------------------------------------------------------------------
# Batch eval metrics tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerBatchEvalMetricsTest(TestCasePlus, TrainerIntegrationCommon):
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_evaluate_with_batch_eval_metrics(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
a=1.5, b=2.5, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(
a=1.5,
b=2.5,
eval_len=66,
compute_metrics=AlmostAccuracyBatched(),
batch_eval_metrics=True,
output_dir=tmp_dir,
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
# With logits preprocess
trainer = get_regression_trainer(
a=1.5,
b=2.5,
compute_metrics=AlmostAccuracyBatched(),
batch_eval_metrics=True,
preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
output_dir=tmp_dir,
)
results = trainer.evaluate()
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
pred = 1.5 * x + 2.5
expected_loss = ((pred - y) ** 2).mean()
self.assertAlmostEqual(results["eval_loss"], expected_loss)
expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
self.assertAlmostEqual(results["eval_accuracy"], expected_acc)
def test_predict_with_batch_eval_metrics(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
a=1.5, b=2.5, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir
)
results = trainer.predict(trainer.eval_dataset)
preds = results.predictions
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
gt = 1.5 * x + 2.5
self.assertTrue(np.allclose(preds, gt))
expected_acc = AlmostAccuracy()((preds, y))["accuracy"]
self.assertAlmostEqual(results.metrics["test_accuracy"], expected_acc)
# With a number of elements not a round multiple of the batch size
trainer = get_regression_trainer(
a=1.5,
b=2.5,
eval_len=66,
compute_metrics=AlmostAccuracyBatched(),
batch_eval_metrics=True,
output_dir=tmp_dir,
)
results = trainer.predict(trainer.eval_dataset)
preds = results.predictions
x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))
expected_acc = AlmostAccuracy()((preds, y))["accuracy"]
self.assertAlmostEqual(results.metrics["test_accuracy"], expected_acc)
# With more than one output of the model
trainer = get_regression_trainer(
a=1.5,
b=2.5,
double_output=True,
compute_metrics=AlmostAccuracyBatched(),
batch_eval_metrics=True,
output_dir=tmp_dir,
)
preds = trainer.predict(trainer.eval_dataset).predictions
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
# With more than one output/label of the model
trainer = get_regression_trainer(
a=1.5,
b=2.5,
double_output=True,
label_names=["labels", "labels_2"],
compute_metrics=AlmostAccuracyBatched(),
batch_eval_metrics=True,
output_dir=tmp_dir,
)
outputs = trainer.predict(trainer.eval_dataset)
preds = outputs.predictions
labels = outputs.label_ids
x = trainer.eval_dataset.x
self.assertEqual(len(preds), 2)
self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))
# ---------------------------------------------------------------------------
# FP16 / BF16 full eval memory tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerFullEvalMemoryTest(TestCasePlus):
@require_torch_fp16
@require_torch_accelerator
def test_fp16_full_eval(self):
# this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
# it's using pretty large safety margins, but small enough to detect broken functionality.
debug = 0
n_gpus = backend_device_count(torch_device)
with tempfile.TemporaryDirectory() as tmp_dir:
bs = 8
eval_len = 16 * n_gpus
# make the params somewhat big so that there will be enough RAM consumed to be able to
# measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
# 1. with fp16_full_eval disabled
trainer = get_regression_trainer(
a=a, b=b, eval_len=eval_len, skip_memory_metrics=False, output_dir=tmp_dir
)
metrics = trainer.evaluate()
del trainer
gc.collect()
fp32_init = metrics["init_mem_gpu_alloc_delta"]
fp32_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp32_init {fp32_init}")
print(f"fp32_eval {fp32_eval}")
# here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
# perfect world: fp32_init == 64<<10
self.assertGreater(fp32_init, 59_000)
# after eval should be no extra memory allocated - with a small margin (other than the peak
# memory consumption for the forward calculation that gets recovered)
# perfect world: fp32_eval == close to zero
self.assertLess(fp32_eval, 5_000)
# 2. with fp16_full_eval enabled
trainer = get_regression_trainer(
a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False, output_dir=tmp_dir
)
metrics = trainer.evaluate()
fp16_init = metrics["init_mem_gpu_alloc_delta"]
fp16_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp16_init {fp16_init}")
print(f"fp16_eval {fp16_eval}")
# here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
# perfect world: fp16_init == close to zero
self.assertLess(fp16_init, 5_000)
            # here we put the model on the device in eval and only `half()` of it, i.e. about 32K
            # (again we ignore the peak margin which gets returned back)
            # perfect world: fp16_eval == 32<<10
self.assertGreater(fp16_eval, 27_000)
# 3. relative comparison fp32 vs full fp16
            # fp16_eval should be about half of fp32_init
# perfect world: fp32_init/2 == fp16_eval
self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)
@require_torch_accelerator
@require_torch_bf16
def test_bf16_full_eval(self):
# note: most of the logic is the same as test_fp16_full_eval
# this is a sensitive test so let's keep debugging printouts in place for quick diagnosis.
# it's using pretty large safety margins, but small enough to detect broken functionality.
debug = 0
n_gpus = backend_device_count(torch_device)
bs = 8
eval_len = 16 * n_gpus
# make the params somewhat big so that there will be enough RAM consumed to be able to
# measure things. We should get about 64KB for a+b in fp32
a = torch.ones(1000, bs) + 0.001
b = torch.ones(1000, bs) - 0.001
with tempfile.TemporaryDirectory() as tmp_dir:
# 1. with bf16_full_eval disabled
trainer = get_regression_trainer(
a=a, b=b, eval_len=eval_len, skip_memory_metrics=False, output_dir=tmp_dir
)
metrics = trainer.evaluate()
del trainer
gc.collect()
fp32_init = metrics["init_mem_gpu_alloc_delta"]
fp32_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"fp32_init {fp32_init}")
print(f"fp32_eval {fp32_eval}")
# here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram.
# perfect world: fp32_init == 64<<10
self.assertGreater(fp32_init, 59_000)
# after eval should be no extra memory allocated - with a small margin (other than the peak
# memory consumption for the forward calculation that gets recovered)
# perfect world: fp32_eval == close to zero
self.assertLess(fp32_eval, 5_000)
# 2. with bf16_full_eval enabled
trainer = get_regression_trainer(
a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False, output_dir=tmp_dir
)
metrics = trainer.evaluate()
bf16_init = metrics["init_mem_gpu_alloc_delta"]
bf16_eval = metrics["eval_mem_gpu_alloc_delta"]
if debug:
print(f"bf16_init {bf16_init}")
print(f"bf16_eval {bf16_eval}")
# here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0
# perfect world: bf16_init == close to zero
self.assertLess(bf16_init, 5_000)
            # here we put the model on the device in eval and only `half()` of it, i.e. about 32K
            # (again we ignore the peak margin which gets returned back)
            # perfect world: bf16_eval == 32<<10
self.assertGreater(bf16_eval, 27_000)
# 3. relative comparison fp32 vs full bf16
            # bf16_eval should be about half of fp32_init
# perfect world: fp32_init/2 == bf16_eval
self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000)
# ---------------------------------------------------------------------------
# Slow external model eval tests
# ---------------------------------------------------------------------------
@require_torch
class TrainerSlowEvalTest(TestCasePlus):
@slow
def test_trainer_eval_mrpc(self):
MODEL_ID = "google-bert/bert-base-cased-finetuned-mrpc"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True
)
eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(output_dir=tmp_dir, use_cpu=True)
trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset)
result = trainer.evaluate()
self.assertLess(result["eval_loss"], 0.2)
@slow
def test_trainer_eval_multiple(self):
MODEL_ID = "openai-community/gpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
dataset = get_dataset(PATH_SAMPLE_TEXT, tokenizer, 100)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
use_cpu=True,
per_device_eval_batch_size=1,
)
trainer = Trainer(
model=model,
args=training_args,
eval_dataset={
"data1": dataset,
"data2": dataset,
},
)
result = trainer.evaluate()
self.assertIn("eval_data1_loss", result)
self.assertIn("eval_data2_loss", result)
@slow
def test_trainer_eval_lm(self):
MODEL_ID = "distilbert/distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
dataset = get_dataset(PATH_SAMPLE_TEXT, tokenizer, 100)
self.assertEqual(len(dataset), 31)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/trainer/test_trainer_evaluation.py",
"license": "Apache License 2.0",
"lines": 445,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/trainer/test_trainer_hyperparameter.py | # Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trainer hyperparameter search tests: Optuna (single/multi-objective, full eval),
Ray Tune (with client), W&B sweeps, and backend availability detection.
"""
import tempfile
import unittest
from transformers import TrainingArguments
from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS, HPSearchBackend
from transformers.testing_utils import require_optuna, require_ray, require_torch, require_wandb, torch_device
from transformers.trainer_utils import IntervalStrategy
from transformers.utils.hp_naming import TrialShortNamer
from .trainer_test_utils import (
AlmostAccuracy,
RegressionModelConfig,
RegressionPreTrainedModel,
get_regression_trainer,
)
@require_torch
@require_optuna
class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return {}
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config).to(torch_device)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
eval_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4)
@require_torch
@require_optuna
class TrainerHyperParameterMultiObjectOptunaIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
return {}
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config).to(torch_device)
def hp_name(trial):
return MyTrialShortNamer.shortname(trial.params)
def compute_objective(metrics: dict[str, float]) -> list[float]:
return metrics["eval_loss"], metrics["eval_accuracy"]
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
eval_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=10,
disable_tqdm=True,
load_best_model_at_end=True,
run_name="test",
model_init=model_init,
compute_metrics=AlmostAccuracy(),
)
trainer.hyperparameter_search(
direction=["minimize", "maximize"],
hp_space=hp_space,
hp_name=hp_name,
n_trials=4,
compute_objective=compute_objective,
)
@require_torch
@require_optuna
class TrainerHyperParameterOptunaIntegrationTestWithFullEval(unittest.TestCase):
def test_hyperparameter_search(self):
def hp_space(trial):
return {}
def model_init(trial):
if trial is not None:
a = trial.suggest_int("a", -4, 4)
b = trial.suggest_int("b", -4, 4)
else:
a = 0
b = 0
config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(config).to(torch_device)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
disable_tqdm=True,
model_init=model_init,
fp16_full_eval=True,
)
trainer.hyperparameter_search(
direction="minimize",
hp_space=hp_space,
n_trials=2,
)
@require_torch
@require_ray
@unittest.skip("don't work because of a serialization issue")
class TrainerHyperParameterRayIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def ray_hyperparameter_search(self):
class MyTrialShortNamer(TrialShortNamer):
DEFAULTS = {"a": 0, "b": 0}
def hp_space(trial):
from ray import tune
return {
"a": tune.randint(-4, 4),
"b": tune.randint(-4, 4),
}
def model_init(config):
if config is None:
a = 0
b = 0
else:
a = config["a"]
b = config["b"]
model_config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(model_config).to(torch_device)
def hp_name(params):
return MyTrialShortNamer.shortname(params)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
eval_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
run_name="test",
model_init=model_init,
)
trainer.hyperparameter_search(
direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4
)
def test_hyperparameter_search(self):
self.ray_hyperparameter_search()
def test_hyperparameter_search_ray_client(self):
import ray
from ray.util.client.ray_client_helpers import ray_start_client_server
with ray_start_client_server():
assert ray.util.client.ray.is_connected()
self.ray_hyperparameter_search()
@require_torch
@require_wandb
class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase):
def setUp(self):
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
def test_hyperparameter_search(self):
def hp_space(trial):
return {
"method": "random",
"metric": {},
"parameters": {
"a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"b": {"distribution": "int_uniform", "min": 1, "max": 6},
},
}
def model_init(config):
if config is None:
a = 0
b = 0
else:
a = config["a"]
b = config["b"]
model_config = RegressionModelConfig(a=a, b=b, double_output=False)
return RegressionPreTrainedModel(model_config).to(torch_device)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
output_dir=tmp_dir,
learning_rate=0.1,
logging_steps=1,
eval_strategy=IntervalStrategy.EPOCH,
save_strategy=IntervalStrategy.EPOCH,
num_train_epochs=4,
disable_tqdm=True,
load_best_model_at_end=True,
run_name="test",
model_init=model_init,
)
sweep_kwargs = {
"direction": "minimize",
"hp_space": hp_space,
"backend": "wandb",
"n_trials": 4,
}
best_run = trainer.hyperparameter_search(**sweep_kwargs)
self.assertIsNotNone(best_run.run_id)
self.assertIsNotNone(best_run.run_summary)
hp_keys = set(best_run.hyperparameters.keys())
self.assertSetEqual(hp_keys, {"a", "b", "assignments", "metric"})
# pretend restarting the process purged the environ
import os
del os.environ["WANDB_ENTITY"]
del os.environ["WANDB_PROJECT"]
sweep_kwargs["sweep_id"] = best_run.run_summary
updated_best_run = trainer.hyperparameter_search(**sweep_kwargs)
self.assertIsNotNone(updated_best_run.run_id)
self.assertEqual(updated_best_run.run_summary, best_run.run_summary)
updated_hp_keys = set(updated_best_run.hyperparameters.keys())
self.assertSetEqual(updated_hp_keys, {"a", "b", "assignments", "metric"})
class HyperParameterSearchBackendsTest(unittest.TestCase):
def test_hyperparameter_search_backends(self):
self.assertEqual(
list(ALL_HYPERPARAMETER_SEARCH_BACKENDS.keys()),
list(HPSearchBackend),
)
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/trainer/test_trainer_hyperparameter.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
huggingface/transformers:tests/trainer/trainer_test_utils.py | # Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared test infrastructure for the Trainer test suite."""
import dataclasses
import gc
import json
import os
import random
import numpy as np
from transformers import (
AutoTokenizer,
PreTrainedConfig,
TrainerCallback,
TrainingArguments,
is_datasets_available,
is_torch_available,
)
from transformers.testing_utils import (
backend_empty_cache,
backend_max_memory_allocated,
backend_memory_allocated,
backend_reset_max_memory_allocated,
get_tests_dir,
torch_device,
)
from transformers.utils import (
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
is_accelerate_available,
)
if torch_device == "hpu":
RTOL = 1e-3
ATOL = 1e-3
else:
RTOL = 1e-5
ATOL = 1e-5
if is_torch_available():
import safetensors.torch
import torch
from torch import nn
from torch.utils.data import IterableDataset
from transformers import (
AutoModelForCausalLM,
PreTrainedModel,
Trainer,
TrainerState,
)
if is_datasets_available():
import datasets
# for version specific tests in TrainerIntegrationTest
if is_accelerate_available():
pass
PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"
def get_dataset(file_path, tokenizer, max_len):
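    """Load a text file with `datasets`, tokenize it, and set labels equal to input_ids (causal LM style)."""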
dataset = datasets.load_dataset("text", data_files=file_path)
# Filter out empty lines
dataset = dataset.filter(lambda example: len(example["text"].strip()) > 0)
# Define tokenization function
def tokenize_function(examples):
tokenized = tokenizer(examples["text"], add_special_tokens=True, truncation=True, max_length=max_len)
# Add labels as a copy of input_ids
tokenized["labels"] = tokenized["input_ids"].copy()
return tokenized
# Apply tokenization and remove original text column
tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=["text"])
return tokenized_dataset["train"]
class StoreLossCallback(TrainerCallback):
"""
Simple callback to store the loss.
"""
def __init__(self):
self.losses = []
def on_log(self, args, state, control, logs=None, **kwargs):
if "loss" in logs:
self.losses.append(logs["loss"])
class MockCudaOOMCallback(TrainerCallback):
"""
Simple callback to simulate CUDA OOM error if
the batch size is >= to `batch_size_limit`.
"""
def __init__(self, batch_size_limit=16):
self.batch_size_limit = batch_size_limit
def on_step_end(self, args, state, control, **kwargs):
# simulate OOM on the first step
if state.train_batch_size >= self.batch_size_limit:
raise RuntimeError("CUDA out of memory.")
class RegressionDataset:
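    """Synthetic regression dataset: y = a * x + b + gaussian noise, with one target per label name."""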
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
np.random.seed(seed)
self.label_names = ["labels"] if label_names is None else label_names
self.length = length
self.x = np.random.normal(size=(length,)).astype(np.float32)
self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
self.ys = [y.astype(np.float32) for y in self.ys]
def __len__(self):
return self.length
def __getitem__(self, i):
result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
result["input_x"] = self.x[i]
return result
# Converting Bytes to Megabytes
def bytes2megabytes(x):
return int(x / 2**20)
# Copied from accelerate: https://github.com/huggingface/accelerate/blob/ee163b66fb7848892519e804688cb4ae981aacbe/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py#L40C1-L73C68
class TorchTracemalloc:
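    """Context manager measuring GPU memory: exposes `used` and `peaked` deltas in MB for the wrapped block."""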
def __enter__(self):
gc.collect()
if torch_device in ["cuda", "xpu"]:
backend_empty_cache(torch_device)
backend_reset_max_memory_allocated(torch_device) # reset the peak gauge to zero
self.begin = backend_memory_allocated(torch_device)
else:
self.begin = 0
return self
def __exit__(self, *exc):
gc.collect()
if torch_device in ["cuda", "xpu"]:
backend_empty_cache(torch_device)
self.end = backend_memory_allocated(torch_device)
self.peak = backend_max_memory_allocated(torch_device)
else:
self.end = 0
self.peak = 0
self.used = bytes2megabytes(self.end - self.begin)
self.peaked = bytes2megabytes(self.peak - self.begin)
@dataclasses.dataclass
class RegressionTrainingArguments(TrainingArguments):
a: float = 0.0
b: float = 0.0
class RepeatDataset:
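    """Dataset returning the same tensor `x` as both `input_ids` and `labels` for every index."""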
def __init__(self, x, length=64):
self.x = x
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_ids": self.x, "labels": self.x}
class SequenceClassificationDataset:
def __init__(self, length=64, vocab_size=100, num_labels=5):
self.length = length
self.sequences = [torch.randint(0, vocab_size, (64,)).tolist() for _ in range(length)]
self.labels = torch.randint(0, num_labels, (length,)).tolist()
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_ids": self.sequences[i], "label": self.labels[i]}
class DynamicShapesDataset:
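    """Regression-style dataset with variable sequence lengths; each group of `batch_size` samples shares a length."""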
def __init__(self, length=64, seed=42, batch_size=8):
self.length = length
np.random.seed(seed)
sizes = np.random.randint(1, 20, (length // batch_size,))
# For easy batching, we make every batch_size consecutive samples the same size.
self.xs = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)]
self.ys = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)]
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_x": self.xs[i], "labels": self.ys[i]}
class AlmostAccuracy:
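    """`compute_metrics` helper returning the fraction of predictions within `thresh` of the labels."""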
def __init__(self, thresh=0.25):
self.thresh = thresh
def __call__(self, eval_pred):
predictions, labels = eval_pred
true = np.abs(predictions - labels) <= self.thresh
return {"accuracy": true.astype(np.float32).mean().item()}
class AlmostAccuracyBatched:
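    """Batched variant of AlmostAccuracy for `batch_eval_metrics=True`: accumulates per-batch accuracy until `compute_result` is set."""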
def __init__(self, thresh=0.25):
self.thresh = thresh
self.batch_acc = []
def __call__(self, eval_pred, compute_result):
predictions, labels = eval_pred
if isinstance(predictions, tuple):
predictions = predictions[0]
if isinstance(labels, tuple):
labels = labels[0]
batch_size = len(predictions)
true = torch.abs(predictions - labels) <= self.thresh
acc = true.type(torch.FloatTensor).mean().item()
self.batch_acc.extend([acc] * batch_size)
if compute_result:
result = {"accuracy": np.mean(self.batch_acc).item()}
self.batch_acc = []
return result
class RegressionModelConfig(PreTrainedConfig):
def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
self.double_output = double_output
self.random_torch = random_torch
self.hidden_size = 1
if is_torch_available():
class SampleIterableDataset(IterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)
def __iter__(self):
for i in range(len(self.dataset)):
yield self.dataset[i]
class FiniteIterableDataset(SampleIterableDataset):
def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
super().__init__(a, b, length, seed, label_names)
self.current_sample = 0
def __iter__(self):
while self.current_sample < len(self.dataset):
yield self.dataset[self.current_sample]
self.current_sample += 1
class MultiLoader:
def __init__(self, loaders):
self.loaders = loaders
def __len__(self):
return sum(len(loader) for loader in self.loaders)
def __iter__(self):
for loader in self.loaders:
yield from loader
class CustomDataloaderTrainer(Trainer):
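        """Trainer whose train/eval dataloaders are `MultiLoader`s iterating the underlying dataloader twice."""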
def get_train_dataloader(self):
dataloaders = [super().get_train_dataloader(), super().get_train_dataloader()]
return MultiLoader(dataloaders)
def get_eval_dataloader(self, eval_dataset):
dataloaders = [super().get_eval_dataloader(eval_dataset), super().get_eval_dataloader(eval_dataset)]
return MultiLoader(dataloaders)
class RegressionModel(nn.Module):
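        """Plain nn.Module computing y = a * input_x + b and returning tuple outputs like a HF model."""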
def __init__(self, a=0, b=0, double_output=False):
super().__init__()
self.a = nn.Parameter(torch.tensor(a).float())
self.b = nn.Parameter(torch.tensor(b).float())
self.double_output = double_output
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
class RegressionDictModel(nn.Module):
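        """Regression model returning a dict with an "output" key (and "loss" when labels are provided)."""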
def __init__(self, a=0, b=0):
super().__init__()
self.a = nn.Parameter(torch.tensor(a).float())
self.b = nn.Parameter(torch.tensor(b).float())
self.config = None
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
result = {"output": y}
if labels is not None:
result["loss"] = nn.functional.mse_loss(y, labels)
return result
class RegressionPreTrainedModel(PreTrainedModel):
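        """PreTrainedModel version of the regression model, with `a` and `b` taken from a RegressionModelConfig."""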
config_class = RegressionModelConfig
base_model_prefix = "regression"
def __init__(self, config):
super().__init__(config)
self.a = nn.Parameter(torch.tensor(config.a).float())
self.b = nn.Parameter(torch.tensor(config.b).float())
self.double_output = config.double_output
self.post_init()
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if labels is None:
return (y, y) if self.double_output else (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y, y) if self.double_output else (loss, y)
class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
supports_gradient_checkpointing = True
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)])
self.head = nn.Linear(config.hidden_size, 1)
self.gradient_checkpointing = False
self.double_output = config.double_output
self.post_init()
def forward(self, input_x, labels=None, **kwargs):
y = input_x.unsqueeze(0)
for layer in self.layers:
if self.training and self.gradient_checkpointing:
outputs = self._gradient_checkpointing_func(layer.__call__, y)
else:
outputs = layer(y)
y = outputs * 3
logits = self.head(y)
if labels is None:
return (logits, logits) if self.double_output else (logits,)
loss = nn.functional.mse_loss(logits, labels)
return (loss, y, y) if self.double_output else (loss, y)
class RegressionRandomPreTrainedModel(PreTrainedModel):
config_class = RegressionModelConfig
base_model_prefix = "regression"
def __init__(self, config):
super().__init__(config)
self.a = nn.Parameter(torch.tensor(config.a).float())
self.b = nn.Parameter(torch.tensor(config.b).float())
self.random_torch = config.random_torch
self.post_init()
def forward(self, input_x, labels=None, **kwargs):
y = input_x * self.a + self.b
if self.random_torch:
torch_rand = torch.randn(1).squeeze()
np_rand = np.random.rand()
rand_rand = random.random()
if self.random_torch:
y += 0.05 * torch_rand
y += 0.05 * torch.tensor(np_rand + rand_rand)
if labels is None:
return (y,)
loss = nn.functional.mse_loss(y, labels)
return (loss, y)
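    # RegressionRandomPreTrainedModel draws from the torch, NumPy and Python RNGs on every forward
    # pass (the torch RNG only when `random_torch` is set), which makes it possible to check that RNG
    # states are correctly saved and restored when resuming training from a checkpoint.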
class BasicTextGenerationModel(nn.Module):
def __init__(self, vocab_size, hidden_size):
super().__init__()
self.embedding = nn.Embedding(vocab_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size, batch_first=True)
self.fc = nn.Linear(hidden_size, vocab_size)
def forward(self, input_ids, labels=None, **kwargs):
embedded = self.embedding(input_ids)
lstm_out, _ = self.lstm(embedded)
logits = self.fc(lstm_out)
if labels is None:
return logits
loss = nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
return loss, logits
def create_dummy_dataset_for_text_generation(vocab_size, seq_length, num_samples):
# Create random input sequences
input_ids = np.random.randint(0, vocab_size, (num_samples, seq_length))
# Create a datasets.Dataset
dataset = datasets.Dataset.from_dict({"input_ids": input_ids, "labels": input_ids})
return dataset
class TstLayer(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = nn.Linear(hidden_size, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.bias = nn.Parameter(torch.zeros(hidden_size))
        def forward(self, x):
            # Chain the two sub-layers so both linear layers contribute to the residual output.
            h = self.ln1(nn.functional.relu(self.linear1(x)))
            h = nn.functional.relu(self.linear2(h))
            return self.ln2(x + h + self.bias)
def get_regression_trainer(
a=0,
b=0,
double_output=False,
train_len=64,
eval_len=64,
pretrained=True,
output_dir=None,
**kwargs,
):
label_names = kwargs.get("label_names")
gradient_checkpointing = kwargs.get("gradient_checkpointing", False)
train_dataset = RegressionDataset(length=train_len, label_names=label_names)
eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)
model_init = kwargs.pop("model_init", None)
if model_init is not None:
model = None
else:
if pretrained:
config = RegressionModelConfig(a=a, b=b, double_output=double_output)
                # Pick the model class based on whether gradient checkpointing is requested
target_cls = (
RegressionPreTrainedModel
if not gradient_checkpointing
else RegressionPreTrainedModelWithGradientCheckpointing
)
model = target_cls(config)
else:
model = RegressionModel(a=a, b=b, double_output=double_output)
compute_metrics = kwargs.pop("compute_metrics", None)
data_collator = kwargs.pop("data_collator", None)
optimizers = kwargs.pop("optimizers", (None, None))
preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None)
assert output_dir is not None, "output_dir should be specified for testing"
args = RegressionTrainingArguments(output_dir, a=a, b=b, **kwargs)
trainer = Trainer(
model,
args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
optimizers=optimizers,
model_init=model_init,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
    # TODO: the loss function defined in RegressionModel doesn't accept `num_items_in_batch`, to fix later
trainer.model_accepts_loss_kwargs = False
return trainer
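    # Minimal usage sketch (the output directory and extra options below are illustrative; any
    # writable temporary directory works, and remaining kwargs are assumed to be standard
    # TrainingArguments options forwarded through RegressionTrainingArguments):
    #
    #     trainer = get_regression_trainer(a=1.5, b=2.5, output_dir=tmp_dir, max_steps=4)
    #     trainer.train()
    #     metrics = trainer.evaluate()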
def get_language_model_trainer(**kwargs):
dataset = datasets.load_dataset("fka/awesome-chatgpt-prompts")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
tokenizer.pad_token = tokenizer.eos_token
def _tokenize_function(examples):
model_inputs = tokenizer(examples["prompt"], padding="max_length", truncation=True)
model_inputs["labels"] = np.array(model_inputs["input_ids"]).astype(np.int64)
return model_inputs
tokenized_datasets = dataset.map(_tokenize_function, batched=True)
training_args = TrainingArguments(**kwargs)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
)
return trainer
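    # Minimal usage sketch: the keyword arguments are forwarded verbatim to TrainingArguments, so a
    # quick smoke test could look like (values are illustrative):
    #
    #     trainer = get_language_model_trainer(output_dir=tmp_dir, max_steps=2, report_to="none")
    #     trainer.train()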
class TrainerIntegrationCommon:
def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, use_scaler=False):
weights_file = SAFE_WEIGHTS_NAME
file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"]
if is_pretrained:
file_list.append("config.json")
if use_scaler:
file_list.append("scaler.pt")
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
self.assertTrue(os.path.isdir(checkpoint))
for filename in file_list:
self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))
def check_best_model_has_been_loaded(
self,
output_dir,
freq,
total,
trainer,
metric,
greater_is_better=False,
is_pretrained=True,
):
checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}")
log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history
values = [d[metric] for d in log_history]
best_value = max(values) if greater_is_better else min(values)
best_checkpoint = (values.index(best_value) + 1) * freq
checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}")
if is_pretrained:
best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)
best_model.to(trainer.args.device)
else:
best_model = RegressionModel()
state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME))
best_model.load_state_dict(state_dict)
best_model.to(trainer.args.device)
torch.testing.assert_close(best_model.a, trainer.model.a)
torch.testing.assert_close(best_model.b, trainer.model.b)
metrics = trainer.evaluate()
self.assertEqual(metrics[metric], best_value)
def remove_nan_logs(self, log):
for key in list(log.keys()):
if log[key] != log[key]: # Check if the value is NaN
del log[key]
def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):
# We'll pop things so operate on copies.
state = trainer_state.copy()
state1 = trainer_state1.copy()
            # Log history may contain different logs for the time metrics (after resuming a training).
log_history = state.pop("log_history", None)
log_history1 = state1.pop("log_history", None)
self.assertEqual(state, state1)
skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"]
for log, log1 in zip(log_history, log_history1):
for key in skip_log_keys:
_ = log.pop(key, None)
_ = log1.pop(key, None)
self.remove_nan_logs(log)
self.remove_nan_logs(log1)
self.assertEqual(log, log1)
def convert_to_sharded_checkpoint(self, folder):
# Converts a checkpoint of a regression model to a sharded checkpoint.
loader = safetensors.torch.load_file
weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME)
extension = "safetensors"
saver = safetensors.torch.save_file
index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME)
shard_name = SAFE_WEIGHTS_NAME
state_dict = loader(weights_file)
os.remove(weights_file)
keys = list(state_dict.keys())
shard_files = [
shard_name.replace(f".{extension}", f"-{idx + 1:05d}-of-{len(keys):05d}.{extension}")
for idx in range(len(keys))
]
index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}}
with open(index_file, "w", encoding="utf-8") as f:
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
f.write(content)
for param_name, shard_file in zip(keys, shard_files):
saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file))
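            # The folder now contains one shard per parameter plus an index JSON mapping every
            # parameter name to its shard file, mimicking the layout `save_pretrained` produces for
            # sharded checkpoints.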
| {
"repo_id": "huggingface/transformers",
"file_path": "tests/trainer/trainer_test_utils.py",
"license": "Apache License 2.0",
"lines": 499,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.